repo_name | path | copies | size | content | license

PaulWay/insights-core | insights/parsers/tests/test_hammer_ping.py | copies: 1 | size: 1657 | license: apache-2.0

from insights.parsers import hammer_ping
from insights.tests import context_wrap
HAMMERPING = """
candlepin:
Status: FAIL
Server Response:Message:404 Resource Not Found
candlepin_auth:
Status: FAIL
Server Response: Message: Katello::Resources::Candlepin::CandlepinPing: 404 Resource Not Found <html>... (skipping some generic 404 html page details) </html> (GET /candlepin/status)
pulp:
Status: ok
Server Response: Duration: 61ms
pulp_auth:
Status: ok
Server Response: Duration: 27ms
elasticsearch:
Status: ok
Server Response: Duration: 35ms
foreman_tasks:
Status: ok
server Response: Duration: 1ms
""".strip()
def test_hammer_ping():
status = hammer_ping.HammerPing(context_wrap(HAMMERPING))
dic = status.data
assert len(status) == 6
assert len(dic) == 6
assert dic.keys() == [
'candlepin', 'candlepin_auth', 'foreman_tasks', 'elasticsearch',
'pulp_auth', 'pulp'
]
assert dic['candlepin'].keys() == ['status', 'response']
assert dic['candlepin']['status'] == 'FAIL'
assert dic['candlepin']['response'] == '404 Resource Not Found'
assert dic['pulp_auth']['status'] == 'ok'
assert dic['pulp_auth']['response'] == '27ms'
assert not status.is_ok('candlepin_auth')
assert status.is_ok('pulp')
assert status.response_of('candlepin') == '404 Resource Not Found'
assert status.response_of('elasticsearch') == '35ms'
# A service that isn't in the list should return false values
assert not status.is_ok('nonexistent')
assert status.response_of('nonexistent') == ''
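# Editor's illustrative sketch (not part of the original test): minimal
# standalone use of the parser exercised above. The input string here is
# hypothetical.
#
# status = hammer_ping.HammerPing(context_wrap(
#     "pulp:\nStatus: ok\nServer Response: Duration: 61ms"))
# status.is_ok('pulp')          # -> True
# status.response_of('pulp')    # -> '61ms'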

asana/python-asana | asana/resources/gen/jobs.py | copies: 2 | size: 1382 | license: mit

# coding=utf-8
class _Jobs:
def __init__(self, client=None):
self.client = client
def get_job(self, job_gid, params=None, **options):
"""Get a job by id
:param str job_gid: (required) Globally unique identifier for the job.
:param Object params: Parameters for the request
:param **options
- opt_fields {list[str]}: Defines fields to return. Some requests return *compact* representations of objects in order to conserve resources and complete the request more efficiently. Other times requests return more information than you may need. This option allows you to list the exact set of fields that the API should be sure to return for the objects. The field names should be provided as paths, described below. The id of included objects will always be returned, regardless of the field options.
- opt_pretty {bool}: Provides “pretty” output. Provides the response in a “pretty” format. In the case of JSON this means doing proper line breaking and indentation to make it readable. This will take extra time and increase the response size so it is advisable only to use this during debugging.
:return: Object
"""
if params is None:
params = {}
path = "/jobs/{job_gid}".replace("{job_gid}", job_gid)
return self.client.get(path, params, **options)
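# Editor's illustrative sketch (assumes the standard python-asana client
# pattern; the access token and job gid are hypothetical):
#
# import asana
# client = asana.Client.access_token('PERSONAL_ACCESS_TOKEN')
# job = client.jobs.get_job('12345', opt_fields=['resource_subtype'])
# print(job)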

Mhynlo/SickRage | lib/rebulk/toposort.py | copies: 38 | size: 2553 | license: gpl-3.0

#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright 2014 True Blade Systems, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Original:
# - https://bitbucket.org/ericvsmith/toposort (1.4)
# Modifications:
# - merged Pull request #2 for CyclicDependency error
# - import reduce as original name
# - support python 2.6 dict comprehension
# pylint: skip-file
from functools import reduce
class CyclicDependency(ValueError):
def __init__(self, cyclic):
s = 'Cyclic dependencies exist among these items: {0}'.format(', '.join(repr(x) for x in cyclic.items()))
super(CyclicDependency, self).__init__(s)
self.cyclic = cyclic
def toposort(data):
"""
Dependencies are expressed as a dictionary whose keys are items
and whose values are a set of dependent items. Output is a list of
sets in topological order. The first set consists of items with no
dependencies; each subsequent set consists of items that depend upon
items in the preceding sets.
:param data:
:type data:
:return:
:rtype:
"""
# Special case empty input.
if len(data) == 0:
return
# Copy the input so as to leave it unmodified.
data = data.copy()
# Ignore self dependencies.
for k, v in data.items():
v.discard(k)
# Find all items that don't depend on anything.
extra_items_in_deps = reduce(set.union, data.values()) - set(data.keys())
# Add empty dependencies where needed.
data.update(dict((item, set()) for item in extra_items_in_deps))
while True:
ordered = set(item for item, dep in data.items() if len(dep) == 0)
if not ordered:
break
yield ordered
data = dict((item, (dep - ordered))
for item, dep in data.items()
if item not in ordered)
if len(data) != 0:
raise CyclicDependency(data)
def toposort_flatten(data, sort=True):
"""
Returns a single list of dependencies. For any set returned by
toposort(), those items are sorted and appended to the result (just to
make the results deterministic).
:param data:
:type data:
:param sort:
:type sort:
:return: Single list of dependencies.
:rtype: list
"""
result = []
for d in toposort(data):
result.extend((sorted if sort else list)(d))
return result
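# Editor's illustrative sketch (hypothetical data, not in the original
# module): toposort() yields sets whose members have all their dependencies
# satisfied by earlier sets; toposort_flatten() linearizes the result.
#
# deps = {2: {11}, 9: {11, 8}, 10: {11, 3}, 11: {7, 5}, 8: {7, 3}}
# list(toposort(deps))     # -> [{3, 5, 7}, {8, 11}, {2, 9, 10}]
# toposort_flatten(deps)   # -> [3, 5, 7, 8, 11, 2, 9, 10]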

marcoarruda/MissionPlanner | Lib/optparse.py | copies: 55 | size: 62827 | license: gpl-3.0

"""A powerful, extensible, and easy-to-use option parser.
By Greg Ward <gward@python.net>
Originally distributed as Optik.
For support, use the optik-users@lists.sourceforge.net mailing list
(http://lists.sourceforge.net/lists/listinfo/optik-users).
Simple usage example:
from optparse import OptionParser
parser = OptionParser()
parser.add_option("-f", "--file", dest="filename",
help="write report to FILE", metavar="FILE")
parser.add_option("-q", "--quiet",
action="store_false", dest="verbose", default=True,
help="don't print status messages to stdout")
(options, args) = parser.parse_args()
"""
__version__ = "1.5.3"
__all__ = ['Option',
'make_option',
'SUPPRESS_HELP',
'SUPPRESS_USAGE',
'Values',
'OptionContainer',
'OptionGroup',
'OptionParser',
'HelpFormatter',
'IndentedHelpFormatter',
'TitledHelpFormatter',
'OptParseError',
'OptionError',
'OptionConflictError',
'OptionValueError',
'BadOptionError']
__copyright__ = """
Copyright (c) 2001-2006 Gregory P. Ward. All rights reserved.
Copyright (c) 2002-2006 Python Software Foundation. All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
* Neither the name of the author nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR
CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""
import sys, os
import types
import textwrap
def _repr(self):
return "<%s at 0x%x: %s>" % (self.__class__.__name__, id(self), self)
# This file was generated from:
# Id: option_parser.py 527 2006-07-23 15:21:30Z greg
# Id: option.py 522 2006-06-11 16:22:03Z gward
# Id: help.py 527 2006-07-23 15:21:30Z greg
# Id: errors.py 509 2006-04-20 00:58:24Z gward
try:
from gettext import gettext
except ImportError:
def gettext(message):
return message
_ = gettext
class OptParseError (Exception):
def __init__(self, msg):
self.msg = msg
def __str__(self):
return self.msg
class OptionError (OptParseError):
"""
Raised if an Option instance is created with invalid or
inconsistent arguments.
"""
def __init__(self, msg, option):
self.msg = msg
self.option_id = str(option)
def __str__(self):
if self.option_id:
return "option %s: %s" % (self.option_id, self.msg)
else:
return self.msg
class OptionConflictError (OptionError):
"""
Raised if conflicting options are added to an OptionParser.
"""
class OptionValueError (OptParseError):
"""
Raised if an invalid option value is encountered on the command
line.
"""
class BadOptionError (OptParseError):
"""
Raised if an invalid option is seen on the command line.
"""
def __init__(self, opt_str):
self.opt_str = opt_str
def __str__(self):
return _("no such option: %s") % self.opt_str
class AmbiguousOptionError (BadOptionError):
"""
Raised if an ambiguous option is seen on the command line.
"""
def __init__(self, opt_str, possibilities):
BadOptionError.__init__(self, opt_str)
self.possibilities = possibilities
def __str__(self):
return (_("ambiguous option: %s (%s?)")
% (self.opt_str, ", ".join(self.possibilities)))
class HelpFormatter:
"""
Abstract base class for formatting option help. OptionParser
instances should use one of the HelpFormatter subclasses for
formatting help; by default IndentedHelpFormatter is used.
Instance attributes:
parser : OptionParser
the controlling OptionParser instance
indent_increment : int
the number of columns to indent per nesting level
max_help_position : int
the maximum starting column for option help text
help_position : int
the calculated starting column for option help text;
initially the same as the maximum
width : int
total number of columns for output (pass None to constructor for
this value to be taken from the $COLUMNS environment variable)
level : int
current indentation level
current_indent : int
current indentation level (in columns)
help_width : int
number of columns available for option help text (calculated)
default_tag : str
text to replace with each option's default value, "%default"
by default. Set to false value to disable default value expansion.
option_strings : { Option : str }
maps Option instances to the snippet of help text explaining
the syntax of that option, e.g. "-h, --help" or
"-fFILE, --file=FILE"
_short_opt_fmt : str
format string controlling how short options with values are
printed in help text. Must be either "%s%s" ("-fFILE") or
"%s %s" ("-f FILE"), because those are the two syntaxes that
Optik supports.
_long_opt_fmt : str
similar but for long options; must be either "%s %s" ("--file FILE")
or "%s=%s" ("--file=FILE").
"""
NO_DEFAULT_VALUE = "none"
def __init__(self,
indent_increment,
max_help_position,
width,
short_first):
self.parser = None
self.indent_increment = indent_increment
self.help_position = self.max_help_position = max_help_position
if width is None:
try:
width = int(os.environ['COLUMNS'])
except (KeyError, ValueError):
width = 80
width -= 2
self.width = width
self.current_indent = 0
self.level = 0
self.help_width = None # computed later
self.short_first = short_first
self.default_tag = "%default"
self.option_strings = {}
self._short_opt_fmt = "%s %s"
self._long_opt_fmt = "%s=%s"
def set_parser(self, parser):
self.parser = parser
def set_short_opt_delimiter(self, delim):
if delim not in ("", " "):
raise ValueError(
"invalid metavar delimiter for short options: %r" % delim)
self._short_opt_fmt = "%s" + delim + "%s"
def set_long_opt_delimiter(self, delim):
if delim not in ("=", " "):
raise ValueError(
"invalid metavar delimiter for long options: %r" % delim)
self._long_opt_fmt = "%s" + delim + "%s"
def indent(self):
self.current_indent += self.indent_increment
self.level += 1
def dedent(self):
self.current_indent -= self.indent_increment
assert self.current_indent >= 0, "Indent decreased below 0."
self.level -= 1
def format_usage(self, usage):
raise NotImplementedError, "subclasses must implement"
def format_heading(self, heading):
raise NotImplementedError, "subclasses must implement"
def _format_text(self, text):
"""
Format a paragraph of free-form text for inclusion in the
help output at the current indentation level.
"""
text_width = self.width - self.current_indent
indent = " "*self.current_indent
return textwrap.fill(text,
text_width,
initial_indent=indent,
subsequent_indent=indent)
def format_description(self, description):
if description:
return self._format_text(description) + "\n"
else:
return ""
def format_epilog(self, epilog):
if epilog:
return "\n" + self._format_text(epilog) + "\n"
else:
return ""
def expand_default(self, option):
if self.parser is None or not self.default_tag:
return option.help
default_value = self.parser.defaults.get(option.dest)
if default_value is NO_DEFAULT or default_value is None:
default_value = self.NO_DEFAULT_VALUE
return option.help.replace(self.default_tag, str(default_value))
def format_option(self, option):
# The help for each option consists of two parts:
# * the opt strings and metavars
# eg. ("-x", or "-fFILENAME, --file=FILENAME")
# * the user-supplied help string
# eg. ("turn on expert mode", "read data from FILENAME")
#
# If possible, we write both of these on the same line:
# -x turn on expert mode
#
# But if the opt string list is too long, we put the help
# string on a second line, indented to the same column it would
# start in if it fit on the first line.
# -fFILENAME, --file=FILENAME
# read data from FILENAME
result = []
opts = self.option_strings[option]
opt_width = self.help_position - self.current_indent - 2
if len(opts) > opt_width:
opts = "%*s%s\n" % (self.current_indent, "", opts)
indent_first = self.help_position
else: # start help on same line as opts
opts = "%*s%-*s " % (self.current_indent, "", opt_width, opts)
indent_first = 0
result.append(opts)
if option.help:
help_text = self.expand_default(option)
help_lines = textwrap.wrap(help_text, self.help_width)
result.append("%*s%s\n" % (indent_first, "", help_lines[0]))
result.extend(["%*s%s\n" % (self.help_position, "", line)
for line in help_lines[1:]])
elif opts[-1] != "\n":
result.append("\n")
return "".join(result)
def store_option_strings(self, parser):
self.indent()
max_len = 0
for opt in parser.option_list:
strings = self.format_option_strings(opt)
self.option_strings[opt] = strings
max_len = max(max_len, len(strings) + self.current_indent)
self.indent()
for group in parser.option_groups:
for opt in group.option_list:
strings = self.format_option_strings(opt)
self.option_strings[opt] = strings
max_len = max(max_len, len(strings) + self.current_indent)
self.dedent()
self.dedent()
self.help_position = min(max_len + 2, self.max_help_position)
self.help_width = self.width - self.help_position
def format_option_strings(self, option):
"""Return a comma-separated list of option strings & metavariables."""
if option.takes_value():
metavar = option.metavar or option.dest.upper()
short_opts = [self._short_opt_fmt % (sopt, metavar)
for sopt in option._short_opts]
long_opts = [self._long_opt_fmt % (lopt, metavar)
for lopt in option._long_opts]
else:
short_opts = option._short_opts
long_opts = option._long_opts
if self.short_first:
opts = short_opts + long_opts
else:
opts = long_opts + short_opts
return ", ".join(opts)
class IndentedHelpFormatter (HelpFormatter):
"""Format help with indented section bodies.
"""
def __init__(self,
indent_increment=2,
max_help_position=24,
width=None,
short_first=1):
HelpFormatter.__init__(
self, indent_increment, max_help_position, width, short_first)
def format_usage(self, usage):
return _("Usage: %s\n") % usage
def format_heading(self, heading):
return "%*s%s:\n" % (self.current_indent, "", heading)
class TitledHelpFormatter (HelpFormatter):
"""Format help with underlined section headers.
"""
def __init__(self,
indent_increment=0,
max_help_position=24,
width=None,
short_first=0):
HelpFormatter.__init__ (
self, indent_increment, max_help_position, width, short_first)
def format_usage(self, usage):
return "%s %s\n" % (self.format_heading(_("Usage")), usage)
def format_heading(self, heading):
return "%s\n%s\n" % (heading, "=-"[self.level] * len(heading))
def _parse_num(val, type):
if val[:2].lower() == "0x": # hexadecimal
radix = 16
elif val[:2].lower() == "0b": # binary
radix = 2
val = val[2:] or "0" # have to remove "0b" prefix
elif val[:1] == "0": # octal
radix = 8
else: # decimal
radix = 10
return type(val, radix)
def _parse_int(val):
return _parse_num(val, int)
def _parse_long(val):
return _parse_num(val, long)
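# Editor's note illustrating the radix handling above (hypothetical inputs):
# _parse_int("0x1A") -> 26 (hex), _parse_int("0b101") -> 5 (binary),
# _parse_int("017") -> 15 (octal), _parse_int("42") -> 42 (decimal)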
_builtin_cvt = { "int" : (_parse_int, _("integer")),
"long" : (_parse_long, _("long integer")),
"float" : (float, _("floating-point")),
"complex" : (complex, _("complex")) }
def check_builtin(option, opt, value):
(cvt, what) = _builtin_cvt[option.type]
try:
return cvt(value)
except ValueError:
raise OptionValueError(
_("option %s: invalid %s value: %r") % (opt, what, value))
def check_choice(option, opt, value):
if value in option.choices:
return value
else:
choices = ", ".join(map(repr, option.choices))
raise OptionValueError(
_("option %s: invalid choice: %r (choose from %s)")
% (opt, value, choices))
# Not supplying a default is different from a default of None,
# so we need an explicit "not supplied" value.
NO_DEFAULT = ("NO", "DEFAULT")
class Option:
"""
Instance attributes:
_short_opts : [string]
_long_opts : [string]
action : string
type : string
dest : string
default : any
nargs : int
const : any
choices : [string]
callback : function
callback_args : (any*)
callback_kwargs : { string : any }
help : string
metavar : string
"""
# The list of instance attributes that may be set through
# keyword args to the constructor.
ATTRS = ['action',
'type',
'dest',
'default',
'nargs',
'const',
'choices',
'callback',
'callback_args',
'callback_kwargs',
'help',
'metavar']
# The set of actions allowed by option parsers. Explicitly listed
# here so the constructor can validate its arguments.
ACTIONS = ("store",
"store_const",
"store_true",
"store_false",
"append",
"append_const",
"count",
"callback",
"help",
"version")
# The set of actions that involve storing a value somewhere;
# also listed just for constructor argument validation. (If
# the action is one of these, there must be a destination.)
STORE_ACTIONS = ("store",
"store_const",
"store_true",
"store_false",
"append",
"append_const",
"count")
# The set of actions for which it makes sense to supply a value
# type, ie. which may consume an argument from the command line.
TYPED_ACTIONS = ("store",
"append",
"callback")
# The set of actions which *require* a value type, ie. that
# always consume an argument from the command line.
ALWAYS_TYPED_ACTIONS = ("store",
"append")
# The set of actions which take a 'const' attribute.
CONST_ACTIONS = ("store_const",
"append_const")
# The set of known types for option parsers. Again, listed here for
# constructor argument validation.
TYPES = ("string", "int", "long", "float", "complex", "choice")
# Dictionary of argument checking functions, which convert and
# validate option arguments according to the option type.
#
# Signature of checking functions is:
# check(option : Option, opt : string, value : string) -> any
# where
# option is the Option instance calling the checker
# opt is the actual option seen on the command-line
# (eg. "-a", "--file")
# value is the option argument seen on the command-line
#
# The return value should be in the appropriate Python type
# for option.type -- eg. an integer if option.type == "int".
#
# If no checker is defined for a type, arguments will be
# unchecked and remain strings.
TYPE_CHECKER = { "int" : check_builtin,
"long" : check_builtin,
"float" : check_builtin,
"complex": check_builtin,
"choice" : check_choice,
}
# CHECK_METHODS is a list of unbound method objects; they are called
# by the constructor, in order, after all attributes are
# initialized. The list is created and filled in later, after all
# the methods are actually defined. (I just put it here because I
# like to define and document all class attributes in the same
# place.) Subclasses that add another _check_*() method should
# define their own CHECK_METHODS list that adds their check method
# to those from this class.
CHECK_METHODS = None
# -- Constructor/initialization methods ----------------------------
def __init__(self, *opts, **attrs):
# Set _short_opts, _long_opts attrs from 'opts' tuple.
# Have to be set now, in case no option strings are supplied.
self._short_opts = []
self._long_opts = []
opts = self._check_opt_strings(opts)
self._set_opt_strings(opts)
# Set all other attrs (action, type, etc.) from 'attrs' dict
self._set_attrs(attrs)
# Check all the attributes we just set. There are lots of
# complicated interdependencies, but luckily they can be farmed
# out to the _check_*() methods listed in CHECK_METHODS -- which
# could be handy for subclasses! The one thing these all share
# is that they raise OptionError if they discover a problem.
for checker in self.CHECK_METHODS:
checker(self)
def _check_opt_strings(self, opts):
# Filter out None because early versions of Optik had exactly
# one short option and one long option, either of which
# could be None.
opts = filter(None, opts)
if not opts:
raise TypeError("at least one option string must be supplied")
return opts
def _set_opt_strings(self, opts):
for opt in opts:
if len(opt) < 2:
raise OptionError(
"invalid option string %r: "
"must be at least two characters long" % opt, self)
elif len(opt) == 2:
if not (opt[0] == "-" and opt[1] != "-"):
raise OptionError(
"invalid short option string %r: "
"must be of the form -x, (x any non-dash char)" % opt,
self)
self._short_opts.append(opt)
else:
if not (opt[0:2] == "--" and opt[2] != "-"):
raise OptionError(
"invalid long option string %r: "
"must start with --, followed by non-dash" % opt,
self)
self._long_opts.append(opt)
def _set_attrs(self, attrs):
for attr in self.ATTRS:
if attr in attrs:
setattr(self, attr, attrs[attr])
del attrs[attr]
else:
if attr == 'default':
setattr(self, attr, NO_DEFAULT)
else:
setattr(self, attr, None)
if attrs:
attrs = attrs.keys()
attrs.sort()
raise OptionError(
"invalid keyword arguments: %s" % ", ".join(attrs),
self)
# -- Constructor validation methods --------------------------------
def _check_action(self):
if self.action is None:
self.action = "store"
elif self.action not in self.ACTIONS:
raise OptionError("invalid action: %r" % self.action, self)
def _check_type(self):
if self.type is None:
if self.action in self.ALWAYS_TYPED_ACTIONS:
if self.choices is not None:
# The "choices" attribute implies "choice" type.
self.type = "choice"
else:
# No type given? "string" is the most sensible default.
self.type = "string"
else:
# Allow type objects or builtin type conversion functions
# (int, str, etc.) as an alternative to their names. (The
# complicated check of __builtin__ is only necessary for
# Python 2.1 and earlier, and is short-circuited by the
# first check on modern Pythons.)
import __builtin__
if ( type(self.type) is types.TypeType or
(hasattr(self.type, "__name__") and
getattr(__builtin__, self.type.__name__, None) is self.type) ):
self.type = self.type.__name__
if self.type == "str":
self.type = "string"
if self.type not in self.TYPES:
raise OptionError("invalid option type: %r" % self.type, self)
if self.action not in self.TYPED_ACTIONS:
raise OptionError(
"must not supply a type for action %r" % self.action, self)
def _check_choice(self):
if self.type == "choice":
if self.choices is None:
raise OptionError(
"must supply a list of choices for type 'choice'", self)
elif type(self.choices) not in (types.TupleType, types.ListType):
raise OptionError(
"choices must be a list of strings ('%s' supplied)"
% str(type(self.choices)).split("'")[1], self)
elif self.choices is not None:
raise OptionError(
"must not supply choices for type %r" % self.type, self)
def _check_dest(self):
# No destination given, and we need one for this action. The
# self.type check is for callbacks that take a value.
takes_value = (self.action in self.STORE_ACTIONS or
self.type is not None)
if self.dest is None and takes_value:
# Glean a destination from the first long option string,
# or from the first short option string if no long options.
if self._long_opts:
# eg. "--foo-bar" -> "foo_bar"
self.dest = self._long_opts[0][2:].replace('-', '_')
else:
self.dest = self._short_opts[0][1]
def _check_const(self):
if self.action not in self.CONST_ACTIONS and self.const is not None:
raise OptionError(
"'const' must not be supplied for action %r" % self.action,
self)
def _check_nargs(self):
if self.action in self.TYPED_ACTIONS:
if self.nargs is None:
self.nargs = 1
elif self.nargs is not None:
raise OptionError(
"'nargs' must not be supplied for action %r" % self.action,
self)
def _check_callback(self):
if self.action == "callback":
if not hasattr(self.callback, '__call__'):
raise OptionError(
"callback not callable: %r" % self.callback, self)
if (self.callback_args is not None and
type(self.callback_args) is not types.TupleType):
raise OptionError(
"callback_args, if supplied, must be a tuple: not %r"
% self.callback_args, self)
if (self.callback_kwargs is not None and
type(self.callback_kwargs) is not types.DictType):
raise OptionError(
"callback_kwargs, if supplied, must be a dict: not %r"
% self.callback_kwargs, self)
else:
if self.callback is not None:
raise OptionError(
"callback supplied (%r) for non-callback option"
% self.callback, self)
if self.callback_args is not None:
raise OptionError(
"callback_args supplied for non-callback option", self)
if self.callback_kwargs is not None:
raise OptionError(
"callback_kwargs supplied for non-callback option", self)
CHECK_METHODS = [_check_action,
_check_type,
_check_choice,
_check_dest,
_check_const,
_check_nargs,
_check_callback]
# -- Miscellaneous methods -----------------------------------------
def __str__(self):
return "/".join(self._short_opts + self._long_opts)
__repr__ = _repr
def takes_value(self):
return self.type is not None
def get_opt_string(self):
if self._long_opts:
return self._long_opts[0]
else:
return self._short_opts[0]
# -- Processing methods --------------------------------------------
def check_value(self, opt, value):
checker = self.TYPE_CHECKER.get(self.type)
if checker is None:
return value
else:
return checker(self, opt, value)
def convert_value(self, opt, value):
if value is not None:
if self.nargs == 1:
return self.check_value(opt, value)
else:
return tuple([self.check_value(opt, v) for v in value])
def process(self, opt, value, values, parser):
# First, convert the value(s) to the right type. Howl if any
# value(s) are bogus.
value = self.convert_value(opt, value)
# And then take whatever action is expected of us.
# This is a separate method to make life easier for
# subclasses to add new actions.
return self.take_action(
self.action, self.dest, opt, value, values, parser)
def take_action(self, action, dest, opt, value, values, parser):
if action == "store":
setattr(values, dest, value)
elif action == "store_const":
setattr(values, dest, self.const)
elif action == "store_true":
setattr(values, dest, True)
elif action == "store_false":
setattr(values, dest, False)
elif action == "append":
values.ensure_value(dest, []).append(value)
elif action == "append_const":
values.ensure_value(dest, []).append(self.const)
elif action == "count":
setattr(values, dest, values.ensure_value(dest, 0) + 1)
elif action == "callback":
args = self.callback_args or ()
kwargs = self.callback_kwargs or {}
self.callback(self, opt, value, parser, *args, **kwargs)
elif action == "help":
parser.print_help()
parser.exit()
elif action == "version":
parser.print_version()
parser.exit()
else:
raise ValueError("unknown action %r" % self.action)
return 1
# class Option
SUPPRESS_HELP = "SUPPRESS"+"HELP"
SUPPRESS_USAGE = "SUPPRESS"+"USAGE"
try:
basestring
except NameError:
def isbasestring(x):
return isinstance(x, (types.StringType, types.UnicodeType))
else:
def isbasestring(x):
return isinstance(x, basestring)
class Values:
def __init__(self, defaults=None):
if defaults:
for (attr, val) in defaults.items():
setattr(self, attr, val)
def __str__(self):
return str(self.__dict__)
__repr__ = _repr
def __cmp__(self, other):
if isinstance(other, Values):
return cmp(self.__dict__, other.__dict__)
elif isinstance(other, types.DictType):
return cmp(self.__dict__, other)
else:
return -1
def _update_careful(self, dict):
"""
Update the option values from an arbitrary dictionary, but only
use keys from dict that already have a corresponding attribute
in self. Any keys in dict without a corresponding attribute
are silently ignored.
"""
for attr in dir(self):
if attr in dict:
dval = dict[attr]
if dval is not None:
setattr(self, attr, dval)
def _update_loose(self, dict):
"""
Update the option values from an arbitrary dictionary,
using all keys from the dictionary regardless of whether
they have a corresponding attribute in self or not.
"""
self.__dict__.update(dict)
def _update(self, dict, mode):
if mode == "careful":
self._update_careful(dict)
elif mode == "loose":
self._update_loose(dict)
else:
raise ValueError, "invalid update mode: %r" % mode
def read_module(self, modname, mode="careful"):
__import__(modname)
mod = sys.modules[modname]
self._update(vars(mod), mode)
def read_file(self, filename, mode="careful"):
vars = {}
execfile(filename, vars)
self._update(vars, mode)
def ensure_value(self, attr, value):
if not hasattr(self, attr) or getattr(self, attr) is None:
setattr(self, attr, value)
return getattr(self, attr)
class OptionContainer:
"""
Abstract base class.
Class attributes:
standard_option_list : [Option]
list of standard options that will be accepted by all instances
of this parser class (intended to be overridden by subclasses).
Instance attributes:
option_list : [Option]
the list of Option objects contained by this OptionContainer
_short_opt : { string : Option }
dictionary mapping short option strings, eg. "-f" or "-X",
to the Option instances that implement them. If an Option
has multiple short option strings, it will appear in this
dictionary multiple times. [1]
_long_opt : { string : Option }
dictionary mapping long option strings, eg. "--file" or
"--exclude", to the Option instances that implement them.
Again, a given Option can occur multiple times in this
dictionary. [1]
defaults : { string : any }
dictionary mapping option destination names to default
values for each destination [1]
[1] These mappings are common to (shared by) all components of the
controlling OptionParser, where they are initially created.
"""
def __init__(self, option_class, conflict_handler, description):
# Initialize the option list and related data structures.
# This method must be provided by subclasses, and it must
# initialize at least the following instance attributes:
# option_list, _short_opt, _long_opt, defaults.
self._create_option_list()
self.option_class = option_class
self.set_conflict_handler(conflict_handler)
self.set_description(description)
def _create_option_mappings(self):
# For use by OptionParser constructor -- create the master
# option mappings used by this OptionParser and all
# OptionGroups that it owns.
self._short_opt = {} # single letter -> Option instance
self._long_opt = {} # long option -> Option instance
self.defaults = {} # maps option dest -> default value
def _share_option_mappings(self, parser):
# For use by OptionGroup constructor -- use shared option
# mappings from the OptionParser that owns this OptionGroup.
self._short_opt = parser._short_opt
self._long_opt = parser._long_opt
self.defaults = parser.defaults
def set_conflict_handler(self, handler):
if handler not in ("error", "resolve"):
raise ValueError, "invalid conflict_resolution value %r" % handler
self.conflict_handler = handler
def set_description(self, description):
self.description = description
def get_description(self):
return self.description
def destroy(self):
"""see OptionParser.destroy()."""
del self._short_opt
del self._long_opt
del self.defaults
# -- Option-adding methods -----------------------------------------
def _check_conflict(self, option):
conflict_opts = []
for opt in option._short_opts:
if opt in self._short_opt:
conflict_opts.append((opt, self._short_opt[opt]))
for opt in option._long_opts:
if opt in self._long_opt:
conflict_opts.append((opt, self._long_opt[opt]))
if conflict_opts:
handler = self.conflict_handler
if handler == "error":
raise OptionConflictError(
"conflicting option string(s): %s"
% ", ".join([co[0] for co in conflict_opts]),
option)
elif handler == "resolve":
for (opt, c_option) in conflict_opts:
if opt.startswith("--"):
c_option._long_opts.remove(opt)
del self._long_opt[opt]
else:
c_option._short_opts.remove(opt)
del self._short_opt[opt]
if not (c_option._short_opts or c_option._long_opts):
c_option.container.option_list.remove(c_option)
def add_option(self, *args, **kwargs):
"""add_option(Option)
add_option(opt_str, ..., kwarg=val, ...)
"""
if type(args[0]) in types.StringTypes:
option = self.option_class(*args, **kwargs)
elif len(args) == 1 and not kwargs:
option = args[0]
if not isinstance(option, Option):
raise TypeError, "not an Option instance: %r" % option
else:
raise TypeError, "invalid arguments"
self._check_conflict(option)
self.option_list.append(option)
option.container = self
for opt in option._short_opts:
self._short_opt[opt] = option
for opt in option._long_opts:
self._long_opt[opt] = option
if option.dest is not None: # option has a dest, we need a default
if option.default is not NO_DEFAULT:
self.defaults[option.dest] = option.default
elif option.dest not in self.defaults:
self.defaults[option.dest] = None
return option
def add_options(self, option_list):
for option in option_list:
self.add_option(option)
# -- Option query/removal methods ----------------------------------
def get_option(self, opt_str):
return (self._short_opt.get(opt_str) or
self._long_opt.get(opt_str))
def has_option(self, opt_str):
return (opt_str in self._short_opt or
opt_str in self._long_opt)
def remove_option(self, opt_str):
option = self._short_opt.get(opt_str)
if option is None:
option = self._long_opt.get(opt_str)
if option is None:
raise ValueError("no such option %r" % opt_str)
for opt in option._short_opts:
del self._short_opt[opt]
for opt in option._long_opts:
del self._long_opt[opt]
option.container.option_list.remove(option)
# -- Help-formatting methods ---------------------------------------
def format_option_help(self, formatter):
if not self.option_list:
return ""
result = []
for option in self.option_list:
if not option.help is SUPPRESS_HELP:
result.append(formatter.format_option(option))
return "".join(result)
def format_description(self, formatter):
return formatter.format_description(self.get_description())
def format_help(self, formatter):
result = []
if self.description:
result.append(self.format_description(formatter))
if self.option_list:
result.append(self.format_option_help(formatter))
return "\n".join(result)
class OptionGroup (OptionContainer):
def __init__(self, parser, title, description=None):
self.parser = parser
OptionContainer.__init__(
self, parser.option_class, parser.conflict_handler, description)
self.title = title
def _create_option_list(self):
self.option_list = []
self._share_option_mappings(self.parser)
def set_title(self, title):
self.title = title
def destroy(self):
"""see OptionParser.destroy()."""
OptionContainer.destroy(self)
del self.option_list
# -- Help-formatting methods ---------------------------------------
def format_help(self, formatter):
result = formatter.format_heading(self.title)
formatter.indent()
result += OptionContainer.format_help(self, formatter)
formatter.dedent()
return result
class OptionParser (OptionContainer):
"""
Class attributes:
standard_option_list : [Option]
list of standard options that will be accepted by all instances
of this parser class (intended to be overridden by subclasses).
Instance attributes:
usage : string
a usage string for your program. Before it is displayed
to the user, "%prog" will be expanded to the name of
your program (self.prog or os.path.basename(sys.argv[0])).
prog : string
the name of the current program (to override
os.path.basename(sys.argv[0])).
description : string
A paragraph of text giving a brief overview of your program.
optparse reformats this paragraph to fit the current terminal
width and prints it when the user requests help (after usage,
but before the list of options).
epilog : string
paragraph of help text to print after option help
option_groups : [OptionGroup]
list of option groups in this parser (option groups are
irrelevant for parsing the command-line, but very useful
for generating help)
allow_interspersed_args : bool = true
if true, positional arguments may be interspersed with options.
Assuming -a and -b each take a single argument, the command-line
-ablah foo bar -bboo baz
will be interpreted the same as
-ablah -bboo -- foo bar baz
If this flag were false, that command line would be interpreted as
-ablah -- foo bar -bboo baz
-- ie. we stop processing options as soon as we see the first
non-option argument. (This is the tradition followed by
Python's getopt module, Perl's Getopt::Std, and other argument-
parsing libraries, but it is generally annoying to users.)
process_default_values : bool = true
if true, option default values are processed similarly to option
values from the command line: that is, they are passed to the
type-checking function for the option's type (as long as the
default value is a string). (This really only matters if you
have defined custom types; see SF bug #955889.) Set it to false
to restore the behaviour of Optik 1.4.1 and earlier.
rargs : [string]
the argument list currently being parsed. Only set when
parse_args() is active, and continually trimmed down as
we consume arguments. Mainly there for the benefit of
callback options.
largs : [string]
the list of leftover arguments that we have skipped while
parsing options. If allow_interspersed_args is false, this
list is always empty.
values : Values
the set of option values currently being accumulated. Only
set when parse_args() is active. Also mainly for callbacks.
Because of the 'rargs', 'largs', and 'values' attributes,
OptionParser is not thread-safe. If, for some perverse reason, you
need to parse command-line arguments simultaneously in different
threads, use different OptionParser instances.
"""
standard_option_list = []
def __init__(self,
usage=None,
option_list=None,
option_class=Option,
version=None,
conflict_handler="error",
description=None,
formatter=None,
add_help_option=True,
prog=None,
epilog=None):
OptionContainer.__init__(
self, option_class, conflict_handler, description)
self.set_usage(usage)
self.prog = prog
self.version = version
self.allow_interspersed_args = True
self.process_default_values = True
if formatter is None:
formatter = IndentedHelpFormatter()
self.formatter = formatter
self.formatter.set_parser(self)
self.epilog = epilog
# Populate the option list; initial sources are the
# standard_option_list class attribute, the 'option_list'
# argument, and (if applicable) the _add_version_option() and
# _add_help_option() methods.
self._populate_option_list(option_list,
add_help=add_help_option)
self._init_parsing_state()
def destroy(self):
"""
Declare that you are done with this OptionParser. This cleans up
reference cycles so the OptionParser (and all objects referenced by
it) can be garbage-collected promptly. After calling destroy(), the
OptionParser is unusable.
"""
OptionContainer.destroy(self)
for group in self.option_groups:
group.destroy()
del self.option_list
del self.option_groups
del self.formatter
# -- Private methods -----------------------------------------------
# (used by our or OptionContainer's constructor)
def _create_option_list(self):
self.option_list = []
self.option_groups = []
self._create_option_mappings()
def _add_help_option(self):
self.add_option("-h", "--help",
action="help",
help=_("show this help message and exit"))
def _add_version_option(self):
self.add_option("--version",
action="version",
help=_("show program's version number and exit"))
def _populate_option_list(self, option_list, add_help=True):
if self.standard_option_list:
self.add_options(self.standard_option_list)
if option_list:
self.add_options(option_list)
if self.version:
self._add_version_option()
if add_help:
self._add_help_option()
def _init_parsing_state(self):
# These are set in parse_args() for the convenience of callbacks.
self.rargs = None
self.largs = None
self.values = None
# -- Simple modifier methods ---------------------------------------
def set_usage(self, usage):
if usage is None:
self.usage = _("%prog [options]")
elif usage is SUPPRESS_USAGE:
self.usage = None
# For backwards compatibility with Optik 1.3 and earlier.
elif usage.lower().startswith("usage: "):
self.usage = usage[7:]
else:
self.usage = usage
def enable_interspersed_args(self):
"""Set parsing to not stop on the first non-option, allowing
interspersing switches with command arguments. This is the
default behavior. See also disable_interspersed_args() and the
class documentation description of the attribute
allow_interspersed_args."""
self.allow_interspersed_args = True
def disable_interspersed_args(self):
"""Set parsing to stop on the first non-option. Use this if
you have a command processor which runs another command that
has options of its own and you want to make sure these options
don't get confused.
"""
self.allow_interspersed_args = False
def set_process_default_values(self, process):
self.process_default_values = process
def set_default(self, dest, value):
self.defaults[dest] = value
def set_defaults(self, **kwargs):
self.defaults.update(kwargs)
def _get_all_options(self):
options = self.option_list[:]
for group in self.option_groups:
options.extend(group.option_list)
return options
def get_default_values(self):
if not self.process_default_values:
# Old, pre-Optik 1.5 behaviour.
return Values(self.defaults)
defaults = self.defaults.copy()
for option in self._get_all_options():
default = defaults.get(option.dest)
if isbasestring(default):
opt_str = option.get_opt_string()
defaults[option.dest] = option.check_value(opt_str, default)
return Values(defaults)
# -- OptionGroup methods -------------------------------------------
def add_option_group(self, *args, **kwargs):
# XXX lots of overlap with OptionContainer.add_option()
if type(args[0]) is types.StringType:
group = OptionGroup(self, *args, **kwargs)
elif len(args) == 1 and not kwargs:
group = args[0]
if not isinstance(group, OptionGroup):
raise TypeError, "not an OptionGroup instance: %r" % group
if group.parser is not self:
raise ValueError, "invalid OptionGroup (wrong parser)"
else:
raise TypeError, "invalid arguments"
self.option_groups.append(group)
return group
def get_option_group(self, opt_str):
option = (self._short_opt.get(opt_str) or
self._long_opt.get(opt_str))
if option and option.container is not self:
return option.container
return None
# -- Option-parsing methods ----------------------------------------
def _get_args(self, args):
if args is None:
return sys.argv[1:]
else:
return args[:] # don't modify caller's list
def parse_args(self, args=None, values=None):
"""
parse_args(args : [string] = sys.argv[1:],
values : Values = None)
-> (values : Values, args : [string])
Parse the command-line options found in 'args' (default:
sys.argv[1:]). Any errors result in a call to 'error()', which
by default prints the usage message to stderr and calls
sys.exit() with an error message. On success returns a pair
(values, args) where 'values' is a Values instance (with all
your option values) and 'args' is the list of arguments left
over after parsing options.
"""
rargs = self._get_args(args)
if values is None:
values = self.get_default_values()
# Store the halves of the argument list as attributes for the
# convenience of callbacks:
# rargs
# the rest of the command-line (the "r" stands for
# "remaining" or "right-hand")
# largs
# the leftover arguments -- ie. what's left after removing
# options and their arguments (the "l" stands for "leftover"
# or "left-hand")
self.rargs = rargs
self.largs = largs = []
self.values = values
try:
stop = self._process_args(largs, rargs, values)
except (BadOptionError, OptionValueError), err:
self.error(str(err))
args = largs + rargs
return self.check_values(values, args)
def check_values(self, values, args):
"""
check_values(values : Values, args : [string])
-> (values : Values, args : [string])
Check that the supplied option values and leftover arguments are
valid. Returns the option values and leftover arguments
(possibly adjusted, possibly completely new -- whatever you
like). Default implementation just returns the passed-in
values; subclasses may override as desired.
"""
return (values, args)
def _process_args(self, largs, rargs, values):
"""_process_args(largs : [string],
rargs : [string],
values : Values)
Process command-line arguments and populate 'values', consuming
options and arguments from 'rargs'. If 'allow_interspersed_args' is
false, stop at the first non-option argument. If true, accumulate any
interspersed non-option arguments in 'largs'.
"""
while rargs:
arg = rargs[0]
# We handle bare "--" explicitly, and bare "-" is handled by the
# standard arg handler since the short arg case ensures that the
# len of the opt string is greater than 1.
if arg == "--":
del rargs[0]
return
elif arg[0:2] == "--":
# process a single long option (possibly with value(s))
self._process_long_opt(rargs, values)
elif arg[:1] == "-" and len(arg) > 1:
# process a cluster of short options (possibly with
# value(s) for the last one only)
self._process_short_opts(rargs, values)
elif self.allow_interspersed_args:
largs.append(arg)
del rargs[0]
else:
return # stop now, leave this arg in rargs
# Say this is the original argument list:
# [arg0, arg1, ..., arg(i-1), arg(i), arg(i+1), ..., arg(N-1)]
# ^
# (we are about to process arg(i)).
#
# Then rargs is [arg(i), ..., arg(N-1)] and largs is a *subset* of
# [arg0, ..., arg(i-1)] (any options and their arguments will have
# been removed from largs).
#
# The while loop will usually consume 1 or more arguments per pass.
# If it consumes 1 (eg. arg is an option that takes no arguments),
# then after _process_arg() is done the situation is:
#
# largs = subset of [arg0, ..., arg(i)]
# rargs = [arg(i+1), ..., arg(N-1)]
#
# If allow_interspersed_args is false, largs will always be
# *empty* -- still a subset of [arg0, ..., arg(i-1)], but
# not a very interesting subset!
def _match_long_opt(self, opt):
"""_match_long_opt(opt : string) -> string
Determine which long option string 'opt' matches, ie. which one
it is an unambiguous abbreviation for. Raises BadOptionError if
'opt' doesn't unambiguously match any long option string.
"""
return _match_abbrev(opt, self._long_opt)
def _process_long_opt(self, rargs, values):
arg = rargs.pop(0)
# Value explicitly attached to arg? Pretend it's the next
# argument.
if "=" in arg:
(opt, next_arg) = arg.split("=", 1)
rargs.insert(0, next_arg)
had_explicit_value = True
else:
opt = arg
had_explicit_value = False
opt = self._match_long_opt(opt)
option = self._long_opt[opt]
if option.takes_value():
nargs = option.nargs
if len(rargs) < nargs:
if nargs == 1:
self.error(_("%s option requires an argument") % opt)
else:
self.error(_("%s option requires %d arguments")
% (opt, nargs))
elif nargs == 1:
value = rargs.pop(0)
else:
value = tuple(rargs[0:nargs])
del rargs[0:nargs]
elif had_explicit_value:
self.error(_("%s option does not take a value") % opt)
else:
value = None
option.process(opt, value, values, self)
def _process_short_opts(self, rargs, values):
arg = rargs.pop(0)
stop = False
i = 1
for ch in arg[1:]:
opt = "-" + ch
option = self._short_opt.get(opt)
i += 1 # we have consumed a character
if not option:
raise BadOptionError(opt)
if option.takes_value():
# Any characters left in arg? Pretend they're the
# next arg, and stop consuming characters of arg.
if i < len(arg):
rargs.insert(0, arg[i:])
stop = True
nargs = option.nargs
if len(rargs) < nargs:
if nargs == 1:
self.error(_("%s option requires an argument") % opt)
else:
self.error(_("%s option requires %d arguments")
% (opt, nargs))
elif nargs == 1:
value = rargs.pop(0)
else:
value = tuple(rargs[0:nargs])
del rargs[0:nargs]
else: # option doesn't take a value
value = None
option.process(opt, value, values, self)
if stop:
break
# -- Feedback methods ----------------------------------------------
def get_prog_name(self):
if self.prog is None:
return os.path.basename(sys.argv[0])
else:
return self.prog
def expand_prog_name(self, s):
return s.replace("%prog", self.get_prog_name())
def get_description(self):
return self.expand_prog_name(self.description)
def exit(self, status=0, msg=None):
if msg:
sys.stderr.write(msg)
sys.exit(status)
def error(self, msg):
"""error(msg : string)
Print a usage message incorporating 'msg' to stderr and exit.
If you override this in a subclass, it should not return -- it
should either exit or raise an exception.
"""
self.print_usage(sys.stderr)
self.exit(2, "%s: error: %s\n" % (self.get_prog_name(), msg))
def get_usage(self):
if self.usage:
return self.formatter.format_usage(
self.expand_prog_name(self.usage))
else:
return ""
def print_usage(self, file=None):
"""print_usage(file : file = stdout)
Print the usage message for the current program (self.usage) to
'file' (default stdout). Any occurrence of the string "%prog" in
self.usage is replaced with the name of the current program
(basename of sys.argv[0]). Does nothing if self.usage is empty
or not defined.
"""
if self.usage:
print >>file, self.get_usage()
def get_version(self):
if self.version:
return self.expand_prog_name(self.version)
else:
return ""
def print_version(self, file=None):
"""print_version(file : file = stdout)
Print the version message for this program (self.version) to
'file' (default stdout). As with print_usage(), any occurrence
of "%prog" in self.version is replaced by the current program's
name. Does nothing if self.version is empty or undefined.
"""
if self.version:
print >>file, self.get_version()
def format_option_help(self, formatter=None):
if formatter is None:
formatter = self.formatter
formatter.store_option_strings(self)
result = []
result.append(formatter.format_heading(_("Options")))
formatter.indent()
if self.option_list:
result.append(OptionContainer.format_option_help(self, formatter))
result.append("\n")
for group in self.option_groups:
result.append(group.format_help(formatter))
result.append("\n")
formatter.dedent()
# Drop the last "\n", or the header if no options or option groups:
return "".join(result[:-1])
def format_epilog(self, formatter):
return formatter.format_epilog(self.epilog)
def format_help(self, formatter=None):
if formatter is None:
formatter = self.formatter
result = []
if self.usage:
result.append(self.get_usage() + "\n")
if self.description:
result.append(self.format_description(formatter) + "\n")
result.append(self.format_option_help(formatter))
result.append(self.format_epilog(formatter))
return "".join(result)
# used by test suite
def _get_encoding(self, file):
encoding = getattr(file, "encoding", None)
if not encoding:
encoding = sys.getdefaultencoding()
return encoding
def print_help(self, file=None):
"""print_help(file : file = stdout)
Print an extended help message, listing all options and any
help text provided with them, to 'file' (default stdout).
"""
if file is None:
file = sys.stdout
encoding = self._get_encoding(file)
file.write(self.format_help().encode(encoding, "replace"))
# class OptionParser
def _match_abbrev(s, wordmap):
"""_match_abbrev(s : string, wordmap : {string : Option}) -> string
Return the string key in 'wordmap' for which 's' is an unambiguous
abbreviation. If 's' is found to be ambiguous or doesn't match any of
'wordmap', raise BadOptionError.
"""
# Is there an exact match?
if s in wordmap:
return s
else:
# Isolate all words with s as a prefix.
possibilities = [word for word in wordmap.keys()
if word.startswith(s)]
# No exact match, so there had better be just one possibility.
if len(possibilities) == 1:
return possibilities[0]
elif not possibilities:
raise BadOptionError(s)
else:
# More than one possible completion: ambiguous prefix.
possibilities.sort()
raise AmbiguousOptionError(s, possibilities)
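# Editor's illustrative sketch of the matching rules above (hypothetical
# wordmap): an exact or unique prefix match returns the key, an ambiguous
# prefix raises AmbiguousOptionError, and no match raises BadOptionError.
#
# wordmap = {"--file": None, "--filter": None}
# _match_abbrev("--filt", wordmap)    # -> "--filter"
# _match_abbrev("--fi", wordmap)      # raises AmbiguousOptionError
# _match_abbrev("--quiet", wordmap)   # raises BadOptionError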
# Some day, there might be many Option classes. As of Optik 1.3, the
# preferred way to instantiate Options is indirectly, via make_option(),
# which will become a factory function when there are many Option
# classes.
make_option = Option

lupyuen/RaspberryPiImage | home/pi/GrovePi/Software/Python/others/temboo/Library/Box/Files/DeleteFile.py | copies: 5 | size: 3424 | license: apache-2.0

# -*- coding: utf-8 -*-
###############################################################################
#
# DeleteFile
# Moves a file to the trash.
#
# Python versions 2.6, 2.7, 3.x
#
# Copyright 2014, Temboo Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,
# either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
#
#
###############################################################################
from temboo.core.choreography import Choreography
from temboo.core.choreography import InputSet
from temboo.core.choreography import ResultSet
from temboo.core.choreography import ChoreographyExecution
import json
class DeleteFile(Choreography):
def __init__(self, temboo_session):
"""
Create a new instance of the DeleteFile Choreo. A TembooSession object, containing a valid
set of Temboo credentials, must be supplied.
"""
super(DeleteFile, self).__init__(temboo_session, '/Library/Box/Files/DeleteFile')
def new_input_set(self):
return DeleteFileInputSet()
def _make_result_set(self, result, path):
return DeleteFileResultSet(result, path)
def _make_execution(self, session, exec_id, path):
return DeleteFileChoreographyExecution(session, exec_id, path)
class DeleteFileInputSet(InputSet):
"""
An InputSet with methods appropriate for specifying the inputs to the DeleteFile
Choreo. The InputSet object is used to specify input parameters when executing this Choreo.
"""
def set_AccessToken(self, value):
"""
Set the value of the AccessToken input for this Choreo. ((required, string) The access token retrieved during the OAuth2 process.)
"""
super(DeleteFileInputSet, self)._set_input('AccessToken', value)
def set_AsUser(self, value):
"""
Set the value of the AsUser input for this Choreo. ((optional, string) The ID of the user. Only used for enterprise administrators to make API calls for their managed users.)
"""
super(DeleteFileInputSet, self)._set_input('AsUser', value)
def set_FileID(self, value):
"""
Set the value of the FileID input for this Choreo. ((required, string) The id of the file that you want to delete.)
"""
super(DeleteFileInputSet, self)._set_input('FileID', value)
class DeleteFileResultSet(ResultSet):
"""
A ResultSet with methods tailored to the values returned by the DeleteFile Choreo.
The ResultSet object is used to retrieve the results of a Choreo execution.
"""
def getJSONFromString(self, str):
return json.loads(str)
def get_Response(self):
"""
Retrieve the value for the "Response" output from this Choreo execution. ((json) The response from Box.)
"""
return self._output.get('Response', None)
class DeleteFileChoreographyExecution(ChoreographyExecution):
def _make_result_set(self, response, path):
return DeleteFileResultSet(response, path)
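# Editor's illustrative sketch (assumes the standard Temboo SDK session
# pattern; account, app key, token, and file id values are hypothetical):
#
# from temboo.core.session import TembooSession
# session = TembooSession('ACCOUNT_NAME', 'APP_KEY_NAME', 'APP_KEY_VALUE')
# choreo = DeleteFile(session)
# inputs = choreo.new_input_set()
# inputs.set_AccessToken('BOX_OAUTH_TOKEN')
# inputs.set_FileID('12345')
# results = choreo.execute_with_results(inputs)
# print(results.get_Response())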

bulldy80/gyp_unofficial | pylib/gyp/MSVSSettings_test.py | copies: 395 | size: 65937

#!/usr/bin/env python
# Copyright (c) 2012 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Unit tests for the MSVSSettings.py file."""
import StringIO
import unittest
import gyp.MSVSSettings as MSVSSettings
class TestSequenceFunctions(unittest.TestCase):
def setUp(self):
self.stderr = StringIO.StringIO()
def _ExpectedWarnings(self, expected):
"""Compares recorded lines to expected warnings."""
self.stderr.seek(0)
actual = self.stderr.read().split('\n')
actual = [line for line in actual if line]
self.assertEqual(sorted(expected), sorted(actual))
def testValidateMSVSSettings_tool_names(self):
"""Tests that only MSVS tool names are allowed."""
MSVSSettings.ValidateMSVSSettings(
{'VCCLCompilerTool': {},
'VCLinkerTool': {},
'VCMIDLTool': {},
'foo': {},
'VCResourceCompilerTool': {},
'VCLibrarianTool': {},
'VCManifestTool': {},
'ClCompile': {}},
self.stderr)
self._ExpectedWarnings([
'Warning: unrecognized tool foo',
'Warning: unrecognized tool ClCompile'])
def testValidateMSVSSettings_settings(self):
"""Tests that for invalid MSVS settings."""
MSVSSettings.ValidateMSVSSettings(
{'VCCLCompilerTool': {
'AdditionalIncludeDirectories': 'folder1;folder2',
'AdditionalOptions': ['string1', 'string2'],
'AdditionalUsingDirectories': 'folder1;folder2',
'AssemblerListingLocation': 'a_file_name',
'AssemblerOutput': '0',
'BasicRuntimeChecks': '5',
'BrowseInformation': 'fdkslj',
'BrowseInformationFile': 'a_file_name',
'BufferSecurityCheck': 'true',
'CallingConvention': '-1',
'CompileAs': '1',
'DebugInformationFormat': '2',
'DefaultCharIsUnsigned': 'true',
'Detect64BitPortabilityProblems': 'true',
'DisableLanguageExtensions': 'true',
'DisableSpecificWarnings': 'string1;string2',
'EnableEnhancedInstructionSet': '1',
'EnableFiberSafeOptimizations': 'true',
'EnableFunctionLevelLinking': 'true',
'EnableIntrinsicFunctions': 'true',
'EnablePREfast': 'true',
'Enableprefast': 'bogus',
'ErrorReporting': '1',
'ExceptionHandling': '1',
'ExpandAttributedSource': 'true',
'FavorSizeOrSpeed': '1',
'FloatingPointExceptions': 'true',
'FloatingPointModel': '1',
'ForceConformanceInForLoopScope': 'true',
'ForcedIncludeFiles': 'file1;file2',
'ForcedUsingFiles': 'file1;file2',
'GeneratePreprocessedFile': '1',
'GenerateXMLDocumentationFiles': 'true',
'IgnoreStandardIncludePath': 'true',
'InlineFunctionExpansion': '1',
'KeepComments': 'true',
'MinimalRebuild': 'true',
'ObjectFile': 'a_file_name',
'OmitDefaultLibName': 'true',
'OmitFramePointers': 'true',
'OpenMP': 'true',
'Optimization': '1',
'PrecompiledHeaderFile': 'a_file_name',
'PrecompiledHeaderThrough': 'a_file_name',
'PreprocessorDefinitions': 'string1;string2',
'ProgramDataBaseFileName': 'a_file_name',
'RuntimeLibrary': '1',
'RuntimeTypeInfo': 'true',
'ShowIncludes': 'true',
'SmallerTypeCheck': 'true',
'StringPooling': 'true',
'StructMemberAlignment': '1',
'SuppressStartupBanner': 'true',
'TreatWChar_tAsBuiltInType': 'true',
'UndefineAllPreprocessorDefinitions': 'true',
'UndefinePreprocessorDefinitions': 'string1;string2',
'UseFullPaths': 'true',
'UsePrecompiledHeader': '1',
'UseUnicodeResponseFiles': 'true',
'WarnAsError': 'true',
'WarningLevel': '1',
'WholeProgramOptimization': 'true',
'XMLDocumentationFileName': 'a_file_name',
'ZZXYZ': 'bogus'},
'VCLinkerTool': {
'AdditionalDependencies': 'file1;file2',
'AdditionalDependencies_excluded': 'file3',
'AdditionalLibraryDirectories': 'folder1;folder2',
'AdditionalManifestDependencies': 'file1;file2',
'AdditionalOptions': 'a string1',
'AddModuleNamesToAssembly': 'file1;file2',
'AllowIsolation': 'true',
'AssemblyDebug': '2',
'AssemblyLinkResource': 'file1;file2',
'BaseAddress': 'a string1',
'CLRImageType': '2',
'CLRThreadAttribute': '2',
'CLRUnmanagedCodeCheck': 'true',
'DataExecutionPrevention': '2',
'DelayLoadDLLs': 'file1;file2',
'DelaySign': 'true',
'Driver': '2',
'EmbedManagedResourceFile': 'file1;file2',
'EnableCOMDATFolding': '2',
'EnableUAC': 'true',
'EntryPointSymbol': 'a string1',
'ErrorReporting': '2',
'FixedBaseAddress': '2',
'ForceSymbolReferences': 'file1;file2',
'FunctionOrder': 'a_file_name',
'GenerateDebugInformation': 'true',
'GenerateManifest': 'true',
'GenerateMapFile': 'true',
'HeapCommitSize': 'a string1',
'HeapReserveSize': 'a string1',
'IgnoreAllDefaultLibraries': 'true',
'IgnoreDefaultLibraryNames': 'file1;file2',
'IgnoreEmbeddedIDL': 'true',
'IgnoreImportLibrary': 'true',
'ImportLibrary': 'a_file_name',
'KeyContainer': 'a_file_name',
'KeyFile': 'a_file_name',
'LargeAddressAware': '2',
'LinkIncremental': '2',
'LinkLibraryDependencies': 'true',
'LinkTimeCodeGeneration': '2',
'ManifestFile': 'a_file_name',
'MapExports': 'true',
'MapFileName': 'a_file_name',
'MergedIDLBaseFileName': 'a_file_name',
'MergeSections': 'a string1',
'MidlCommandFile': 'a_file_name',
'ModuleDefinitionFile': 'a_file_name',
'OptimizeForWindows98': '1',
'OptimizeReferences': '2',
'OutputFile': 'a_file_name',
'PerUserRedirection': 'true',
'Profile': 'true',
'ProfileGuidedDatabase': 'a_file_name',
'ProgramDatabaseFile': 'a_file_name',
'RandomizedBaseAddress': '2',
'RegisterOutput': 'true',
'ResourceOnlyDLL': 'true',
'SetChecksum': 'true',
'ShowProgress': '2',
'StackCommitSize': 'a string1',
'StackReserveSize': 'a string1',
'StripPrivateSymbols': 'a_file_name',
'SubSystem': '2',
'SupportUnloadOfDelayLoadedDLL': 'true',
'SuppressStartupBanner': 'true',
'SwapRunFromCD': 'true',
'SwapRunFromNet': 'true',
'TargetMachine': '2',
'TerminalServerAware': '2',
'TurnOffAssemblyGeneration': 'true',
'TypeLibraryFile': 'a_file_name',
'TypeLibraryResourceID': '33',
'UACExecutionLevel': '2',
'UACUIAccess': 'true',
'UseLibraryDependencyInputs': 'true',
'UseUnicodeResponseFiles': 'true',
'Version': 'a string1'},
'VCMIDLTool': {
'AdditionalIncludeDirectories': 'folder1;folder2',
'AdditionalOptions': 'a string1',
'CPreprocessOptions': 'a string1',
'DefaultCharType': '1',
'DLLDataFileName': 'a_file_name',
'EnableErrorChecks': '1',
'ErrorCheckAllocations': 'true',
'ErrorCheckBounds': 'true',
'ErrorCheckEnumRange': 'true',
'ErrorCheckRefPointers': 'true',
'ErrorCheckStubData': 'true',
'GenerateStublessProxies': 'true',
'GenerateTypeLibrary': 'true',
'HeaderFileName': 'a_file_name',
'IgnoreStandardIncludePath': 'true',
'InterfaceIdentifierFileName': 'a_file_name',
'MkTypLibCompatible': 'true',
'notgood': 'bogus',
'OutputDirectory': 'a string1',
'PreprocessorDefinitions': 'string1;string2',
'ProxyFileName': 'a_file_name',
'RedirectOutputAndErrors': 'a_file_name',
'StructMemberAlignment': '1',
'SuppressStartupBanner': 'true',
'TargetEnvironment': '1',
'TypeLibraryName': 'a_file_name',
'UndefinePreprocessorDefinitions': 'string1;string2',
'ValidateParameters': 'true',
'WarnAsError': 'true',
'WarningLevel': '1'},
'VCResourceCompilerTool': {
'AdditionalOptions': 'a string1',
'AdditionalIncludeDirectories': 'folder1;folder2',
'Culture': '1003',
'IgnoreStandardIncludePath': 'true',
'notgood2': 'bogus',
'PreprocessorDefinitions': 'string1;string2',
'ResourceOutputFileName': 'a string1',
'ShowProgress': 'true',
'SuppressStartupBanner': 'true',
'UndefinePreprocessorDefinitions': 'string1;string2'},
'VCLibrarianTool': {
'AdditionalDependencies': 'file1;file2',
'AdditionalLibraryDirectories': 'folder1;folder2',
'AdditionalOptions': 'a string1',
'ExportNamedFunctions': 'string1;string2',
'ForceSymbolReferences': 'a string1',
'IgnoreAllDefaultLibraries': 'true',
'IgnoreSpecificDefaultLibraries': 'file1;file2',
'LinkLibraryDependencies': 'true',
'ModuleDefinitionFile': 'a_file_name',
'OutputFile': 'a_file_name',
'SuppressStartupBanner': 'true',
'UseUnicodeResponseFiles': 'true'},
'VCManifestTool': {
'AdditionalManifestFiles': 'file1;file2',
'AdditionalOptions': 'a string1',
'AssemblyIdentity': 'a string1',
'ComponentFileName': 'a_file_name',
'DependencyInformationFile': 'a_file_name',
'GenerateCatalogFiles': 'true',
'InputResourceManifests': 'a string1',
'ManifestResourceFile': 'a_file_name',
'OutputManifestFile': 'a_file_name',
'RegistrarScriptFile': 'a_file_name',
'ReplacementsFile': 'a_file_name',
'SuppressStartupBanner': 'true',
'TypeLibraryFile': 'a_file_name',
'UpdateFileHashes': 'truel',
'UpdateFileHashesSearchPath': 'a_file_name',
'UseFAT32Workaround': 'true',
'UseUnicodeResponseFiles': 'true',
'VerboseOutput': 'true'}},
self.stderr)
self._ExpectedWarnings([
'Warning: for VCCLCompilerTool/BasicRuntimeChecks, '
'index value (5) not in expected range [0, 4)',
'Warning: for VCCLCompilerTool/BrowseInformation, '
"invalid literal for int() with base 10: 'fdkslj'",
'Warning: for VCCLCompilerTool/CallingConvention, '
'index value (-1) not in expected range [0, 4)',
'Warning: for VCCLCompilerTool/DebugInformationFormat, '
'converted value for 2 not specified.',
'Warning: unrecognized setting VCCLCompilerTool/Enableprefast',
'Warning: unrecognized setting VCCLCompilerTool/ZZXYZ',
'Warning: for VCLinkerTool/TargetMachine, '
'converted value for 2 not specified.',
'Warning: unrecognized setting VCMIDLTool/notgood',
'Warning: unrecognized setting VCResourceCompilerTool/notgood2',
'Warning: for VCManifestTool/UpdateFileHashes, '
"expected bool; got 'truel'"
''])
def testValidateMSBuildSettings_settings(self):
"""Tests that for invalid MSBuild settings."""
MSVSSettings.ValidateMSBuildSettings(
{'ClCompile': {
'AdditionalIncludeDirectories': 'folder1;folder2',
'AdditionalOptions': ['string1', 'string2'],
'AdditionalUsingDirectories': 'folder1;folder2',
'AssemblerListingLocation': 'a_file_name',
'AssemblerOutput': 'NoListing',
'BasicRuntimeChecks': 'StackFrameRuntimeCheck',
'BrowseInformation': 'false',
'BrowseInformationFile': 'a_file_name',
'BufferSecurityCheck': 'true',
'BuildingInIDE': 'true',
'CallingConvention': 'Cdecl',
'CompileAs': 'CompileAsC',
'CompileAsManaged': 'Pure',
'CreateHotpatchableImage': 'true',
'DebugInformationFormat': 'ProgramDatabase',
'DisableLanguageExtensions': 'true',
'DisableSpecificWarnings': 'string1;string2',
'EnableEnhancedInstructionSet': 'StreamingSIMDExtensions',
'EnableFiberSafeOptimizations': 'true',
'EnablePREfast': 'true',
'Enableprefast': 'bogus',
'ErrorReporting': 'Prompt',
'ExceptionHandling': 'SyncCThrow',
'ExpandAttributedSource': 'true',
'FavorSizeOrSpeed': 'Neither',
'FloatingPointExceptions': 'true',
'FloatingPointModel': 'Precise',
'ForceConformanceInForLoopScope': 'true',
'ForcedIncludeFiles': 'file1;file2',
'ForcedUsingFiles': 'file1;file2',
'FunctionLevelLinking': 'false',
'GenerateXMLDocumentationFiles': 'true',
'IgnoreStandardIncludePath': 'true',
'InlineFunctionExpansion': 'OnlyExplicitInline',
'IntrinsicFunctions': 'false',
'MinimalRebuild': 'true',
'MultiProcessorCompilation': 'true',
'ObjectFileName': 'a_file_name',
'OmitDefaultLibName': 'true',
'OmitFramePointers': 'true',
'OpenMPSupport': 'true',
'Optimization': 'Disabled',
'PrecompiledHeader': 'NotUsing',
'PrecompiledHeaderFile': 'a_file_name',
'PrecompiledHeaderOutputFile': 'a_file_name',
'PreprocessKeepComments': 'true',
'PreprocessorDefinitions': 'string1;string2',
'PreprocessOutputPath': 'a string1',
'PreprocessSuppressLineNumbers': 'false',
'PreprocessToFile': 'false',
'ProcessorNumber': '33',
'ProgramDataBaseFileName': 'a_file_name',
'RuntimeLibrary': 'MultiThreaded',
'RuntimeTypeInfo': 'true',
'ShowIncludes': 'true',
'SmallerTypeCheck': 'true',
'StringPooling': 'true',
'StructMemberAlignment': '1Byte',
'SuppressStartupBanner': 'true',
'TrackerLogDirectory': 'a_folder',
'TreatSpecificWarningsAsErrors': 'string1;string2',
'TreatWarningAsError': 'true',
'TreatWChar_tAsBuiltInType': 'true',
'UndefineAllPreprocessorDefinitions': 'true',
'UndefinePreprocessorDefinitions': 'string1;string2',
'UseFullPaths': 'true',
'UseUnicodeForAssemblerListing': 'true',
'WarningLevel': 'TurnOffAllWarnings',
'WholeProgramOptimization': 'true',
'XMLDocumentationFileName': 'a_file_name',
'ZZXYZ': 'bogus'},
'Link': {
'AdditionalDependencies': 'file1;file2',
'AdditionalLibraryDirectories': 'folder1;folder2',
'AdditionalManifestDependencies': 'file1;file2',
'AdditionalOptions': 'a string1',
'AddModuleNamesToAssembly': 'file1;file2',
'AllowIsolation': 'true',
'AssemblyDebug': '',
'AssemblyLinkResource': 'file1;file2',
'BaseAddress': 'a string1',
'BuildingInIDE': 'true',
'CLRImageType': 'ForceIJWImage',
'CLRSupportLastError': 'Enabled',
'CLRThreadAttribute': 'MTAThreadingAttribute',
'CLRUnmanagedCodeCheck': 'true',
'CreateHotPatchableImage': 'X86Image',
'DataExecutionPrevention': 'false',
'DelayLoadDLLs': 'file1;file2',
'DelaySign': 'true',
'Driver': 'NotSet',
'EmbedManagedResourceFile': 'file1;file2',
'EnableCOMDATFolding': 'false',
'EnableUAC': 'true',
'EntryPointSymbol': 'a string1',
'FixedBaseAddress': 'false',
'ForceFileOutput': 'Enabled',
'ForceSymbolReferences': 'file1;file2',
'FunctionOrder': 'a_file_name',
'GenerateDebugInformation': 'true',
'GenerateMapFile': 'true',
'HeapCommitSize': 'a string1',
'HeapReserveSize': 'a string1',
'IgnoreAllDefaultLibraries': 'true',
'IgnoreEmbeddedIDL': 'true',
'IgnoreSpecificDefaultLibraries': 'a_file_list',
'ImageHasSafeExceptionHandlers': 'true',
'ImportLibrary': 'a_file_name',
'KeyContainer': 'a_file_name',
'KeyFile': 'a_file_name',
'LargeAddressAware': 'false',
'LinkDLL': 'true',
'LinkErrorReporting': 'SendErrorReport',
'LinkStatus': 'true',
'LinkTimeCodeGeneration': 'UseLinkTimeCodeGeneration',
'ManifestFile': 'a_file_name',
'MapExports': 'true',
'MapFileName': 'a_file_name',
'MergedIDLBaseFileName': 'a_file_name',
'MergeSections': 'a string1',
'MidlCommandFile': 'a_file_name',
'MinimumRequiredVersion': 'a string1',
'ModuleDefinitionFile': 'a_file_name',
'MSDOSStubFileName': 'a_file_name',
'NoEntryPoint': 'true',
'OptimizeReferences': 'false',
'OutputFile': 'a_file_name',
'PerUserRedirection': 'true',
'PreventDllBinding': 'true',
'Profile': 'true',
'ProfileGuidedDatabase': 'a_file_name',
'ProgramDatabaseFile': 'a_file_name',
'RandomizedBaseAddress': 'false',
'RegisterOutput': 'true',
'SectionAlignment': '33',
'SetChecksum': 'true',
'ShowProgress': 'LinkVerboseREF',
'SpecifySectionAttributes': 'a string1',
'StackCommitSize': 'a string1',
'StackReserveSize': 'a string1',
'StripPrivateSymbols': 'a_file_name',
'SubSystem': 'Console',
'SupportNobindOfDelayLoadedDLL': 'true',
'SupportUnloadOfDelayLoadedDLL': 'true',
'SuppressStartupBanner': 'true',
'SwapRunFromCD': 'true',
'SwapRunFromNET': 'true',
'TargetMachine': 'MachineX86',
'TerminalServerAware': 'false',
'TrackerLogDirectory': 'a_folder',
'TreatLinkerWarningAsErrors': 'true',
'TurnOffAssemblyGeneration': 'true',
'TypeLibraryFile': 'a_file_name',
'TypeLibraryResourceID': '33',
'UACExecutionLevel': 'AsInvoker',
'UACUIAccess': 'true',
'Version': 'a string1'},
'ResourceCompile': {
'AdditionalIncludeDirectories': 'folder1;folder2',
'AdditionalOptions': 'a string1',
'Culture': '0x236',
'IgnoreStandardIncludePath': 'true',
'NullTerminateStrings': 'true',
'PreprocessorDefinitions': 'string1;string2',
'ResourceOutputFileName': 'a string1',
'ShowProgress': 'true',
'SuppressStartupBanner': 'true',
'TrackerLogDirectory': 'a_folder',
'UndefinePreprocessorDefinitions': 'string1;string2'},
'Midl': {
'AdditionalIncludeDirectories': 'folder1;folder2',
'AdditionalOptions': 'a string1',
'ApplicationConfigurationMode': 'true',
'ClientStubFile': 'a_file_name',
'CPreprocessOptions': 'a string1',
'DefaultCharType': 'Signed',
'DllDataFileName': 'a_file_name',
'EnableErrorChecks': 'EnableCustom',
'ErrorCheckAllocations': 'true',
'ErrorCheckBounds': 'true',
'ErrorCheckEnumRange': 'true',
'ErrorCheckRefPointers': 'true',
'ErrorCheckStubData': 'true',
'GenerateClientFiles': 'Stub',
'GenerateServerFiles': 'None',
'GenerateStublessProxies': 'true',
'GenerateTypeLibrary': 'true',
'HeaderFileName': 'a_file_name',
'IgnoreStandardIncludePath': 'true',
'InterfaceIdentifierFileName': 'a_file_name',
'LocaleID': '33',
'MkTypLibCompatible': 'true',
'OutputDirectory': 'a string1',
'PreprocessorDefinitions': 'string1;string2',
'ProxyFileName': 'a_file_name',
'RedirectOutputAndErrors': 'a_file_name',
'ServerStubFile': 'a_file_name',
'StructMemberAlignment': 'NotSet',
'SuppressCompilerWarnings': 'true',
'SuppressStartupBanner': 'true',
'TargetEnvironment': 'Itanium',
'TrackerLogDirectory': 'a_folder',
'TypeLibFormat': 'NewFormat',
'TypeLibraryName': 'a_file_name',
'UndefinePreprocessorDefinitions': 'string1;string2',
'ValidateAllParameters': 'true',
'WarnAsError': 'true',
'WarningLevel': '1'},
'Lib': {
'AdditionalDependencies': 'file1;file2',
'AdditionalLibraryDirectories': 'folder1;folder2',
'AdditionalOptions': 'a string1',
'DisplayLibrary': 'a string1',
'ErrorReporting': 'PromptImmediately',
'ExportNamedFunctions': 'string1;string2',
'ForceSymbolReferences': 'a string1',
'IgnoreAllDefaultLibraries': 'true',
'IgnoreSpecificDefaultLibraries': 'file1;file2',
'LinkTimeCodeGeneration': 'true',
'MinimumRequiredVersion': 'a string1',
'ModuleDefinitionFile': 'a_file_name',
'Name': 'a_file_name',
'OutputFile': 'a_file_name',
'RemoveObjects': 'file1;file2',
'SubSystem': 'Console',
'SuppressStartupBanner': 'true',
'TargetMachine': 'MachineX86i',
'TrackerLogDirectory': 'a_folder',
'TreatLibWarningAsErrors': 'true',
'UseUnicodeResponseFiles': 'true',
'Verbose': 'true'},
'Manifest': {
'AdditionalManifestFiles': 'file1;file2',
'AdditionalOptions': 'a string1',
'AssemblyIdentity': 'a string1',
'ComponentFileName': 'a_file_name',
'EnableDPIAwareness': 'fal',
'GenerateCatalogFiles': 'truel',
'GenerateCategoryTags': 'true',
'InputResourceManifests': 'a string1',
'ManifestFromManagedAssembly': 'a_file_name',
'notgood3': 'bogus',
'OutputManifestFile': 'a_file_name',
'OutputResourceManifests': 'a string1',
'RegistrarScriptFile': 'a_file_name',
'ReplacementsFile': 'a_file_name',
'SuppressDependencyElement': 'true',
'SuppressStartupBanner': 'true',
'TrackerLogDirectory': 'a_folder',
'TypeLibraryFile': 'a_file_name',
'UpdateFileHashes': 'true',
'UpdateFileHashesSearchPath': 'a_file_name',
'VerboseOutput': 'true'},
'ProjectReference': {
'LinkLibraryDependencies': 'true',
'UseLibraryDependencyInputs': 'true'},
'ManifestResourceCompile': {
'ResourceOutputFileName': 'a_file_name'},
'': {
'EmbedManifest': 'true',
'GenerateManifest': 'true',
'IgnoreImportLibrary': 'true',
'LinkIncremental': 'false'}},
self.stderr)
self._ExpectedWarnings([
'Warning: unrecognized setting ClCompile/Enableprefast',
'Warning: unrecognized setting ClCompile/ZZXYZ',
'Warning: unrecognized setting Manifest/notgood3',
'Warning: for Manifest/GenerateCatalogFiles, '
"expected bool; got 'truel'",
'Warning: for Lib/TargetMachine, unrecognized enumerated value '
'MachineX86i',
"Warning: for Manifest/EnableDPIAwareness, expected bool; got 'fal'"])
def testConvertToMSBuildSettings_empty(self):
"""Tests an empty conversion."""
msvs_settings = {}
expected_msbuild_settings = {}
actual_msbuild_settings = MSVSSettings.ConvertToMSBuildSettings(
msvs_settings,
self.stderr)
self.assertEqual(expected_msbuild_settings, actual_msbuild_settings)
self._ExpectedWarnings([])
def testConvertToMSBuildSettings_minimal(self):
"""Tests a minimal conversion."""
msvs_settings = {
'VCCLCompilerTool': {
'AdditionalIncludeDirectories': 'dir1',
'AdditionalOptions': '/foo',
'BasicRuntimeChecks': '0',
},
'VCLinkerTool': {
'LinkTimeCodeGeneration': '1',
'ErrorReporting': '1',
'DataExecutionPrevention': '2',
},
}
expected_msbuild_settings = {
'ClCompile': {
'AdditionalIncludeDirectories': 'dir1',
'AdditionalOptions': '/foo',
'BasicRuntimeChecks': 'Default',
},
'Link': {
'LinkTimeCodeGeneration': 'UseLinkTimeCodeGeneration',
'LinkErrorReporting': 'PromptImmediately',
'DataExecutionPrevention': 'true',
},
}
actual_msbuild_settings = MSVSSettings.ConvertToMSBuildSettings(
msvs_settings,
self.stderr)
self.assertEqual(expected_msbuild_settings, actual_msbuild_settings)
self._ExpectedWarnings([])
def testConvertToMSBuildSettings_warnings(self):
"""Tests conversion that generates warnings."""
msvs_settings = {
'VCCLCompilerTool': {
'AdditionalIncludeDirectories': '1',
'AdditionalOptions': '2',
# These are incorrect values:
'BasicRuntimeChecks': '12',
'BrowseInformation': '21',
'UsePrecompiledHeader': '13',
'GeneratePreprocessedFile': '14'},
'VCLinkerTool': {
# These are incorrect values:
'Driver': '10',
'LinkTimeCodeGeneration': '31',
'ErrorReporting': '21',
'FixedBaseAddress': '6'},
'VCResourceCompilerTool': {
# Custom
'Culture': '1003'}}
expected_msbuild_settings = {
'ClCompile': {
'AdditionalIncludeDirectories': '1',
'AdditionalOptions': '2'},
'Link': {},
'ResourceCompile': {
# Custom
'Culture': '0x03eb'}}
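    # Note: the MSVS Culture value is given in decimal ('1003') while the
    # MSBuild form is hexadecimal; 1003 decimal is 0x3eb, hence '0x03eb'.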
actual_msbuild_settings = MSVSSettings.ConvertToMSBuildSettings(
msvs_settings,
self.stderr)
self.assertEqual(expected_msbuild_settings, actual_msbuild_settings)
self._ExpectedWarnings([
'Warning: while converting VCCLCompilerTool/BasicRuntimeChecks to '
'MSBuild, index value (12) not in expected range [0, 4)',
'Warning: while converting VCCLCompilerTool/BrowseInformation to '
'MSBuild, index value (21) not in expected range [0, 3)',
'Warning: while converting VCCLCompilerTool/UsePrecompiledHeader to '
'MSBuild, index value (13) not in expected range [0, 3)',
'Warning: while converting VCCLCompilerTool/GeneratePreprocessedFile to '
'MSBuild, value must be one of [0, 1, 2]; got 14',
'Warning: while converting VCLinkerTool/Driver to '
'MSBuild, index value (10) not in expected range [0, 4)',
'Warning: while converting VCLinkerTool/LinkTimeCodeGeneration to '
'MSBuild, index value (31) not in expected range [0, 5)',
'Warning: while converting VCLinkerTool/ErrorReporting to '
'MSBuild, index value (21) not in expected range [0, 3)',
'Warning: while converting VCLinkerTool/FixedBaseAddress to '
'MSBuild, index value (6) not in expected range [0, 3)',
])
def testConvertToMSBuildSettings_full_synthetic(self):
"""Tests conversion of all the MSBuild settings."""
msvs_settings = {
'VCCLCompilerTool': {
'AdditionalIncludeDirectories': 'folder1;folder2;folder3',
'AdditionalOptions': 'a_string',
'AdditionalUsingDirectories': 'folder1;folder2;folder3',
'AssemblerListingLocation': 'a_file_name',
'AssemblerOutput': '0',
'BasicRuntimeChecks': '1',
'BrowseInformation': '2',
'BrowseInformationFile': 'a_file_name',
'BufferSecurityCheck': 'true',
'CallingConvention': '0',
'CompileAs': '1',
'DebugInformationFormat': '4',
'DefaultCharIsUnsigned': 'true',
'Detect64BitPortabilityProblems': 'true',
'DisableLanguageExtensions': 'true',
'DisableSpecificWarnings': 'd1;d2;d3',
'EnableEnhancedInstructionSet': '0',
'EnableFiberSafeOptimizations': 'true',
'EnableFunctionLevelLinking': 'true',
'EnableIntrinsicFunctions': 'true',
'EnablePREfast': 'true',
'ErrorReporting': '1',
'ExceptionHandling': '2',
'ExpandAttributedSource': 'true',
'FavorSizeOrSpeed': '0',
'FloatingPointExceptions': 'true',
'FloatingPointModel': '1',
'ForceConformanceInForLoopScope': 'true',
'ForcedIncludeFiles': 'file1;file2;file3',
'ForcedUsingFiles': 'file1;file2;file3',
'GeneratePreprocessedFile': '1',
'GenerateXMLDocumentationFiles': 'true',
'IgnoreStandardIncludePath': 'true',
'InlineFunctionExpansion': '2',
'KeepComments': 'true',
'MinimalRebuild': 'true',
'ObjectFile': 'a_file_name',
'OmitDefaultLibName': 'true',
'OmitFramePointers': 'true',
'OpenMP': 'true',
'Optimization': '3',
'PrecompiledHeaderFile': 'a_file_name',
'PrecompiledHeaderThrough': 'a_file_name',
'PreprocessorDefinitions': 'd1;d2;d3',
'ProgramDataBaseFileName': 'a_file_name',
'RuntimeLibrary': '0',
'RuntimeTypeInfo': 'true',
'ShowIncludes': 'true',
'SmallerTypeCheck': 'true',
'StringPooling': 'true',
'StructMemberAlignment': '1',
'SuppressStartupBanner': 'true',
'TreatWChar_tAsBuiltInType': 'true',
'UndefineAllPreprocessorDefinitions': 'true',
'UndefinePreprocessorDefinitions': 'd1;d2;d3',
'UseFullPaths': 'true',
'UsePrecompiledHeader': '1',
'UseUnicodeResponseFiles': 'true',
'WarnAsError': 'true',
'WarningLevel': '2',
'WholeProgramOptimization': 'true',
'XMLDocumentationFileName': 'a_file_name'},
'VCLinkerTool': {
'AdditionalDependencies': 'file1;file2;file3',
'AdditionalLibraryDirectories': 'folder1;folder2;folder3',
'AdditionalLibraryDirectories_excluded': 'folder1;folder2;folder3',
'AdditionalManifestDependencies': 'file1;file2;file3',
'AdditionalOptions': 'a_string',
'AddModuleNamesToAssembly': 'file1;file2;file3',
'AllowIsolation': 'true',
'AssemblyDebug': '0',
'AssemblyLinkResource': 'file1;file2;file3',
'BaseAddress': 'a_string',
'CLRImageType': '1',
'CLRThreadAttribute': '2',
'CLRUnmanagedCodeCheck': 'true',
'DataExecutionPrevention': '0',
'DelayLoadDLLs': 'file1;file2;file3',
'DelaySign': 'true',
'Driver': '1',
'EmbedManagedResourceFile': 'file1;file2;file3',
'EnableCOMDATFolding': '0',
'EnableUAC': 'true',
'EntryPointSymbol': 'a_string',
'ErrorReporting': '0',
'FixedBaseAddress': '1',
'ForceSymbolReferences': 'file1;file2;file3',
'FunctionOrder': 'a_file_name',
'GenerateDebugInformation': 'true',
'GenerateManifest': 'true',
'GenerateMapFile': 'true',
'HeapCommitSize': 'a_string',
'HeapReserveSize': 'a_string',
'IgnoreAllDefaultLibraries': 'true',
'IgnoreDefaultLibraryNames': 'file1;file2;file3',
'IgnoreEmbeddedIDL': 'true',
'IgnoreImportLibrary': 'true',
'ImportLibrary': 'a_file_name',
'KeyContainer': 'a_file_name',
'KeyFile': 'a_file_name',
'LargeAddressAware': '2',
'LinkIncremental': '1',
'LinkLibraryDependencies': 'true',
'LinkTimeCodeGeneration': '2',
'ManifestFile': 'a_file_name',
'MapExports': 'true',
'MapFileName': 'a_file_name',
'MergedIDLBaseFileName': 'a_file_name',
'MergeSections': 'a_string',
'MidlCommandFile': 'a_file_name',
'ModuleDefinitionFile': 'a_file_name',
'OptimizeForWindows98': '1',
'OptimizeReferences': '0',
'OutputFile': 'a_file_name',
'PerUserRedirection': 'true',
'Profile': 'true',
'ProfileGuidedDatabase': 'a_file_name',
'ProgramDatabaseFile': 'a_file_name',
'RandomizedBaseAddress': '1',
'RegisterOutput': 'true',
'ResourceOnlyDLL': 'true',
'SetChecksum': 'true',
'ShowProgress': '0',
'StackCommitSize': 'a_string',
'StackReserveSize': 'a_string',
'StripPrivateSymbols': 'a_file_name',
'SubSystem': '2',
'SupportUnloadOfDelayLoadedDLL': 'true',
'SuppressStartupBanner': 'true',
'SwapRunFromCD': 'true',
'SwapRunFromNet': 'true',
'TargetMachine': '3',
'TerminalServerAware': '2',
'TurnOffAssemblyGeneration': 'true',
'TypeLibraryFile': 'a_file_name',
'TypeLibraryResourceID': '33',
'UACExecutionLevel': '1',
'UACUIAccess': 'true',
'UseLibraryDependencyInputs': 'false',
'UseUnicodeResponseFiles': 'true',
'Version': 'a_string'},
'VCResourceCompilerTool': {
'AdditionalIncludeDirectories': 'folder1;folder2;folder3',
'AdditionalOptions': 'a_string',
'Culture': '1003',
'IgnoreStandardIncludePath': 'true',
'PreprocessorDefinitions': 'd1;d2;d3',
'ResourceOutputFileName': 'a_string',
'ShowProgress': 'true',
'SuppressStartupBanner': 'true',
'UndefinePreprocessorDefinitions': 'd1;d2;d3'},
'VCMIDLTool': {
'AdditionalIncludeDirectories': 'folder1;folder2;folder3',
'AdditionalOptions': 'a_string',
'CPreprocessOptions': 'a_string',
'DefaultCharType': '0',
'DLLDataFileName': 'a_file_name',
'EnableErrorChecks': '2',
'ErrorCheckAllocations': 'true',
'ErrorCheckBounds': 'true',
'ErrorCheckEnumRange': 'true',
'ErrorCheckRefPointers': 'true',
'ErrorCheckStubData': 'true',
'GenerateStublessProxies': 'true',
'GenerateTypeLibrary': 'true',
'HeaderFileName': 'a_file_name',
'IgnoreStandardIncludePath': 'true',
'InterfaceIdentifierFileName': 'a_file_name',
'MkTypLibCompatible': 'true',
'OutputDirectory': 'a_string',
'PreprocessorDefinitions': 'd1;d2;d3',
'ProxyFileName': 'a_file_name',
'RedirectOutputAndErrors': 'a_file_name',
'StructMemberAlignment': '3',
'SuppressStartupBanner': 'true',
'TargetEnvironment': '1',
'TypeLibraryName': 'a_file_name',
'UndefinePreprocessorDefinitions': 'd1;d2;d3',
'ValidateParameters': 'true',
'WarnAsError': 'true',
'WarningLevel': '4'},
'VCLibrarianTool': {
'AdditionalDependencies': 'file1;file2;file3',
'AdditionalLibraryDirectories': 'folder1;folder2;folder3',
'AdditionalLibraryDirectories_excluded': 'folder1;folder2;folder3',
'AdditionalOptions': 'a_string',
'ExportNamedFunctions': 'd1;d2;d3',
'ForceSymbolReferences': 'a_string',
'IgnoreAllDefaultLibraries': 'true',
'IgnoreSpecificDefaultLibraries': 'file1;file2;file3',
'LinkLibraryDependencies': 'true',
'ModuleDefinitionFile': 'a_file_name',
'OutputFile': 'a_file_name',
'SuppressStartupBanner': 'true',
'UseUnicodeResponseFiles': 'true'},
'VCManifestTool': {
'AdditionalManifestFiles': 'file1;file2;file3',
'AdditionalOptions': 'a_string',
'AssemblyIdentity': 'a_string',
'ComponentFileName': 'a_file_name',
'DependencyInformationFile': 'a_file_name',
'EmbedManifest': 'true',
'GenerateCatalogFiles': 'true',
'InputResourceManifests': 'a_string',
'ManifestResourceFile': 'my_name',
'OutputManifestFile': 'a_file_name',
'RegistrarScriptFile': 'a_file_name',
'ReplacementsFile': 'a_file_name',
'SuppressStartupBanner': 'true',
'TypeLibraryFile': 'a_file_name',
'UpdateFileHashes': 'true',
'UpdateFileHashesSearchPath': 'a_file_name',
'UseFAT32Workaround': 'true',
'UseUnicodeResponseFiles': 'true',
'VerboseOutput': 'true'}}
expected_msbuild_settings = {
'ClCompile': {
'AdditionalIncludeDirectories': 'folder1;folder2;folder3',
'AdditionalOptions': 'a_string /J',
'AdditionalUsingDirectories': 'folder1;folder2;folder3',
'AssemblerListingLocation': 'a_file_name',
'AssemblerOutput': 'NoListing',
'BasicRuntimeChecks': 'StackFrameRuntimeCheck',
'BrowseInformation': 'true',
'BrowseInformationFile': 'a_file_name',
'BufferSecurityCheck': 'true',
'CallingConvention': 'Cdecl',
'CompileAs': 'CompileAsC',
'DebugInformationFormat': 'EditAndContinue',
'DisableLanguageExtensions': 'true',
'DisableSpecificWarnings': 'd1;d2;d3',
'EnableEnhancedInstructionSet': 'NotSet',
'EnableFiberSafeOptimizations': 'true',
'EnablePREfast': 'true',
'ErrorReporting': 'Prompt',
'ExceptionHandling': 'Async',
'ExpandAttributedSource': 'true',
'FavorSizeOrSpeed': 'Neither',
'FloatingPointExceptions': 'true',
'FloatingPointModel': 'Strict',
'ForceConformanceInForLoopScope': 'true',
'ForcedIncludeFiles': 'file1;file2;file3',
'ForcedUsingFiles': 'file1;file2;file3',
'FunctionLevelLinking': 'true',
'GenerateXMLDocumentationFiles': 'true',
'IgnoreStandardIncludePath': 'true',
'InlineFunctionExpansion': 'AnySuitable',
'IntrinsicFunctions': 'true',
'MinimalRebuild': 'true',
'ObjectFileName': 'a_file_name',
'OmitDefaultLibName': 'true',
'OmitFramePointers': 'true',
'OpenMPSupport': 'true',
'Optimization': 'Full',
'PrecompiledHeader': 'Create',
'PrecompiledHeaderFile': 'a_file_name',
'PrecompiledHeaderOutputFile': 'a_file_name',
'PreprocessKeepComments': 'true',
'PreprocessorDefinitions': 'd1;d2;d3',
'PreprocessSuppressLineNumbers': 'false',
'PreprocessToFile': 'true',
'ProgramDataBaseFileName': 'a_file_name',
'RuntimeLibrary': 'MultiThreaded',
'RuntimeTypeInfo': 'true',
'ShowIncludes': 'true',
'SmallerTypeCheck': 'true',
'StringPooling': 'true',
'StructMemberAlignment': '1Byte',
'SuppressStartupBanner': 'true',
'TreatWarningAsError': 'true',
'TreatWChar_tAsBuiltInType': 'true',
'UndefineAllPreprocessorDefinitions': 'true',
'UndefinePreprocessorDefinitions': 'd1;d2;d3',
'UseFullPaths': 'true',
'WarningLevel': 'Level2',
'WholeProgramOptimization': 'true',
'XMLDocumentationFileName': 'a_file_name'},
'Link': {
'AdditionalDependencies': 'file1;file2;file3',
'AdditionalLibraryDirectories': 'folder1;folder2;folder3',
'AdditionalManifestDependencies': 'file1;file2;file3',
'AdditionalOptions': 'a_string',
'AddModuleNamesToAssembly': 'file1;file2;file3',
'AllowIsolation': 'true',
'AssemblyDebug': '',
'AssemblyLinkResource': 'file1;file2;file3',
'BaseAddress': 'a_string',
'CLRImageType': 'ForceIJWImage',
'CLRThreadAttribute': 'STAThreadingAttribute',
'CLRUnmanagedCodeCheck': 'true',
'DataExecutionPrevention': '',
'DelayLoadDLLs': 'file1;file2;file3',
'DelaySign': 'true',
'Driver': 'Driver',
'EmbedManagedResourceFile': 'file1;file2;file3',
'EnableCOMDATFolding': '',
'EnableUAC': 'true',
'EntryPointSymbol': 'a_string',
'FixedBaseAddress': 'false',
'ForceSymbolReferences': 'file1;file2;file3',
'FunctionOrder': 'a_file_name',
'GenerateDebugInformation': 'true',
'GenerateMapFile': 'true',
'HeapCommitSize': 'a_string',
'HeapReserveSize': 'a_string',
'IgnoreAllDefaultLibraries': 'true',
'IgnoreEmbeddedIDL': 'true',
'IgnoreSpecificDefaultLibraries': 'file1;file2;file3',
'ImportLibrary': 'a_file_name',
'KeyContainer': 'a_file_name',
'KeyFile': 'a_file_name',
'LargeAddressAware': 'true',
'LinkErrorReporting': 'NoErrorReport',
'LinkTimeCodeGeneration': 'PGInstrument',
'ManifestFile': 'a_file_name',
'MapExports': 'true',
'MapFileName': 'a_file_name',
'MergedIDLBaseFileName': 'a_file_name',
'MergeSections': 'a_string',
'MidlCommandFile': 'a_file_name',
'ModuleDefinitionFile': 'a_file_name',
'NoEntryPoint': 'true',
'OptimizeReferences': '',
'OutputFile': 'a_file_name',
'PerUserRedirection': 'true',
'Profile': 'true',
'ProfileGuidedDatabase': 'a_file_name',
'ProgramDatabaseFile': 'a_file_name',
'RandomizedBaseAddress': 'false',
'RegisterOutput': 'true',
'SetChecksum': 'true',
'ShowProgress': 'NotSet',
'StackCommitSize': 'a_string',
'StackReserveSize': 'a_string',
'StripPrivateSymbols': 'a_file_name',
'SubSystem': 'Windows',
'SupportUnloadOfDelayLoadedDLL': 'true',
'SuppressStartupBanner': 'true',
'SwapRunFromCD': 'true',
'SwapRunFromNET': 'true',
'TargetMachine': 'MachineARM',
'TerminalServerAware': 'true',
'TurnOffAssemblyGeneration': 'true',
'TypeLibraryFile': 'a_file_name',
'TypeLibraryResourceID': '33',
'UACExecutionLevel': 'HighestAvailable',
'UACUIAccess': 'true',
'Version': 'a_string'},
'ResourceCompile': {
'AdditionalIncludeDirectories': 'folder1;folder2;folder3',
'AdditionalOptions': 'a_string',
'Culture': '0x03eb',
'IgnoreStandardIncludePath': 'true',
'PreprocessorDefinitions': 'd1;d2;d3',
'ResourceOutputFileName': 'a_string',
'ShowProgress': 'true',
'SuppressStartupBanner': 'true',
'UndefinePreprocessorDefinitions': 'd1;d2;d3'},
'Midl': {
'AdditionalIncludeDirectories': 'folder1;folder2;folder3',
'AdditionalOptions': 'a_string',
'CPreprocessOptions': 'a_string',
'DefaultCharType': 'Unsigned',
'DllDataFileName': 'a_file_name',
'EnableErrorChecks': 'All',
'ErrorCheckAllocations': 'true',
'ErrorCheckBounds': 'true',
'ErrorCheckEnumRange': 'true',
'ErrorCheckRefPointers': 'true',
'ErrorCheckStubData': 'true',
'GenerateStublessProxies': 'true',
'GenerateTypeLibrary': 'true',
'HeaderFileName': 'a_file_name',
'IgnoreStandardIncludePath': 'true',
'InterfaceIdentifierFileName': 'a_file_name',
'MkTypLibCompatible': 'true',
'OutputDirectory': 'a_string',
'PreprocessorDefinitions': 'd1;d2;d3',
'ProxyFileName': 'a_file_name',
'RedirectOutputAndErrors': 'a_file_name',
'StructMemberAlignment': '4',
'SuppressStartupBanner': 'true',
'TargetEnvironment': 'Win32',
'TypeLibraryName': 'a_file_name',
'UndefinePreprocessorDefinitions': 'd1;d2;d3',
'ValidateAllParameters': 'true',
'WarnAsError': 'true',
'WarningLevel': '4'},
'Lib': {
'AdditionalDependencies': 'file1;file2;file3',
'AdditionalLibraryDirectories': 'folder1;folder2;folder3',
'AdditionalOptions': 'a_string',
'ExportNamedFunctions': 'd1;d2;d3',
'ForceSymbolReferences': 'a_string',
'IgnoreAllDefaultLibraries': 'true',
'IgnoreSpecificDefaultLibraries': 'file1;file2;file3',
'ModuleDefinitionFile': 'a_file_name',
'OutputFile': 'a_file_name',
'SuppressStartupBanner': 'true',
'UseUnicodeResponseFiles': 'true'},
'Manifest': {
'AdditionalManifestFiles': 'file1;file2;file3',
'AdditionalOptions': 'a_string',
'AssemblyIdentity': 'a_string',
'ComponentFileName': 'a_file_name',
'GenerateCatalogFiles': 'true',
'InputResourceManifests': 'a_string',
'OutputManifestFile': 'a_file_name',
'RegistrarScriptFile': 'a_file_name',
'ReplacementsFile': 'a_file_name',
'SuppressStartupBanner': 'true',
'TypeLibraryFile': 'a_file_name',
'UpdateFileHashes': 'true',
'UpdateFileHashesSearchPath': 'a_file_name',
'VerboseOutput': 'true'},
'ManifestResourceCompile': {
'ResourceOutputFileName': 'my_name'},
'ProjectReference': {
'LinkLibraryDependencies': 'true',
'UseLibraryDependencyInputs': 'false'},
'': {
'EmbedManifest': 'true',
'GenerateManifest': 'true',
'IgnoreImportLibrary': 'true',
'LinkIncremental': 'false'}}
actual_msbuild_settings = MSVSSettings.ConvertToMSBuildSettings(
msvs_settings,
self.stderr)
self.assertEqual(expected_msbuild_settings, actual_msbuild_settings)
self._ExpectedWarnings([])
def testConvertToMSBuildSettings_actual(self):
"""Tests the conversion of an actual project.
A VS2008 project with most of the options defined was created through the
VS2008 IDE. It was then converted to VS2010. The tool settings found in
the .vcproj and .vcxproj files were converted to the two dictionaries
msvs_settings and expected_msbuild_settings.
Note that for many settings, the VS2010 converter adds macros like
    %(AdditionalIncludeDirectories) to make sure that inherited values are
included. Since the Gyp projects we generate do not use inheritance,
we removed these macros. They were:
ClCompile:
AdditionalIncludeDirectories: ';%(AdditionalIncludeDirectories)'
AdditionalOptions: ' %(AdditionalOptions)'
AdditionalUsingDirectories: ';%(AdditionalUsingDirectories)'
DisableSpecificWarnings: ';%(DisableSpecificWarnings)',
ForcedIncludeFiles: ';%(ForcedIncludeFiles)',
ForcedUsingFiles: ';%(ForcedUsingFiles)',
PreprocessorDefinitions: ';%(PreprocessorDefinitions)',
UndefinePreprocessorDefinitions:
';%(UndefinePreprocessorDefinitions)',
Link:
AdditionalDependencies: ';%(AdditionalDependencies)',
AdditionalLibraryDirectories: ';%(AdditionalLibraryDirectories)',
AdditionalManifestDependencies:
';%(AdditionalManifestDependencies)',
AdditionalOptions: ' %(AdditionalOptions)',
AddModuleNamesToAssembly: ';%(AddModuleNamesToAssembly)',
AssemblyLinkResource: ';%(AssemblyLinkResource)',
DelayLoadDLLs: ';%(DelayLoadDLLs)',
EmbedManagedResourceFile: ';%(EmbedManagedResourceFile)',
ForceSymbolReferences: ';%(ForceSymbolReferences)',
IgnoreSpecificDefaultLibraries:
';%(IgnoreSpecificDefaultLibraries)',
ResourceCompile:
AdditionalIncludeDirectories: ';%(AdditionalIncludeDirectories)',
AdditionalOptions: ' %(AdditionalOptions)',
PreprocessorDefinitions: ';%(PreprocessorDefinitions)',
Manifest:
AdditionalManifestFiles: ';%(AdditionalManifestFiles)',
AdditionalOptions: ' %(AdditionalOptions)',
InputResourceManifests: ';%(InputResourceManifests)',
"""
msvs_settings = {
'VCCLCompilerTool': {
'AdditionalIncludeDirectories': 'dir1',
'AdditionalOptions': '/more',
'AdditionalUsingDirectories': 'test',
'AssemblerListingLocation': '$(IntDir)\\a',
'AssemblerOutput': '1',
'BasicRuntimeChecks': '3',
'BrowseInformation': '1',
'BrowseInformationFile': '$(IntDir)\\e',
'BufferSecurityCheck': 'false',
'CallingConvention': '1',
'CompileAs': '1',
'DebugInformationFormat': '4',
'DefaultCharIsUnsigned': 'true',
'Detect64BitPortabilityProblems': 'true',
'DisableLanguageExtensions': 'true',
'DisableSpecificWarnings': 'abc',
'EnableEnhancedInstructionSet': '1',
'EnableFiberSafeOptimizations': 'true',
'EnableFunctionLevelLinking': 'true',
'EnableIntrinsicFunctions': 'true',
'EnablePREfast': 'true',
'ErrorReporting': '2',
'ExceptionHandling': '2',
'ExpandAttributedSource': 'true',
'FavorSizeOrSpeed': '2',
'FloatingPointExceptions': 'true',
'FloatingPointModel': '1',
'ForceConformanceInForLoopScope': 'false',
'ForcedIncludeFiles': 'def',
'ForcedUsingFiles': 'ge',
'GeneratePreprocessedFile': '2',
'GenerateXMLDocumentationFiles': 'true',
'IgnoreStandardIncludePath': 'true',
'InlineFunctionExpansion': '1',
'KeepComments': 'true',
'MinimalRebuild': 'true',
'ObjectFile': '$(IntDir)\\b',
'OmitDefaultLibName': 'true',
'OmitFramePointers': 'true',
'OpenMP': 'true',
'Optimization': '3',
'PrecompiledHeaderFile': '$(IntDir)\\$(TargetName).pche',
'PrecompiledHeaderThrough': 'StdAfx.hd',
'PreprocessorDefinitions': 'WIN32;_DEBUG;_CONSOLE',
'ProgramDataBaseFileName': '$(IntDir)\\vc90b.pdb',
'RuntimeLibrary': '3',
'RuntimeTypeInfo': 'false',
'ShowIncludes': 'true',
'SmallerTypeCheck': 'true',
'StringPooling': 'true',
'StructMemberAlignment': '3',
'SuppressStartupBanner': 'false',
'TreatWChar_tAsBuiltInType': 'false',
'UndefineAllPreprocessorDefinitions': 'true',
'UndefinePreprocessorDefinitions': 'wer',
'UseFullPaths': 'true',
'UsePrecompiledHeader': '0',
'UseUnicodeResponseFiles': 'false',
'WarnAsError': 'true',
'WarningLevel': '3',
'WholeProgramOptimization': 'true',
'XMLDocumentationFileName': '$(IntDir)\\c'},
'VCLinkerTool': {
'AdditionalDependencies': 'zx',
'AdditionalLibraryDirectories': 'asd',
'AdditionalManifestDependencies': 's2',
'AdditionalOptions': '/mor2',
'AddModuleNamesToAssembly': 'd1',
'AllowIsolation': 'false',
'AssemblyDebug': '1',
'AssemblyLinkResource': 'd5',
'BaseAddress': '23423',
'CLRImageType': '3',
'CLRThreadAttribute': '1',
'CLRUnmanagedCodeCheck': 'true',
'DataExecutionPrevention': '0',
'DelayLoadDLLs': 'd4',
'DelaySign': 'true',
'Driver': '2',
'EmbedManagedResourceFile': 'd2',
'EnableCOMDATFolding': '1',
'EnableUAC': 'false',
'EntryPointSymbol': 'f5',
'ErrorReporting': '2',
'FixedBaseAddress': '1',
'ForceSymbolReferences': 'd3',
'FunctionOrder': 'fssdfsd',
'GenerateDebugInformation': 'true',
'GenerateManifest': 'false',
'GenerateMapFile': 'true',
'HeapCommitSize': '13',
'HeapReserveSize': '12',
'IgnoreAllDefaultLibraries': 'true',
'IgnoreDefaultLibraryNames': 'flob;flok',
'IgnoreEmbeddedIDL': 'true',
'IgnoreImportLibrary': 'true',
'ImportLibrary': 'f4',
'KeyContainer': 'f7',
'KeyFile': 'f6',
'LargeAddressAware': '2',
'LinkIncremental': '0',
'LinkLibraryDependencies': 'false',
'LinkTimeCodeGeneration': '1',
'ManifestFile':
'$(IntDir)\\$(TargetFileName).2intermediate.manifest',
'MapExports': 'true',
'MapFileName': 'd5',
'MergedIDLBaseFileName': 'f2',
'MergeSections': 'f5',
'MidlCommandFile': 'f1',
'ModuleDefinitionFile': 'sdsd',
'OptimizeForWindows98': '2',
'OptimizeReferences': '2',
'OutputFile': '$(OutDir)\\$(ProjectName)2.exe',
'PerUserRedirection': 'true',
'Profile': 'true',
'ProfileGuidedDatabase': '$(TargetDir)$(TargetName).pgdd',
'ProgramDatabaseFile': 'Flob.pdb',
'RandomizedBaseAddress': '1',
'RegisterOutput': 'true',
'ResourceOnlyDLL': 'true',
'SetChecksum': 'false',
'ShowProgress': '1',
'StackCommitSize': '15',
'StackReserveSize': '14',
'StripPrivateSymbols': 'd3',
'SubSystem': '1',
'SupportUnloadOfDelayLoadedDLL': 'true',
'SuppressStartupBanner': 'false',
'SwapRunFromCD': 'true',
'SwapRunFromNet': 'true',
'TargetMachine': '1',
'TerminalServerAware': '1',
'TurnOffAssemblyGeneration': 'true',
'TypeLibraryFile': 'f3',
'TypeLibraryResourceID': '12',
'UACExecutionLevel': '2',
'UACUIAccess': 'true',
'UseLibraryDependencyInputs': 'true',
'UseUnicodeResponseFiles': 'false',
'Version': '333'},
'VCResourceCompilerTool': {
'AdditionalIncludeDirectories': 'f3',
'AdditionalOptions': '/more3',
'Culture': '3084',
'IgnoreStandardIncludePath': 'true',
'PreprocessorDefinitions': '_UNICODE;UNICODE2',
'ResourceOutputFileName': '$(IntDir)/$(InputName)3.res',
'ShowProgress': 'true'},
'VCManifestTool': {
'AdditionalManifestFiles': 'sfsdfsd',
'AdditionalOptions': 'afdsdafsd',
'AssemblyIdentity': 'sddfdsadfsa',
'ComponentFileName': 'fsdfds',
'DependencyInformationFile': '$(IntDir)\\mt.depdfd',
'EmbedManifest': 'false',
'GenerateCatalogFiles': 'true',
'InputResourceManifests': 'asfsfdafs',
'ManifestResourceFile':
'$(IntDir)\\$(TargetFileName).embed.manifest.resfdsf',
'OutputManifestFile': '$(TargetPath).manifestdfs',
'RegistrarScriptFile': 'sdfsfd',
'ReplacementsFile': 'sdffsd',
'SuppressStartupBanner': 'false',
'TypeLibraryFile': 'sfsd',
'UpdateFileHashes': 'true',
'UpdateFileHashesSearchPath': 'sfsd',
'UseFAT32Workaround': 'true',
'UseUnicodeResponseFiles': 'false',
'VerboseOutput': 'true'}}
expected_msbuild_settings = {
'ClCompile': {
'AdditionalIncludeDirectories': 'dir1',
'AdditionalOptions': '/more /J',
'AdditionalUsingDirectories': 'test',
'AssemblerListingLocation': '$(IntDir)a',
'AssemblerOutput': 'AssemblyCode',
'BasicRuntimeChecks': 'EnableFastChecks',
'BrowseInformation': 'true',
'BrowseInformationFile': '$(IntDir)e',
'BufferSecurityCheck': 'false',
'CallingConvention': 'FastCall',
'CompileAs': 'CompileAsC',
'DebugInformationFormat': 'EditAndContinue',
'DisableLanguageExtensions': 'true',
'DisableSpecificWarnings': 'abc',
'EnableEnhancedInstructionSet': 'StreamingSIMDExtensions',
'EnableFiberSafeOptimizations': 'true',
'EnablePREfast': 'true',
'ErrorReporting': 'Queue',
'ExceptionHandling': 'Async',
'ExpandAttributedSource': 'true',
'FavorSizeOrSpeed': 'Size',
'FloatingPointExceptions': 'true',
'FloatingPointModel': 'Strict',
'ForceConformanceInForLoopScope': 'false',
'ForcedIncludeFiles': 'def',
'ForcedUsingFiles': 'ge',
'FunctionLevelLinking': 'true',
'GenerateXMLDocumentationFiles': 'true',
'IgnoreStandardIncludePath': 'true',
'InlineFunctionExpansion': 'OnlyExplicitInline',
'IntrinsicFunctions': 'true',
'MinimalRebuild': 'true',
'ObjectFileName': '$(IntDir)b',
'OmitDefaultLibName': 'true',
'OmitFramePointers': 'true',
'OpenMPSupport': 'true',
'Optimization': 'Full',
'PrecompiledHeader': 'NotUsing', # Actual conversion gives ''
'PrecompiledHeaderFile': 'StdAfx.hd',
'PrecompiledHeaderOutputFile': '$(IntDir)$(TargetName).pche',
'PreprocessKeepComments': 'true',
'PreprocessorDefinitions': 'WIN32;_DEBUG;_CONSOLE',
'PreprocessSuppressLineNumbers': 'true',
'PreprocessToFile': 'true',
'ProgramDataBaseFileName': '$(IntDir)vc90b.pdb',
'RuntimeLibrary': 'MultiThreadedDebugDLL',
'RuntimeTypeInfo': 'false',
'ShowIncludes': 'true',
'SmallerTypeCheck': 'true',
'StringPooling': 'true',
'StructMemberAlignment': '4Bytes',
'SuppressStartupBanner': 'false',
'TreatWarningAsError': 'true',
'TreatWChar_tAsBuiltInType': 'false',
'UndefineAllPreprocessorDefinitions': 'true',
'UndefinePreprocessorDefinitions': 'wer',
'UseFullPaths': 'true',
'WarningLevel': 'Level3',
'WholeProgramOptimization': 'true',
'XMLDocumentationFileName': '$(IntDir)c'},
'Link': {
'AdditionalDependencies': 'zx',
'AdditionalLibraryDirectories': 'asd',
'AdditionalManifestDependencies': 's2',
'AdditionalOptions': '/mor2',
'AddModuleNamesToAssembly': 'd1',
'AllowIsolation': 'false',
'AssemblyDebug': 'true',
'AssemblyLinkResource': 'd5',
'BaseAddress': '23423',
'CLRImageType': 'ForceSafeILImage',
'CLRThreadAttribute': 'MTAThreadingAttribute',
'CLRUnmanagedCodeCheck': 'true',
'DataExecutionPrevention': '',
'DelayLoadDLLs': 'd4',
'DelaySign': 'true',
'Driver': 'UpOnly',
'EmbedManagedResourceFile': 'd2',
'EnableCOMDATFolding': 'false',
'EnableUAC': 'false',
'EntryPointSymbol': 'f5',
'FixedBaseAddress': 'false',
'ForceSymbolReferences': 'd3',
'FunctionOrder': 'fssdfsd',
'GenerateDebugInformation': 'true',
'GenerateMapFile': 'true',
'HeapCommitSize': '13',
'HeapReserveSize': '12',
'IgnoreAllDefaultLibraries': 'true',
'IgnoreEmbeddedIDL': 'true',
'IgnoreSpecificDefaultLibraries': 'flob;flok',
'ImportLibrary': 'f4',
'KeyContainer': 'f7',
'KeyFile': 'f6',
'LargeAddressAware': 'true',
'LinkErrorReporting': 'QueueForNextLogin',
'LinkTimeCodeGeneration': 'UseLinkTimeCodeGeneration',
'ManifestFile': '$(IntDir)$(TargetFileName).2intermediate.manifest',
'MapExports': 'true',
'MapFileName': 'd5',
'MergedIDLBaseFileName': 'f2',
'MergeSections': 'f5',
'MidlCommandFile': 'f1',
'ModuleDefinitionFile': 'sdsd',
'NoEntryPoint': 'true',
'OptimizeReferences': 'true',
'OutputFile': '$(OutDir)$(ProjectName)2.exe',
'PerUserRedirection': 'true',
'Profile': 'true',
'ProfileGuidedDatabase': '$(TargetDir)$(TargetName).pgdd',
'ProgramDatabaseFile': 'Flob.pdb',
'RandomizedBaseAddress': 'false',
'RegisterOutput': 'true',
'SetChecksum': 'false',
'ShowProgress': 'LinkVerbose',
'StackCommitSize': '15',
'StackReserveSize': '14',
'StripPrivateSymbols': 'd3',
'SubSystem': 'Console',
'SupportUnloadOfDelayLoadedDLL': 'true',
'SuppressStartupBanner': 'false',
'SwapRunFromCD': 'true',
'SwapRunFromNET': 'true',
'TargetMachine': 'MachineX86',
'TerminalServerAware': 'false',
'TurnOffAssemblyGeneration': 'true',
'TypeLibraryFile': 'f3',
'TypeLibraryResourceID': '12',
'UACExecutionLevel': 'RequireAdministrator',
'UACUIAccess': 'true',
'Version': '333'},
'ResourceCompile': {
'AdditionalIncludeDirectories': 'f3',
'AdditionalOptions': '/more3',
'Culture': '0x0c0c',
'IgnoreStandardIncludePath': 'true',
'PreprocessorDefinitions': '_UNICODE;UNICODE2',
'ResourceOutputFileName': '$(IntDir)%(Filename)3.res',
'ShowProgress': 'true'},
'Manifest': {
'AdditionalManifestFiles': 'sfsdfsd',
'AdditionalOptions': 'afdsdafsd',
'AssemblyIdentity': 'sddfdsadfsa',
'ComponentFileName': 'fsdfds',
'GenerateCatalogFiles': 'true',
'InputResourceManifests': 'asfsfdafs',
'OutputManifestFile': '$(TargetPath).manifestdfs',
'RegistrarScriptFile': 'sdfsfd',
'ReplacementsFile': 'sdffsd',
'SuppressStartupBanner': 'false',
'TypeLibraryFile': 'sfsd',
'UpdateFileHashes': 'true',
'UpdateFileHashesSearchPath': 'sfsd',
'VerboseOutput': 'true'},
'ProjectReference': {
'LinkLibraryDependencies': 'false',
'UseLibraryDependencyInputs': 'true'},
'': {
'EmbedManifest': 'false',
'GenerateManifest': 'false',
'IgnoreImportLibrary': 'true',
'LinkIncremental': ''
},
'ManifestResourceCompile': {
'ResourceOutputFileName':
'$(IntDir)$(TargetFileName).embed.manifest.resfdsf'}
}
actual_msbuild_settings = MSVSSettings.ConvertToMSBuildSettings(
msvs_settings,
self.stderr)
self.assertEqual(expected_msbuild_settings, actual_msbuild_settings)
self._ExpectedWarnings([])
if __name__ == '__main__':
unittest.main()
| bsd-3-clause |
pbs/django-cms | cms/plugins/snippet/south_migrations/0004_publisher2.py | 4 | 9604 |
from south.db import db
from django.db import models
from cms.plugins.snippet.models import *
class Migration:
needed_by = (
# Migration after cms.publisher2, keep migrations in sync with real db
# Fixes migration error in MySQL
("cms", "0022_login_required_added.py"),
)
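    # South's needed_by declares a reverse dependency: this migration must
    # be applied before cms' 0022_login_required_added.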
def forwards(self, orm):
# Deleting field 'SnippetPtr.public'
db.delete_column('cmsplugin_snippetptr', 'public_id')
# Deleting model 'snippetptrpublic'
db.delete_table('cmsplugin_snippetptrpublic')
def backwards(self, orm):
# Adding field 'SnippetPtr.public'
db.add_column('cmsplugin_snippetptr', 'public', orm['snippet.snippetptr:public'])
# Adding model 'snippetptrpublic'
db.create_table('cmsplugin_snippetptrpublic', (
('snippet', orm['snippet.snippetptrpublic:snippet']),
('cmspluginpublic_ptr', orm['snippet.snippetptrpublic:cmspluginpublic_ptr']),
('mark_delete', orm['snippet.snippetptrpublic:mark_delete']),
))
db.send_create_signal('snippet', ['snippetptrpublic'])
models = {
'cms.cmsplugin': {
'creation_date': ('models.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'id': ('models.AutoField', [], {'primary_key': 'True'}),
'language': ('models.CharField', [], {'max_length': '5', 'db_index': 'True'}),
'level': ('models.PositiveIntegerField', [], {'db_index': 'True'}),
'lft': ('models.PositiveIntegerField', [], {'db_index': 'True'}),
'page': ('models.ForeignKey', [], {'to': "orm['cms.Page']"}),
'parent': ('models.ForeignKey', [], {'to': "orm['cms.CMSPlugin']", 'null': 'True', 'blank': 'True'}),
'placeholder': ('models.CharField', [], {'max_length': '50', 'db_index': 'True'}),
'plugin_type': ('models.CharField', [], {'max_length': '50', 'db_index': 'True'}),
'position': ('models.PositiveSmallIntegerField', [], {'null': 'True', 'blank': 'True'}),
'publisher_is_draft': ('models.BooleanField', [], {'default': '1', 'db_index': 'True', 'blank': 'True'}),
'publisher_public': ('models.OneToOneField', [], {'related_name': "'publisher_draft'", 'unique': 'True', 'null': 'True', 'to': "orm['cms.CMSPlugin']"}),
'publisher_state': ('models.SmallIntegerField', [], {'default': '0', 'db_index': 'True'}),
'rght': ('models.PositiveIntegerField', [], {'db_index': 'True'}),
'tree_id': ('models.PositiveIntegerField', [], {'db_index': 'True'})
},
'cms.cmspluginpublic': {
'creation_date': ('models.DateTimeField', [], {'default': 'datetime.datetime(2009, 7, 2, 6, 35, 14, 587322)'}),
'id': ('models.AutoField', [], {'primary_key': 'True'}),
'language': ('models.CharField', [], {'max_length': '5', 'db_index': 'True'}),
'level': ('models.PositiveIntegerField', [], {'db_index': 'True'}),
'lft': ('models.PositiveIntegerField', [], {'db_index': 'True'}),
'mark_delete': ('models.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'page': ('models.ForeignKey', [], {'to': "orm['cms.PagePublic']"}),
'parent': ('models.ForeignKey', [], {'to': "orm['cms.CMSPluginPublic']", 'null': 'True', 'blank': 'True'}),
'placeholder': ('models.CharField', [], {'max_length': '50', 'db_index': 'True'}),
'plugin_type': ('models.CharField', [], {'max_length': '50', 'db_index': 'True'}),
'position': ('models.PositiveSmallIntegerField', [], {'null': 'True', 'blank': 'True'}),
'rght': ('models.PositiveIntegerField', [], {'db_index': 'True'}),
'tree_id': ('models.PositiveIntegerField', [], {'db_index': 'True'})
},
'cms.page': {
'changed_by': ('models.CharField', [], {'max_length': '70'}),
'created_by': ('models.CharField', [], {'max_length': '70'}),
'creation_date': ('models.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'id': ('models.AutoField', [], {'primary_key': 'True'}),
'in_navigation': ('models.BooleanField', [], {'default': 'True', 'db_index': 'True', 'blank': 'True'}),
'level': ('models.PositiveIntegerField', [], {'db_index': 'True'}),
'lft': ('models.PositiveIntegerField', [], {'db_index': 'True'}),
'login_required': ('models.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'moderator_state': ('models.SmallIntegerField', [], {'default': '1', 'blank': 'True'}),
'navigation_extenders': ('models.CharField', [], {'db_index': 'True', 'max_length': '80', 'null': 'True', 'blank': 'True'}),
'parent': ('models.ForeignKey', [], {'related_name': "'children'", 'blank': 'True', 'null': 'True', 'to': "orm['cms.Page']"}),
'publication_date': ('models.DateTimeField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
'publication_end_date': ('models.DateTimeField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
'published': ('models.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'publisher_is_draft': ('models.BooleanField', [], {'default': '1', 'db_index': 'True', 'blank': 'True'}),
'publisher_public': ('models.OneToOneField', [], {'related_name': "'publisher_draft'", 'unique': 'True', 'null': 'True', 'to': "orm['cms.Page']"}),
'publisher_state': ('models.SmallIntegerField', [], {'default': '0', 'db_index': 'True'}),
'reverse_id': ('models.CharField', [], {'db_index': 'True', 'max_length': '40', 'null': 'True', 'blank': 'True'}),
'rght': ('models.PositiveIntegerField', [], {'db_index': 'True'}),
'site': ('models.ForeignKey', [], {'to': "orm['sites.Site']"}),
'soft_root': ('models.BooleanField', [], {'default': 'False', 'db_index': 'True', 'blank': 'True'}),
'template': ('models.CharField', [], {'max_length': '100'}),
'tree_id': ('models.PositiveIntegerField', [], {'db_index': 'True'})
},
'cms.pagepublic': {
'changed_by': ('models.CharField', [], {'max_length': '70'}),
'created_by': ('models.CharField', [], {'max_length': '70'}),
'creation_date': ('models.DateTimeField', [], {'default': 'datetime.datetime(2009, 7, 2, 6, 35, 13, 668564)'}),
'id': ('models.AutoField', [], {'primary_key': 'True'}),
'in_navigation': ('models.BooleanField', [], {'default': 'True', 'blank': 'True', 'db_index': 'True'}),
'level': ('models.PositiveIntegerField', [], {'db_index': 'True'}),
'lft': ('models.PositiveIntegerField', [], {'db_index': 'True'}),
'login_required': ('models.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'mark_delete': ('models.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'moderator_state': ('models.SmallIntegerField', [], {'default': '1', 'blank': 'True'}),
'navigation_extenders': ('models.CharField', [], {'blank': 'True', 'max_length': '80', 'null': 'True', 'db_index': 'True'}),
'parent': ('models.ForeignKey', [], {'related_name': "'children'", 'null': 'True', 'to': "orm['cms.PagePublic']", 'blank': 'True'}),
'publication_date': ('models.DateTimeField', [], {'blank': 'True', 'null': 'True', 'db_index': 'True'}),
'publication_end_date': ('models.DateTimeField', [], {'blank': 'True', 'null': 'True', 'db_index': 'True'}),
'published': ('models.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'reverse_id': ('models.CharField', [], {'blank': 'True', 'max_length': '40', 'null': 'True', 'db_index': 'True'}),
'rght': ('models.PositiveIntegerField', [], {'db_index': 'True'}),
'site': ('models.ForeignKey', [], {'to': "orm['sites.Site']"}),
'soft_root': ('models.BooleanField', [], {'default': 'False', 'blank': 'True', 'db_index': 'True'}),
'template': ('models.CharField', [], {'max_length': '100'}),
'tree_id': ('models.PositiveIntegerField', [], {'db_index': 'True'})
},
'sites.site': {
'Meta': {'db_table': "'django_site'"},
'domain': ('models.CharField', [], {'max_length': '100'}),
'id': ('models.AutoField', [], {'primary_key': 'True'}),
'name': ('models.CharField', [], {'max_length': '50'})
},
'snippet.snippet': {
'html': ('models.TextField', [], {'blank': 'True'}),
'id': ('models.AutoField', [], {'primary_key': 'True'}),
'name': ('models.CharField', [], {'unique': 'True', 'max_length': '255'})
},
'snippet.snippetptr': {
'Meta': {'db_table': "'cmsplugin_snippetptr'"},
'cmsplugin_ptr': ('models.OneToOneField', [], {'to': "orm['cms.CMSPlugin']", 'unique': 'True', 'primary_key': 'True'}),
'snippet': ('models.ForeignKey', [], {'to': "orm['snippet.Snippet']"})
},
'snippet.snippetptrpublic': {
'cmspluginpublic_ptr': "models.OneToOneField(to=orm['cms.CMSPluginPublic'], unique=True, primary_key=True)",
'mark_delete': 'models.BooleanField(default=False, blank=True)',
'snippet': "models.ForeignKey(to=orm['snippet.Snippet'])"
}
}
complete_apps = ['snippet']
| bsd-3-clause |
vikatory/kbengine | kbe/res/scripts/common/Lib/unittest/test/test_assertions.py | 82 | 16470 | import datetime
import warnings
import weakref
import unittest
from itertools import product
class Test_Assertions(unittest.TestCase):
def test_AlmostEqual(self):
self.assertAlmostEqual(1.00000001, 1.0)
self.assertNotAlmostEqual(1.0000001, 1.0)
self.assertRaises(self.failureException,
self.assertAlmostEqual, 1.0000001, 1.0)
self.assertRaises(self.failureException,
self.assertNotAlmostEqual, 1.00000001, 1.0)
self.assertAlmostEqual(1.1, 1.0, places=0)
self.assertRaises(self.failureException,
self.assertAlmostEqual, 1.1, 1.0, places=1)
self.assertAlmostEqual(0, .1+.1j, places=0)
self.assertNotAlmostEqual(0, .1+.1j, places=1)
self.assertRaises(self.failureException,
self.assertAlmostEqual, 0, .1+.1j, places=1)
self.assertRaises(self.failureException,
self.assertNotAlmostEqual, 0, .1+.1j, places=0)
self.assertAlmostEqual(float('inf'), float('inf'))
self.assertRaises(self.failureException, self.assertNotAlmostEqual,
float('inf'), float('inf'))
    def test_AlmostEqualWithDelta(self):
self.assertAlmostEqual(1.1, 1.0, delta=0.5)
self.assertAlmostEqual(1.0, 1.1, delta=0.5)
self.assertNotAlmostEqual(1.1, 1.0, delta=0.05)
self.assertNotAlmostEqual(1.0, 1.1, delta=0.05)
self.assertAlmostEqual(1.0, 1.0, delta=0.5)
self.assertRaises(self.failureException, self.assertNotAlmostEqual,
1.0, 1.0, delta=0.5)
self.assertRaises(self.failureException, self.assertAlmostEqual,
1.1, 1.0, delta=0.05)
self.assertRaises(self.failureException, self.assertNotAlmostEqual,
1.1, 1.0, delta=0.5)
self.assertRaises(TypeError, self.assertAlmostEqual,
1.1, 1.0, places=2, delta=2)
self.assertRaises(TypeError, self.assertNotAlmostEqual,
1.1, 1.0, places=2, delta=2)
first = datetime.datetime.now()
second = first + datetime.timedelta(seconds=10)
self.assertAlmostEqual(first, second,
delta=datetime.timedelta(seconds=20))
self.assertNotAlmostEqual(first, second,
delta=datetime.timedelta(seconds=5))
def test_assertRaises(self):
def _raise(e):
raise e
self.assertRaises(KeyError, _raise, KeyError)
self.assertRaises(KeyError, _raise, KeyError("key"))
try:
self.assertRaises(KeyError, lambda: None)
except self.failureException as e:
self.assertIn("KeyError not raised", str(e))
else:
self.fail("assertRaises() didn't fail")
try:
self.assertRaises(KeyError, _raise, ValueError)
except ValueError:
pass
else:
self.fail("assertRaises() didn't let exception pass through")
with self.assertRaises(KeyError) as cm:
try:
raise KeyError
except Exception as e:
exc = e
raise
self.assertIs(cm.exception, exc)
with self.assertRaises(KeyError):
raise KeyError("key")
try:
with self.assertRaises(KeyError):
pass
except self.failureException as e:
self.assertIn("KeyError not raised", str(e))
else:
self.fail("assertRaises() didn't fail")
try:
with self.assertRaises(KeyError):
raise ValueError
except ValueError:
pass
else:
self.fail("assertRaises() didn't let exception pass through")
def test_assertRaises_frames_survival(self):
# Issue #9815: assertRaises should avoid keeping local variables
# in a traceback alive.
class A:
pass
wr = None
class Foo(unittest.TestCase):
def foo(self):
nonlocal wr
a = A()
wr = weakref.ref(a)
try:
raise IOError
except IOError:
raise ValueError
def test_functional(self):
self.assertRaises(ValueError, self.foo)
def test_with(self):
with self.assertRaises(ValueError):
self.foo()
Foo("test_functional").run()
self.assertIsNone(wr())
Foo("test_with").run()
self.assertIsNone(wr())
def testAssertNotRegex(self):
self.assertNotRegex('Ala ma kota', r'r+')
try:
self.assertNotRegex('Ala ma kota', r'k.t', 'Message')
except self.failureException as e:
self.assertIn("'kot'", e.args[0])
self.assertIn('Message', e.args[0])
else:
self.fail('assertNotRegex should have failed.')
class TestLongMessage(unittest.TestCase):
"""Test that the individual asserts honour longMessage.
This actually tests all the message behaviour for
asserts that use longMessage."""
def setUp(self):
class TestableTestFalse(unittest.TestCase):
longMessage = False
failureException = self.failureException
def testTest(self):
pass
class TestableTestTrue(unittest.TestCase):
longMessage = True
failureException = self.failureException
def testTest(self):
pass
self.testableTrue = TestableTestTrue('testTest')
self.testableFalse = TestableTestFalse('testTest')
def testDefault(self):
self.assertTrue(unittest.TestCase.longMessage)
def test_formatMsg(self):
self.assertEqual(self.testableFalse._formatMessage(None, "foo"), "foo")
self.assertEqual(self.testableFalse._formatMessage("foo", "bar"), "foo")
self.assertEqual(self.testableTrue._formatMessage(None, "foo"), "foo")
self.assertEqual(self.testableTrue._formatMessage("foo", "bar"), "bar : foo")
# This blows up if _formatMessage uses string concatenation
self.testableTrue._formatMessage(object(), 'foo')
def test_formatMessage_unicode_error(self):
one = ''.join(chr(i) for i in range(255))
# this used to cause a UnicodeDecodeError constructing msg
self.testableTrue._formatMessage(one, '\uFFFD')
def assertMessages(self, methodName, args, errors):
"""
Check that methodName(*args) raises the correct error messages.
        errors should be a list of four regexes that match the error when:
1) longMessage = False and no msg passed;
2) longMessage = False and msg passed;
3) longMessage = True and no msg passed;
4) longMessage = True and msg passed;
"""
def getMethod(i):
useTestableFalse = i < 2
if useTestableFalse:
test = self.testableFalse
else:
test = self.testableTrue
return getattr(test, methodName)
for i, expected_regex in enumerate(errors):
testMethod = getMethod(i)
kwargs = {}
withMsg = i % 2
if withMsg:
kwargs = {"msg": "oops"}
with self.assertRaisesRegex(self.failureException,
expected_regex=expected_regex):
testMethod(*args, **kwargs)
def testAssertTrue(self):
self.assertMessages('assertTrue', (False,),
["^False is not true$", "^oops$", "^False is not true$",
"^False is not true : oops$"])
def testAssertFalse(self):
self.assertMessages('assertFalse', (True,),
["^True is not false$", "^oops$", "^True is not false$",
"^True is not false : oops$"])
def testNotEqual(self):
self.assertMessages('assertNotEqual', (1, 1),
["^1 == 1$", "^oops$", "^1 == 1$",
"^1 == 1 : oops$"])
def testAlmostEqual(self):
self.assertMessages('assertAlmostEqual', (1, 2),
["^1 != 2 within 7 places$", "^oops$",
"^1 != 2 within 7 places$", "^1 != 2 within 7 places : oops$"])
def testNotAlmostEqual(self):
self.assertMessages('assertNotAlmostEqual', (1, 1),
["^1 == 1 within 7 places$", "^oops$",
"^1 == 1 within 7 places$", "^1 == 1 within 7 places : oops$"])
def test_baseAssertEqual(self):
self.assertMessages('_baseAssertEqual', (1, 2),
["^1 != 2$", "^oops$", "^1 != 2$", "^1 != 2 : oops$"])
def testAssertSequenceEqual(self):
# Error messages are multiline so not testing on full message
# assertTupleEqual and assertListEqual delegate to this method
self.assertMessages('assertSequenceEqual', ([], [None]),
["\+ \[None\]$", "^oops$", r"\+ \[None\]$",
r"\+ \[None\] : oops$"])
def testAssertSetEqual(self):
self.assertMessages('assertSetEqual', (set(), set([None])),
["None$", "^oops$", "None$",
"None : oops$"])
def testAssertIn(self):
self.assertMessages('assertIn', (None, []),
                            [r'^None not found in \[\]$', "^oops$",
                             r'^None not found in \[\]$',
                             r'^None not found in \[\] : oops$'])
def testAssertNotIn(self):
self.assertMessages('assertNotIn', (None, [None]),
                            [r'^None unexpectedly found in \[None\]$', "^oops$",
                             r'^None unexpectedly found in \[None\]$',
                             r'^None unexpectedly found in \[None\] : oops$'])
def testAssertDictEqual(self):
self.assertMessages('assertDictEqual', ({}, {'key': 'value'}),
[r"\+ \{'key': 'value'\}$", "^oops$",
"\+ \{'key': 'value'\}$",
"\+ \{'key': 'value'\} : oops$"])
def testAssertDictContainsSubset(self):
with warnings.catch_warnings():
warnings.simplefilter("ignore", DeprecationWarning)
self.assertMessages('assertDictContainsSubset', ({'key': 'value'}, {}),
["^Missing: 'key'$", "^oops$",
"^Missing: 'key'$",
"^Missing: 'key' : oops$"])
def testAssertMultiLineEqual(self):
self.assertMessages('assertMultiLineEqual', ("", "foo"),
[r"\+ foo$", "^oops$",
r"\+ foo$",
r"\+ foo : oops$"])
def testAssertLess(self):
self.assertMessages('assertLess', (2, 1),
["^2 not less than 1$", "^oops$",
"^2 not less than 1$", "^2 not less than 1 : oops$"])
def testAssertLessEqual(self):
self.assertMessages('assertLessEqual', (2, 1),
["^2 not less than or equal to 1$", "^oops$",
"^2 not less than or equal to 1$",
"^2 not less than or equal to 1 : oops$"])
def testAssertGreater(self):
self.assertMessages('assertGreater', (1, 2),
["^1 not greater than 2$", "^oops$",
"^1 not greater than 2$",
"^1 not greater than 2 : oops$"])
def testAssertGreaterEqual(self):
self.assertMessages('assertGreaterEqual', (1, 2),
["^1 not greater than or equal to 2$", "^oops$",
"^1 not greater than or equal to 2$",
"^1 not greater than or equal to 2 : oops$"])
def testAssertIsNone(self):
self.assertMessages('assertIsNone', ('not None',),
["^'not None' is not None$", "^oops$",
"^'not None' is not None$",
"^'not None' is not None : oops$"])
def testAssertIsNotNone(self):
self.assertMessages('assertIsNotNone', (None,),
["^unexpectedly None$", "^oops$",
"^unexpectedly None$",
"^unexpectedly None : oops$"])
def testAssertIs(self):
self.assertMessages('assertIs', (None, 'foo'),
["^None is not 'foo'$", "^oops$",
"^None is not 'foo'$",
"^None is not 'foo' : oops$"])
def testAssertIsNot(self):
self.assertMessages('assertIsNot', (None, None),
["^unexpectedly identical: None$", "^oops$",
"^unexpectedly identical: None$",
"^unexpectedly identical: None : oops$"])
def assertMessagesCM(self, methodName, args, func, errors):
"""
Check that the correct error messages are raised while executing:
with method(*args):
func()
        *errors* should be a list of four regexes that match the error when:
1) longMessage = False and no msg passed;
2) longMessage = False and msg passed;
3) longMessage = True and no msg passed;
4) longMessage = True and msg passed;
"""
p = product((self.testableFalse, self.testableTrue),
({}, {"msg": "oops"}))
for (cls, kwargs), err in zip(p, errors):
method = getattr(cls, methodName)
with self.assertRaisesRegex(cls.failureException, err):
with method(*args, **kwargs) as cm:
func()
def testAssertRaises(self):
self.assertMessagesCM('assertRaises', (TypeError,), lambda: None,
['^TypeError not raised$', '^oops$',
'^TypeError not raised$',
'^TypeError not raised : oops$'])
def testAssertRaisesRegex(self):
# test error not raised
self.assertMessagesCM('assertRaisesRegex', (TypeError, 'unused regex'),
lambda: None,
['^TypeError not raised$', '^oops$',
'^TypeError not raised$',
'^TypeError not raised : oops$'])
# test error raised but with wrong message
def raise_wrong_message():
raise TypeError('foo')
self.assertMessagesCM('assertRaisesRegex', (TypeError, 'regex'),
raise_wrong_message,
['^"regex" does not match "foo"$', '^oops$',
'^"regex" does not match "foo"$',
'^"regex" does not match "foo" : oops$'])
def testAssertWarns(self):
self.assertMessagesCM('assertWarns', (UserWarning,), lambda: None,
['^UserWarning not triggered$', '^oops$',
'^UserWarning not triggered$',
'^UserWarning not triggered : oops$'])
def testAssertWarnsRegex(self):
# test error not raised
self.assertMessagesCM('assertWarnsRegex', (UserWarning, 'unused regex'),
lambda: None,
['^UserWarning not triggered$', '^oops$',
'^UserWarning not triggered$',
'^UserWarning not triggered : oops$'])
# test warning raised but with wrong message
def raise_wrong_message():
warnings.warn('foo')
self.assertMessagesCM('assertWarnsRegex', (UserWarning, 'regex'),
raise_wrong_message,
['^"regex" does not match "foo"$', '^oops$',
'^"regex" does not match "foo"$',
'^"regex" does not match "foo" : oops$'])
if __name__ == "__main__":
unittest.main()
| lgpl-3.0 |
anirudhSK/chromium | chrome/common/extensions/docs/examples/apps/hello-python/httplib2/__init__.py | 451 | 51082 | from __future__ import generators
"""
httplib2
A caching http interface that supports ETags and gzip
to conserve bandwidth.
Requires Python 2.3 or later
Changelog:
2007-08-18, Rick: Modified so it's able to use a socks proxy if needed.
"""
__author__ = "Joe Gregorio (joe@bitworking.org)"
__copyright__ = "Copyright 2006, Joe Gregorio"
__contributors__ = ["Thomas Broyer (t.broyer@ltgt.net)",
"James Antill",
"Xavier Verges Farrero",
"Jonathan Feinberg",
"Blair Zajac",
"Sam Ruby",
"Louis Nyffenegger"]
__license__ = "MIT"
__version__ = "$Rev$"
import re
import sys
import email
import email.Utils
import email.Message
import email.FeedParser
import StringIO
import gzip
import zlib
import httplib
import urlparse
import base64
import os
import copy
import calendar
import time
import random
# remove deprecation warning in python2.6
try:
from hashlib import sha1 as _sha, md5 as _md5
except ImportError:
import sha
import md5
_sha = sha.new
_md5 = md5.new
import hmac
from gettext import gettext as _
import socket
try:
import socks
except ImportError:
socks = None
# Build the appropriate socket wrapper for ssl
try:
import ssl # python 2.6
_ssl_wrap_socket = ssl.wrap_socket
except ImportError:
def _ssl_wrap_socket(sock, key_file, cert_file):
ssl_sock = socket.ssl(sock, key_file, cert_file)
return httplib.FakeSocket(sock, ssl_sock)
if sys.version_info >= (2,3):
from iri2uri import iri2uri
else:
def iri2uri(uri):
return uri
def has_timeout(timeout): # python 2.6
if hasattr(socket, '_GLOBAL_DEFAULT_TIMEOUT'):
return (timeout is not None and timeout is not socket._GLOBAL_DEFAULT_TIMEOUT)
return (timeout is not None)
__all__ = ['Http', 'Response', 'ProxyInfo', 'HttpLib2Error',
'RedirectMissingLocation', 'RedirectLimit', 'FailedToDecompressContent',
'UnimplementedDigestAuthOptionError', 'UnimplementedHmacDigestAuthOptionError',
'debuglevel']
# The httplib debug level, set to a non-zero value to get debug output
debuglevel = 0
# Python 2.3 support
if sys.version_info < (2,4):
def sorted(seq):
seq.sort()
return seq
# Python 2.3 support
def HTTPResponse__getheaders(self):
"""Return list of (header, value) tuples."""
if self.msg is None:
raise httplib.ResponseNotReady()
return self.msg.items()
if not hasattr(httplib.HTTPResponse, 'getheaders'):
httplib.HTTPResponse.getheaders = HTTPResponse__getheaders
# All exceptions raised here derive from HttpLib2Error
class HttpLib2Error(Exception): pass
# Some exceptions can be caught and optionally
# be turned back into responses.
class HttpLib2ErrorWithResponse(HttpLib2Error):
def __init__(self, desc, response, content):
self.response = response
self.content = content
HttpLib2Error.__init__(self, desc)
class RedirectMissingLocation(HttpLib2ErrorWithResponse): pass
class RedirectLimit(HttpLib2ErrorWithResponse): pass
class FailedToDecompressContent(HttpLib2ErrorWithResponse): pass
class UnimplementedDigestAuthOptionError(HttpLib2ErrorWithResponse): pass
class UnimplementedHmacDigestAuthOptionError(HttpLib2ErrorWithResponse): pass
class RelativeURIError(HttpLib2Error): pass
class ServerNotFoundError(HttpLib2Error): pass
# Open Items:
# -----------
# Proxy support
# Are we removing the cached content too soon on PUT (only delete on 200, maybe?)
# Pluggable cache storage (supports storing the cache in
# flat files by default. We need a plug-in architecture
# that can support Berkeley DB and Squid)
# == Known Issues ==
# Does not handle a resource that uses conneg and Last-Modified but no ETag as a cache validator.
# Does not handle Cache-Control: max-stale
# Does not use Age: headers when calculating cache freshness.
# The number of redirections to follow before giving up.
# Note that only GET redirects are automatically followed.
# Will also honor 301 requests by saving that info and never
# requesting that URI again.
DEFAULT_MAX_REDIRECTS = 5
# Which headers are hop-by-hop headers by default
HOP_BY_HOP = ['connection', 'keep-alive', 'proxy-authenticate', 'proxy-authorization', 'te', 'trailers', 'transfer-encoding', 'upgrade']
def _get_end2end_headers(response):
hopbyhop = list(HOP_BY_HOP)
hopbyhop.extend([x.strip() for x in response.get('connection', '').split(',')])
return [header for header in response.keys() if header not in hopbyhop]
URI = re.compile(r"^(([^:/?#]+):)?(//([^/?#]*))?([^?#]*)(\?([^#]*))?(#(.*))?")
def parse_uri(uri):
"""Parses a URI using the regex given in Appendix B of RFC 3986.
(scheme, authority, path, query, fragment) = parse_uri(uri)
"""
groups = URI.match(uri).groups()
return (groups[1], groups[3], groups[4], groups[6], groups[8])
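# A quick sketch of what parse_uri returns (illustrative):
#   parse_uri("http://example.org/path?x=1#frag")
#   -> ('http', 'example.org', '/path', 'x=1', 'frag')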
def urlnorm(uri):
(scheme, authority, path, query, fragment) = parse_uri(uri)
if not scheme or not authority:
raise RelativeURIError("Only absolute URIs are allowed. uri = %s" % uri)
authority = authority.lower()
scheme = scheme.lower()
if not path:
path = "/"
# Could do syntax based normalization of the URI before
# computing the digest. See Section 6.2.2 of Std 66.
request_uri = query and "?".join([path, query]) or path
defrag_uri = scheme + "://" + authority + request_uri
return scheme, authority, request_uri, defrag_uri
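# urlnorm lowercases the scheme and authority and strips the fragment
# (illustrative):
#   urlnorm("HTTP://Example.ORG/A?b=1#c")
#   -> ('http', 'example.org', '/A?b=1', 'http://example.org/A?b=1')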
# Cache filename construction (original borrowed from Venus http://intertwingly.net/code/venus/)
re_url_scheme = re.compile(r'^\w+://')
re_slash = re.compile(r'[?/:|]+')
def safename(filename):
"""Return a filename suitable for the cache.
Strips dangerous and common characters to create a filename we
can use to store the cache in.
"""
try:
if re_url_scheme.match(filename):
if isinstance(filename,str):
filename = filename.decode('utf-8')
filename = filename.encode('idna')
else:
filename = filename.encode('idna')
except UnicodeError:
pass
if isinstance(filename,unicode):
filename=filename.encode('utf-8')
filemd5 = _md5(filename).hexdigest()
filename = re_url_scheme.sub("", filename)
filename = re_slash.sub(",", filename)
# limit length of filename
if len(filename)>200:
filename=filename[:200]
return ",".join((filename, filemd5))
NORMALIZE_SPACE = re.compile(r'(?:\r\n)?[ \t]+')
def _normalize_headers(headers):
    return dict([ (key.lower(), NORMALIZE_SPACE.sub(' ', value).strip())  for (key, value) in headers.iteritems()])
def _parse_cache_control(headers):
retval = {}
if headers.has_key('cache-control'):
parts = headers['cache-control'].split(',')
parts_with_args = [tuple([x.strip().lower() for x in part.split("=", 1)]) for part in parts if -1 != part.find("=")]
parts_wo_args = [(name.strip().lower(), 1) for name in parts if -1 == name.find("=")]
retval = dict(parts_with_args + parts_wo_args)
return retval
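# A sketch of the parsed form (illustrative): bare directives map to 1,
# valued directives keep their string value:
#   _parse_cache_control({'cache-control': 'no-cache, max-age=3600'})
#   -> {'no-cache': 1, 'max-age': '3600'}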
# Whether to use a strict mode to parse WWW-Authenticate headers
# Might lead to bad results in case of ill-formed header value,
# so disabled by default, falling back to relaxed parsing.
# Set to true to turn on; useful for testing servers.
USE_WWW_AUTH_STRICT_PARSING = 0
# In regex below:
# [^\0-\x1f\x7f-\xff()<>@,;:\\\"/[\]?={} \t]+ matches a "token" as defined by HTTP
# "(?:[^\0-\x08\x0A-\x1f\x7f-\xff\\\"]|\\[\0-\x7f])*?" matches a "quoted-string" as defined by HTTP, when LWS have already been replaced by a single space
# Actually, as an auth-param value can be either a token or a quoted-string, they are combined in a single pattern which matches both:
# \"?((?<=\")(?:[^\0-\x1f\x7f-\xff\\\"]|\\[\0-\x7f])*?(?=\")|(?<!\")[^\0-\x08\x0A-\x1f\x7f-\xff()<>@,;:\\\"/[\]?={} \t]+(?!\"))\"?
WWW_AUTH_STRICT = re.compile(r"^(?:\s*(?:,\s*)?([^\0-\x1f\x7f-\xff()<>@,;:\\\"/[\]?={} \t]+)\s*=\s*\"?((?<=\")(?:[^\0-\x08\x0A-\x1f\x7f-\xff\\\"]|\\[\0-\x7f])*?(?=\")|(?<!\")[^\0-\x1f\x7f-\xff()<>@,;:\\\"/[\]?={} \t]+(?!\"))\"?)(.*)$")
WWW_AUTH_RELAXED = re.compile(r"^(?:\s*(?:,\s*)?([^ \t\r\n=]+)\s*=\s*\"?((?<=\")(?:[^\\\"]|\\.)*?(?=\")|(?<!\")[^ \t\r\n,]+(?!\"))\"?)(.*)$")
UNQUOTE_PAIRS = re.compile(r'\\(.)')
def _parse_www_authenticate(headers, headername='www-authenticate'):
"""Returns a dictionary of dictionaries, one dict
per auth_scheme."""
retval = {}
if headers.has_key(headername):
authenticate = headers[headername].strip()
www_auth = USE_WWW_AUTH_STRICT_PARSING and WWW_AUTH_STRICT or WWW_AUTH_RELAXED
while authenticate:
# Break off the scheme at the beginning of the line
if headername == 'authentication-info':
(auth_scheme, the_rest) = ('digest', authenticate)
else:
(auth_scheme, the_rest) = authenticate.split(" ", 1)
# Now loop over all the key value pairs that come after the scheme,
# being careful not to roll into the next scheme
match = www_auth.search(the_rest)
auth_params = {}
while match:
if match and len(match.groups()) == 3:
(key, value, the_rest) = match.groups()
auth_params[key.lower()] = UNQUOTE_PAIRS.sub(r'\1', value) # '\\'.join([x.replace('\\', '') for x in value.split('\\\\')])
match = www_auth.search(the_rest)
retval[auth_scheme.lower()] = auth_params
authenticate = the_rest.strip()
return retval
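# A sketch of the parsed form (illustrative):
#   _parse_www_authenticate({'www-authenticate': 'Basic realm="me"'})
#   -> {'basic': {'realm': 'me'}}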
def _entry_disposition(response_headers, request_headers):
"""Determine freshness from the Date, Expires and Cache-Control headers.
We don't handle the following:
1. Cache-Control: max-stale
2. Age: headers are not used in the calculations.
    Note that this algorithm is simpler than you might think
because we are operating as a private (non-shared) cache.
This lets us ignore 's-maxage'. We can also ignore
'proxy-invalidate' since we aren't a proxy.
    As a design decision, we never return a stale document as
    fresh; hence the non-implementation of 'max-stale'. This also
    lets us safely ignore 'must-revalidate'
since we operate as if every server has sent 'must-revalidate'.
Since we are private we get to ignore both 'public' and
'private' parameters. We also ignore 'no-transform' since
we don't do any transformations.
The 'no-store' parameter is handled at a higher level.
So the only Cache-Control parameters we look at are:
no-cache
only-if-cached
max-age
min-fresh
"""
retval = "STALE"
cc = _parse_cache_control(request_headers)
cc_response = _parse_cache_control(response_headers)
if request_headers.has_key('pragma') and request_headers['pragma'].lower().find('no-cache') != -1:
retval = "TRANSPARENT"
if 'cache-control' not in request_headers:
request_headers['cache-control'] = 'no-cache'
elif cc.has_key('no-cache'):
retval = "TRANSPARENT"
elif cc_response.has_key('no-cache'):
retval = "STALE"
elif cc.has_key('only-if-cached'):
retval = "FRESH"
elif response_headers.has_key('date'):
date = calendar.timegm(email.Utils.parsedate_tz(response_headers['date']))
now = time.time()
current_age = max(0, now - date)
if cc_response.has_key('max-age'):
try:
freshness_lifetime = int(cc_response['max-age'])
except ValueError:
freshness_lifetime = 0
elif response_headers.has_key('expires'):
expires = email.Utils.parsedate_tz(response_headers['expires'])
if None == expires:
freshness_lifetime = 0
else:
freshness_lifetime = max(0, calendar.timegm(expires) - date)
else:
freshness_lifetime = 0
if cc.has_key('max-age'):
try:
freshness_lifetime = int(cc['max-age'])
except ValueError:
freshness_lifetime = 0
if cc.has_key('min-fresh'):
try:
min_fresh = int(cc['min-fresh'])
except ValueError:
min_fresh = 0
current_age += min_fresh
if freshness_lifetime > current_age:
retval = "FRESH"
return retval
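# A sketch of the three outcomes (illustrative; the first argument is the
# cached response's headers, the second is the incoming request's headers):
#   _entry_disposition({'cache-control': 'no-cache'}, {})          -> "STALE"
#   _entry_disposition({}, {'cache-control': 'no-cache'})          -> "TRANSPARENT"
#   _entry_disposition({}, {'cache-control': 'only-if-cached'})    -> "FRESH"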
def _decompressContent(response, new_content):
content = new_content
try:
encoding = response.get('content-encoding', None)
if encoding in ['gzip', 'deflate']:
if encoding == 'gzip':
content = gzip.GzipFile(fileobj=StringIO.StringIO(new_content)).read()
if encoding == 'deflate':
content = zlib.decompress(content)
response['content-length'] = str(len(content))
            # Record the historical presence of the encoding in a way that won't interfere.
response['-content-encoding'] = response['content-encoding']
del response['content-encoding']
except IOError:
content = ""
raise FailedToDecompressContent(_("Content purported to be compressed with %s but failed to decompress.") % response.get('content-encoding'), response, content)
return content
def _updateCache(request_headers, response_headers, content, cache, cachekey):
if cachekey:
cc = _parse_cache_control(request_headers)
cc_response = _parse_cache_control(response_headers)
if cc.has_key('no-store') or cc_response.has_key('no-store'):
cache.delete(cachekey)
else:
info = email.Message.Message()
for key, value in response_headers.iteritems():
if key not in ['status','content-encoding','transfer-encoding']:
info[key] = value
# Add annotations to the cache to indicate what headers
# are variant for this request.
vary = response_headers.get('vary', None)
if vary:
vary_headers = vary.lower().replace(' ', '').split(',')
for header in vary_headers:
key = '-varied-%s' % header
try:
info[key] = request_headers[header]
except KeyError:
pass
status = response_headers.status
if status == 304:
status = 200
            status_header = 'status: %d\r\n' % status
header_str = info.as_string()
header_str = re.sub("\r(?!\n)|(?<!\r)\n", "\r\n", header_str)
text = "".join([status_header, header_str, content])
cache.set(cachekey, text)
def _cnonce():
    dig = _md5("%s:%s" % (time.ctime(), ["0123456789"[random.randrange(0, 10)] for i in range(20)])).hexdigest()
return dig[:16]
def _wsse_username_token(cnonce, iso_now, password):
return base64.b64encode(_sha("%s%s%s" % (cnonce, iso_now, password)).digest()).strip()
# For credentials we need two things, first
# a pool of credentials to try (not necessarily tied to Basic, Digest, etc.)
# Then we also need a list of URIs that have already demanded authentication
# That list is tricky since sub-URIs can take the same auth, or the
# auth scheme may change as you descend the tree.
# So we also need each Auth instance to be able to tell us
# how close to the 'top' it is.
class Authentication(object):
def __init__(self, credentials, host, request_uri, headers, response, content, http):
(scheme, authority, path, query, fragment) = parse_uri(request_uri)
self.path = path
self.host = host
self.credentials = credentials
self.http = http
def depth(self, request_uri):
(scheme, authority, path, query, fragment) = parse_uri(request_uri)
return request_uri[len(self.path):].count("/")
def inscope(self, host, request_uri):
# XXX Should we normalize the request_uri?
(scheme, authority, path, query, fragment) = parse_uri(request_uri)
return (host == self.host) and path.startswith(self.path)
def request(self, method, request_uri, headers, content):
"""Modify the request headers to add the appropriate
        Authorization header. Override this in sub-classes."""
pass
def response(self, response, content):
"""Gives us a chance to update with new nonces
or such returned from the last authorized response.
        Override this in sub-classes if necessary.
        Return True if the request is to be retried; for
        example, Digest may return stale=true.
"""
return False
class BasicAuthentication(Authentication):
def __init__(self, credentials, host, request_uri, headers, response, content, http):
Authentication.__init__(self, credentials, host, request_uri, headers, response, content, http)
def request(self, method, request_uri, headers, content):
"""Modify the request headers to add the appropriate
Authorization header."""
headers['authorization'] = 'Basic ' + base64.b64encode("%s:%s" % self.credentials).strip()
class DigestAuthentication(Authentication):
"""Only do qop='auth' and MD5, since that
is all Apache currently implements"""
def __init__(self, credentials, host, request_uri, headers, response, content, http):
Authentication.__init__(self, credentials, host, request_uri, headers, response, content, http)
challenge = _parse_www_authenticate(response, 'www-authenticate')
self.challenge = challenge['digest']
qop = self.challenge.get('qop', 'auth')
self.challenge['qop'] = ('auth' in [x.strip() for x in qop.split()]) and 'auth' or None
if self.challenge['qop'] is None:
raise UnimplementedDigestAuthOptionError( _("Unsupported value for qop: %s." % qop))
self.challenge['algorithm'] = self.challenge.get('algorithm', 'MD5').upper()
if self.challenge['algorithm'] != 'MD5':
raise UnimplementedDigestAuthOptionError( _("Unsupported value for algorithm: %s." % self.challenge['algorithm']))
self.A1 = "".join([self.credentials[0], ":", self.challenge['realm'], ":", self.credentials[1]])
self.challenge['nc'] = 1
def request(self, method, request_uri, headers, content, cnonce = None):
"""Modify the request headers"""
H = lambda x: _md5(x).hexdigest()
KD = lambda s, d: H("%s:%s" % (s, d))
A2 = "".join([method, ":", request_uri])
self.challenge['cnonce'] = cnonce or _cnonce()
request_digest = '"%s"' % KD(H(self.A1), "%s:%s:%s:%s:%s" % (self.challenge['nonce'],
'%08x' % self.challenge['nc'],
self.challenge['cnonce'],
self.challenge['qop'], H(A2)
))
headers['Authorization'] = 'Digest username="%s", realm="%s", nonce="%s", uri="%s", algorithm=%s, response=%s, qop=%s, nc=%08x, cnonce="%s"' % (
self.credentials[0],
self.challenge['realm'],
self.challenge['nonce'],
request_uri,
self.challenge['algorithm'],
request_digest,
self.challenge['qop'],
self.challenge['nc'],
self.challenge['cnonce'],
)
self.challenge['nc'] += 1
def response(self, response, content):
if not response.has_key('authentication-info'):
challenge = _parse_www_authenticate(response, 'www-authenticate').get('digest', {})
if 'true' == challenge.get('stale'):
self.challenge['nonce'] = challenge['nonce']
self.challenge['nc'] = 1
return True
else:
updated_challenge = _parse_www_authenticate(response, 'authentication-info').get('digest', {})
if updated_challenge.has_key('nextnonce'):
self.challenge['nonce'] = updated_challenge['nextnonce']
self.challenge['nc'] = 1
return False
class HmacDigestAuthentication(Authentication):
"""Adapted from Robert Sayre's code and DigestAuthentication above."""
__author__ = "Thomas Broyer (t.broyer@ltgt.net)"
def __init__(self, credentials, host, request_uri, headers, response, content, http):
Authentication.__init__(self, credentials, host, request_uri, headers, response, content, http)
challenge = _parse_www_authenticate(response, 'www-authenticate')
self.challenge = challenge['hmacdigest']
# TODO: self.challenge['domain']
self.challenge['reason'] = self.challenge.get('reason', 'unauthorized')
if self.challenge['reason'] not in ['unauthorized', 'integrity']:
self.challenge['reason'] = 'unauthorized'
self.challenge['salt'] = self.challenge.get('salt', '')
if not self.challenge.get('snonce'):
raise UnimplementedHmacDigestAuthOptionError( _("The challenge doesn't contain a server nonce, or this one is empty."))
self.challenge['algorithm'] = self.challenge.get('algorithm', 'HMAC-SHA-1')
if self.challenge['algorithm'] not in ['HMAC-SHA-1', 'HMAC-MD5']:
raise UnimplementedHmacDigestAuthOptionError( _("Unsupported value for algorithm: %s." % self.challenge['algorithm']))
self.challenge['pw-algorithm'] = self.challenge.get('pw-algorithm', 'SHA-1')
if self.challenge['pw-algorithm'] not in ['SHA-1', 'MD5']:
raise UnimplementedHmacDigestAuthOptionError( _("Unsupported value for pw-algorithm: %s." % self.challenge['pw-algorithm']))
if self.challenge['algorithm'] == 'HMAC-MD5':
self.hashmod = _md5
else:
self.hashmod = _sha
if self.challenge['pw-algorithm'] == 'MD5':
self.pwhashmod = _md5
else:
self.pwhashmod = _sha
self.key = "".join([self.credentials[0], ":",
self.pwhashmod.new("".join([self.credentials[1], self.challenge['salt']])).hexdigest().lower(),
":", self.challenge['realm']
])
self.key = self.pwhashmod.new(self.key).hexdigest().lower()
def request(self, method, request_uri, headers, content):
"""Modify the request headers"""
keys = _get_end2end_headers(headers)
keylist = "".join(["%s " % k for k in keys])
headers_val = "".join([headers[k] for k in keys])
created = time.strftime('%Y-%m-%dT%H:%M:%SZ',time.gmtime())
cnonce = _cnonce()
request_digest = "%s:%s:%s:%s:%s" % (method, request_uri, cnonce, self.challenge['snonce'], headers_val)
request_digest = hmac.new(self.key, request_digest, self.hashmod).hexdigest().lower()
headers['Authorization'] = 'HMACDigest username="%s", realm="%s", snonce="%s", cnonce="%s", uri="%s", created="%s", response="%s", headers="%s"' % (
self.credentials[0],
self.challenge['realm'],
self.challenge['snonce'],
cnonce,
request_uri,
created,
request_digest,
keylist,
)
def response(self, response, content):
challenge = _parse_www_authenticate(response, 'www-authenticate').get('hmacdigest', {})
if challenge.get('reason') in ['integrity', 'stale']:
return True
return False
class WsseAuthentication(Authentication):
"""This is thinly tested and should not be relied upon.
At this time there isn't any third party server to test against.
Blogger and TypePad implemented this algorithm at one point
but Blogger has since switched to Basic over HTTPS and
TypePad has implemented it wrong, by never issuing a 401
challenge but instead requiring your client to telepathically know that
their endpoint is expecting WSSE profile="UsernameToken"."""
def __init__(self, credentials, host, request_uri, headers, response, content, http):
Authentication.__init__(self, credentials, host, request_uri, headers, response, content, http)
def request(self, method, request_uri, headers, content):
"""Modify the request headers to add the appropriate
Authorization header."""
headers['Authorization'] = 'WSSE profile="UsernameToken"'
iso_now = time.strftime("%Y-%m-%dT%H:%M:%SZ", time.gmtime())
cnonce = _cnonce()
password_digest = _wsse_username_token(cnonce, iso_now, self.credentials[1])
headers['X-WSSE'] = 'UsernameToken Username="%s", PasswordDigest="%s", Nonce="%s", Created="%s"' % (
self.credentials[0],
password_digest,
cnonce,
iso_now)
class GoogleLoginAuthentication(Authentication):
def __init__(self, credentials, host, request_uri, headers, response, content, http):
from urllib import urlencode
Authentication.__init__(self, credentials, host, request_uri, headers, response, content, http)
challenge = _parse_www_authenticate(response, 'www-authenticate')
service = challenge['googlelogin'].get('service', 'xapi')
        # Blogger actually returns the service in the challenge
# For the rest we guess based on the URI
if service == 'xapi' and request_uri.find("calendar") > 0:
service = "cl"
# No point in guessing Base or Spreadsheet
#elif request_uri.find("spreadsheets") > 0:
# service = "wise"
auth = dict(Email=credentials[0], Passwd=credentials[1], service=service, source=headers['user-agent'])
resp, content = self.http.request("https://www.google.com/accounts/ClientLogin", method="POST", body=urlencode(auth), headers={'Content-Type': 'application/x-www-form-urlencoded'})
lines = content.split('\n')
d = dict([tuple(line.split("=", 1)) for line in lines if line])
if resp.status == 403:
self.Auth = ""
else:
self.Auth = d['Auth']
def request(self, method, request_uri, headers, content):
"""Modify the request headers to add the appropriate
Authorization header."""
headers['authorization'] = 'GoogleLogin Auth=' + self.Auth
AUTH_SCHEME_CLASSES = {
"basic": BasicAuthentication,
"wsse": WsseAuthentication,
"digest": DigestAuthentication,
"hmacdigest": HmacDigestAuthentication,
"googlelogin": GoogleLoginAuthentication
}
AUTH_SCHEME_ORDER = ["hmacdigest", "googlelogin", "digest", "wsse", "basic"]
class FileCache(object):
"""Uses a local directory as a store for cached files.
Not really safe to use if multiple threads or processes are going to
be running on the same cache.
"""
def __init__(self, cache, safe=safename): # use safe=lambda x: md5.new(x).hexdigest() for the old behavior
self.cache = cache
self.safe = safe
if not os.path.exists(cache):
os.makedirs(self.cache)
def get(self, key):
retval = None
cacheFullPath = os.path.join(self.cache, self.safe(key))
try:
f = file(cacheFullPath, "rb")
retval = f.read()
f.close()
except IOError:
pass
return retval
def set(self, key, value):
cacheFullPath = os.path.join(self.cache, self.safe(key))
f = file(cacheFullPath, "wb")
f.write(value)
f.close()
def delete(self, key):
cacheFullPath = os.path.join(self.cache, self.safe(key))
if os.path.exists(cacheFullPath):
os.remove(cacheFullPath)
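# A minimal FileCache usage sketch (illustrative; creates ./.cache):
#   c = FileCache(".cache")
#   c.set("http://example.org/", "cached bytes")
#   c.get("http://example.org/")    -> 'cached bytes'
#   c.delete("http://example.org/")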
class Credentials(object):
def __init__(self):
self.credentials = []
def add(self, name, password, domain=""):
self.credentials.append((domain.lower(), name, password))
def clear(self):
self.credentials = []
def iter(self, domain):
for (cdomain, name, password) in self.credentials:
if cdomain == "" or domain == cdomain:
yield (name, password)
class KeyCerts(Credentials):
"""Identical to Credentials except that
name/password are mapped to key/cert."""
pass
class ProxyInfo(object):
"""Collect information required to use a proxy."""
def __init__(self, proxy_type, proxy_host, proxy_port, proxy_rdns=None, proxy_user=None, proxy_pass=None):
"""The parameter proxy_type must be set to one of socks.PROXY_TYPE_XXX
constants. For example:
p = ProxyInfo(proxy_type=socks.PROXY_TYPE_HTTP, proxy_host='localhost', proxy_port=8000)
"""
self.proxy_type, self.proxy_host, self.proxy_port, self.proxy_rdns, self.proxy_user, self.proxy_pass = proxy_type, proxy_host, proxy_port, proxy_rdns, proxy_user, proxy_pass
def astuple(self):
return (self.proxy_type, self.proxy_host, self.proxy_port, self.proxy_rdns,
self.proxy_user, self.proxy_pass)
def isgood(self):
        return socks and (self.proxy_host is not None) and (self.proxy_port is not None)
class HTTPConnectionWithTimeout(httplib.HTTPConnection):
"""HTTPConnection subclass that supports timeouts"""
def __init__(self, host, port=None, strict=None, timeout=None, proxy_info=None):
httplib.HTTPConnection.__init__(self, host, port, strict)
self.timeout = timeout
self.proxy_info = proxy_info
def connect(self):
"""Connect to the host and port specified in __init__."""
# Mostly verbatim from httplib.py.
msg = "getaddrinfo returns an empty list"
for res in socket.getaddrinfo(self.host, self.port, 0,
socket.SOCK_STREAM):
af, socktype, proto, canonname, sa = res
try:
if self.proxy_info and self.proxy_info.isgood():
self.sock = socks.socksocket(af, socktype, proto)
self.sock.setproxy(*self.proxy_info.astuple())
else:
self.sock = socket.socket(af, socktype, proto)
# Different from httplib: support timeouts.
if has_timeout(self.timeout):
self.sock.settimeout(self.timeout)
# End of difference from httplib.
if self.debuglevel > 0:
print "connect: (%s, %s)" % (self.host, self.port)
self.sock.connect(sa)
except socket.error, msg:
if self.debuglevel > 0:
print 'connect fail:', (self.host, self.port)
if self.sock:
self.sock.close()
self.sock = None
continue
break
if not self.sock:
raise socket.error, msg
class HTTPSConnectionWithTimeout(httplib.HTTPSConnection):
"This class allows communication via SSL."
def __init__(self, host, port=None, key_file=None, cert_file=None,
strict=None, timeout=None, proxy_info=None):
httplib.HTTPSConnection.__init__(self, host, port=port, key_file=key_file,
cert_file=cert_file, strict=strict)
self.timeout = timeout
self.proxy_info = proxy_info
def connect(self):
"Connect to a host on a given (SSL) port."
if self.proxy_info and self.proxy_info.isgood():
sock = socks.socksocket(socket.AF_INET, socket.SOCK_STREAM)
sock.setproxy(*self.proxy_info.astuple())
else:
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
if has_timeout(self.timeout):
sock.settimeout(self.timeout)
sock.connect((self.host, self.port))
self.sock =_ssl_wrap_socket(sock, self.key_file, self.cert_file)
class Http(object):
"""An HTTP client that handles:
- all methods
- caching
- ETags
    - compression
- HTTPS
- Basic
- Digest
- WSSE
and more.
"""
def __init__(self, cache=None, timeout=None, proxy_info=None):
"""The value of proxy_info is a ProxyInfo instance.
If 'cache' is a string then it is used as a directory name
for a disk cache. Otherwise it must be an object that supports
the same interface as FileCache."""
self.proxy_info = proxy_info
# Map domain name to an httplib connection
self.connections = {}
# The location of the cache, for now a directory
# where cached responses are held.
if cache and isinstance(cache, str):
self.cache = FileCache(cache)
else:
self.cache = cache
# Name/password
self.credentials = Credentials()
# Key/cert
self.certificates = KeyCerts()
# authorization objects
self.authorizations = []
# If set to False then no redirects are followed, even safe ones.
self.follow_redirects = True
# Which HTTP methods do we apply optimistic concurrency to, i.e.
# which methods get an "if-match:" etag header added to them.
self.optimistic_concurrency_methods = ["PUT"]
# If 'follow_redirects' is True, and this is set to True then
        # all redirects are followed, including unsafe ones.
self.follow_all_redirects = False
self.ignore_etag = False
self.force_exception_to_status_code = False
self.timeout = timeout
def _auth_from_challenge(self, host, request_uri, headers, response, content):
"""A generator that creates Authorization objects
that can be applied to requests.
"""
challenges = _parse_www_authenticate(response, 'www-authenticate')
for cred in self.credentials.iter(host):
for scheme in AUTH_SCHEME_ORDER:
if challenges.has_key(scheme):
yield AUTH_SCHEME_CLASSES[scheme](cred, host, request_uri, headers, response, content, self)
def add_credentials(self, name, password, domain=""):
"""Add a name and password that will be used
any time a request requires authentication."""
self.credentials.add(name, password, domain)
def add_certificate(self, key, cert, domain):
"""Add a key and cert that will be used
any time a request requires authentication."""
self.certificates.add(key, cert, domain)
def clear_credentials(self):
"""Remove all the names and passwords
that are used for authentication"""
self.credentials.clear()
self.authorizations = []
def _conn_request(self, conn, request_uri, method, body, headers):
for i in range(2):
try:
conn.request(method, request_uri, body, headers)
except socket.gaierror:
conn.close()
raise ServerNotFoundError("Unable to find the server at %s" % conn.host)
except (socket.error, httplib.HTTPException):
# Just because the server closed the connection doesn't apparently mean
# that the server didn't send a response.
pass
try:
response = conn.getresponse()
except (socket.error, httplib.HTTPException):
if i == 0:
conn.close()
conn.connect()
continue
else:
raise
else:
content = ""
if method == "HEAD":
response.close()
else:
content = response.read()
response = Response(response)
if method != "HEAD":
content = _decompressContent(response, content)
break
return (response, content)
def _request(self, conn, host, absolute_uri, request_uri, method, body, headers, redirections, cachekey):
"""Do the actual request using the connection object
and also follow one level of redirects if necessary"""
auths = [(auth.depth(request_uri), auth) for auth in self.authorizations if auth.inscope(host, request_uri)]
auth = auths and sorted(auths)[0][1] or None
if auth:
auth.request(method, request_uri, headers, body)
(response, content) = self._conn_request(conn, request_uri, method, body, headers)
if auth:
if auth.response(response, body):
auth.request(method, request_uri, headers, body)
(response, content) = self._conn_request(conn, request_uri, method, body, headers )
response._stale_digest = 1
if response.status == 401:
for authorization in self._auth_from_challenge(host, request_uri, headers, response, content):
authorization.request(method, request_uri, headers, body)
(response, content) = self._conn_request(conn, request_uri, method, body, headers, )
if response.status != 401:
self.authorizations.append(authorization)
authorization.response(response, body)
break
if (self.follow_all_redirects or (method in ["GET", "HEAD"]) or response.status == 303):
if self.follow_redirects and response.status in [300, 301, 302, 303, 307]:
# Pick out the location header and basically start from the beginning
# remembering first to strip the ETag header and decrement our 'depth'
if redirections:
if not response.has_key('location') and response.status != 300:
raise RedirectMissingLocation( _("Redirected but the response is missing a Location: header."), response, content)
# Fix-up relative redirects (which violate an RFC 2616 MUST)
if response.has_key('location'):
location = response['location']
(scheme, authority, path, query, fragment) = parse_uri(location)
if authority == None:
response['location'] = urlparse.urljoin(absolute_uri, location)
if response.status == 301 and method in ["GET", "HEAD"]:
response['-x-permanent-redirect-url'] = response['location']
if not response.has_key('content-location'):
response['content-location'] = absolute_uri
_updateCache(headers, response, content, self.cache, cachekey)
if headers.has_key('if-none-match'):
del headers['if-none-match']
if headers.has_key('if-modified-since'):
del headers['if-modified-since']
if response.has_key('location'):
location = response['location']
old_response = copy.deepcopy(response)
if not old_response.has_key('content-location'):
old_response['content-location'] = absolute_uri
redirect_method = ((response.status == 303) and (method not in ["GET", "HEAD"])) and "GET" or method
(response, content) = self.request(location, redirect_method, body=body, headers = headers, redirections = redirections - 1)
response.previous = old_response
else:
                    raise RedirectLimit( _("Redirected more times than redirection_limit allows."), response, content)
elif response.status in [200, 203] and method == "GET":
# Don't cache 206's since we aren't going to handle byte range requests
if not response.has_key('content-location'):
response['content-location'] = absolute_uri
_updateCache(headers, response, content, self.cache, cachekey)
return (response, content)
def _normalize_headers(self, headers):
return _normalize_headers(headers)
# Need to catch and rebrand some exceptions
# Then need to optionally turn all exceptions into status codes
# including all socket.* and httplib.* exceptions.
def request(self, uri, method="GET", body=None, headers=None, redirections=DEFAULT_MAX_REDIRECTS, connection_type=None):
""" Performs a single HTTP request.
The 'uri' is the URI of the HTTP resource and can begin
with either 'http' or 'https'. The value of 'uri' must be an absolute URI.
The 'method' is the HTTP method to perform, such as GET, POST, DELETE, etc.
There is no restriction on the methods allowed.
The 'body' is the entity body to be sent with the request. It is a string
object.
Any extra headers that are to be sent with the request should be provided in the
'headers' dictionary.
        The maximum number of redirects to follow before raising an
        exception is 'redirections'. The default is 5.
        The return value is a tuple of (response, content), the first
        being an instance of the 'Response' class, the second being
        a string that contains the response entity body.
"""
try:
if headers is None:
headers = {}
else:
headers = self._normalize_headers(headers)
if not headers.has_key('user-agent'):
headers['user-agent'] = "Python-httplib2/%s" % __version__
uri = iri2uri(uri)
(scheme, authority, request_uri, defrag_uri) = urlnorm(uri)
domain_port = authority.split(":")[0:2]
if len(domain_port) == 2 and domain_port[1] == '443' and scheme == 'http':
scheme = 'https'
authority = domain_port[0]
conn_key = scheme+":"+authority
if conn_key in self.connections:
conn = self.connections[conn_key]
else:
if not connection_type:
connection_type = (scheme == 'https') and HTTPSConnectionWithTimeout or HTTPConnectionWithTimeout
certs = list(self.certificates.iter(authority))
if scheme == 'https' and certs:
conn = self.connections[conn_key] = connection_type(authority, key_file=certs[0][0],
cert_file=certs[0][1], timeout=self.timeout, proxy_info=self.proxy_info)
else:
conn = self.connections[conn_key] = connection_type(authority, timeout=self.timeout, proxy_info=self.proxy_info)
conn.set_debuglevel(debuglevel)
if method in ["GET", "HEAD"] and 'range' not in headers and 'accept-encoding' not in headers:
headers['accept-encoding'] = 'gzip, deflate'
info = email.Message.Message()
cached_value = None
if self.cache:
cachekey = defrag_uri
cached_value = self.cache.get(cachekey)
if cached_value:
# info = email.message_from_string(cached_value)
#
# Need to replace the line above with the kludge below
# to fix the non-existent bug not fixed in this
# bug report: http://mail.python.org/pipermail/python-bugs-list/2005-September/030289.html
try:
info, content = cached_value.split('\r\n\r\n', 1)
feedparser = email.FeedParser.FeedParser()
feedparser.feed(info)
info = feedparser.close()
feedparser._parse = None
                    except (IndexError, ValueError):
self.cache.delete(cachekey)
cachekey = None
cached_value = None
else:
cachekey = None
if method in self.optimistic_concurrency_methods and self.cache and info.has_key('etag') and not self.ignore_etag and 'if-match' not in headers:
# http://www.w3.org/1999/04/Editing/
headers['if-match'] = info['etag']
if method not in ["GET", "HEAD"] and self.cache and cachekey:
# RFC 2616 Section 13.10
self.cache.delete(cachekey)
# Check the vary header in the cache to see if this request
# matches what varies in the cache.
if method in ['GET', 'HEAD'] and 'vary' in info:
vary = info['vary']
vary_headers = vary.lower().replace(' ', '').split(',')
for header in vary_headers:
key = '-varied-%s' % header
value = info[key]
if headers.get(header, '') != value:
cached_value = None
break
if cached_value and method in ["GET", "HEAD"] and self.cache and 'range' not in headers:
if info.has_key('-x-permanent-redirect-url'):
# Should cached permanent redirects be counted in our redirection count? For now, yes.
(response, new_content) = self.request(info['-x-permanent-redirect-url'], "GET", headers = headers, redirections = redirections - 1)
response.previous = Response(info)
response.previous.fromcache = True
else:
# Determine our course of action:
# Is the cached entry fresh or stale?
# Has the client requested a non-cached response?
#
                # There seem to be three possible answers:
# 1. [FRESH] Return the cache entry w/o doing a GET
# 2. [STALE] Do the GET (but add in cache validators if available)
# 3. [TRANSPARENT] Do a GET w/o any cache validators (Cache-Control: no-cache) on the request
entry_disposition = _entry_disposition(info, headers)
if entry_disposition == "FRESH":
if not cached_value:
info['status'] = '504'
content = ""
response = Response(info)
if cached_value:
response.fromcache = True
return (response, content)
if entry_disposition == "STALE":
if info.has_key('etag') and not self.ignore_etag and not 'if-none-match' in headers:
headers['if-none-match'] = info['etag']
if info.has_key('last-modified') and not 'last-modified' in headers:
headers['if-modified-since'] = info['last-modified']
elif entry_disposition == "TRANSPARENT":
pass
(response, new_content) = self._request(conn, authority, uri, request_uri, method, body, headers, redirections, cachekey)
if response.status == 304 and method == "GET":
# Rewrite the cache entry with the new end-to-end headers
# Take all headers that are in response
# and overwrite their values in info.
# unless they are hop-by-hop, or are listed in the connection header.
for key in _get_end2end_headers(response):
info[key] = response[key]
merged_response = Response(info)
if hasattr(response, "_stale_digest"):
merged_response._stale_digest = response._stale_digest
_updateCache(headers, merged_response, content, self.cache, cachekey)
response = merged_response
response.status = 200
response.fromcache = True
elif response.status == 200:
content = new_content
else:
self.cache.delete(cachekey)
content = new_content
else:
cc = _parse_cache_control(headers)
if cc.has_key('only-if-cached'):
info['status'] = '504'
response = Response(info)
content = ""
else:
(response, content) = self._request(conn, authority, uri, request_uri, method, body, headers, redirections, cachekey)
except Exception, e:
if self.force_exception_to_status_code:
if isinstance(e, HttpLib2ErrorWithResponse):
response = e.response
content = e.content
response.status = 500
response.reason = str(e)
elif isinstance(e, socket.timeout):
content = "Request Timeout"
response = Response( {
"content-type": "text/plain",
"status": "408",
"content-length": len(content)
})
response.reason = "Request Timeout"
else:
content = str(e)
response = Response( {
"content-type": "text/plain",
"status": "400",
"content-length": len(content)
})
response.reason = "Bad Request"
else:
raise
return (response, content)
class Response(dict):
"""An object more like email.Message than httplib.HTTPResponse."""
"""Is this response from our local cache"""
fromcache = False
"""HTTP protocol version used by server. 10 for HTTP/1.0, 11 for HTTP/1.1. """
version = 11
"Status code returned by server. "
status = 200
"""Reason phrase returned by server."""
reason = "Ok"
previous = None
def __init__(self, info):
# info is either an email.Message or
# an httplib.HTTPResponse object.
if isinstance(info, httplib.HTTPResponse):
for key, value in info.getheaders():
self[key.lower()] = value
self.status = info.status
self['status'] = str(self.status)
self.reason = info.reason
self.version = info.version
elif isinstance(info, email.Message.Message):
for key, value in info.items():
self[key] = value
self.status = int(self['status'])
else:
for key, value in info.iteritems():
self[key] = value
self.status = int(self.get('status', self.status))
def __getattr__(self, name):
if name == 'dict':
return self
else:
raise AttributeError, name
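# A usage sketch (illustrative): a Response acts as a dict of lower-cased
# headers with status/reason/version attributes layered on top:
#   r = Response({'status': '200', 'content-type': 'text/plain'})
#   r.status            -> 200
#   r['content-type']   -> 'text/plain'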
| bsd-3-clause |
jlachowski/django-tracking | tracking/middleware.py | 1 | 7690 | from __future__ import unicode_literals
from __future__ import print_function
from __future__ import division
from __future__ import absolute_import
from future import standard_library
standard_library.install_aliases()
from builtins import str
from builtins import *
from builtins import object
from datetime import datetime, timedelta
import logging
import re
import traceback
from django.conf import settings
from django.contrib.auth.models import AnonymousUser
from django.core.cache import cache
from django.urls import reverse, NoReverseMatch
from django.db.utils import DatabaseError, IntegrityError
from django.http import Http404
from django.db import transaction
from tracking import utils
from tracking.models import Visitor, UntrackedUserAgent, BannedIP
title_re = re.compile('<title>(.*?)</title>')
log = logging.getLogger('tracking.middleware')
class VisitorTrackingMiddleware(object):
"""
Keeps track of your active users. Anytime a visitor accesses a valid URL,
their unique record will be updated with the page they're on and the last
time they requested a page.
Records are considered to be unique when the session key and IP address
are unique together. Sometimes the same user used to have two different
records, so I added a check to see if the session key had changed for the
    same IP and user agent in the last 5 minutes.
"""
@property
def prefixes(self):
"""Returns a list of URL prefixes that we should not track"""
if not hasattr(self, '_prefixes'):
self._prefixes = getattr(settings, 'NO_TRACKING_PREFIXES', [])
if not getattr(settings, '_FREEZE_TRACKING_PREFIXES', False):
for name in ('MEDIA_URL', 'STATIC_URL'):
url = getattr(settings, name)
if url and url != '/':
self._prefixes.append(url)
try:
# finally, don't track requests to the tracker update pages
self._prefixes.append(reverse('tracking-refresh-active-users'))
except NoReverseMatch:
# django-tracking hasn't been included in the URLconf if we
# get here, which is not a bad thing
pass
settings.NO_TRACKING_PREFIXES = self._prefixes
settings._FREEZE_TRACKING_PREFIXES = True
return self._prefixes
def process_request(self, request):
# don't process AJAX requests
if request.is_ajax(): return
# create some useful variables
ip_address = utils.get_ip(request)
user_agent = request.META.get('HTTP_USER_AGENT', '')[:255]
# retrieve untracked user agents from cache
ua_key = '_tracking_untracked_uas'
untracked = cache.get(ua_key)
if untracked is None:
log.info('Updating untracked user agent cache')
untracked = UntrackedUserAgent.objects.all()
cache.set(ua_key, untracked, 3600)
# see if the user agent is not supposed to be tracked
for ua in untracked:
# if the keyword is found in the user agent, stop tracking
if user_agent.find(ua.keyword) != -1:
log.debug('Not tracking UA "%s" because of keyword: %s' % (user_agent, ua.keyword))
return
if hasattr(request, 'session') and request.session.session_key:
# use the current session key if we can
session_key = request.session.session_key
else:
# otherwise just fake a session key
session_key = '%s:%s' % (ip_address, user_agent)
session_key = session_key[:40]
# ensure that the request.path does not begin with any of the prefixes
for prefix in self.prefixes:
if request.path.startswith(prefix):
log.debug('Not tracking request to: %s' % request.path)
return
# if we get here, the URL needs to be tracked
# determine what time it is
now = utils.get_now()
attrs = {
'session_key': session_key,
'ip_address': ip_address
}
# for some reason, Visitor.objects.get_or_create was not working here
try:
visitor = Visitor.objects.get(**attrs)
except Visitor.DoesNotExist:
# see if there's a visitor with the same IP and user agent
# within the last 5 minutes
cutoff = now - timedelta(minutes=5)
visitors = Visitor.objects.filter(
ip_address=ip_address,
user_agent=user_agent,
last_update__gte=cutoff
)
if len(visitors):
visitor = visitors[0]
visitor.session_key = session_key
log.debug('Using existing visitor for IP %s / UA %s: %s' % (ip_address, user_agent, visitor.id))
else:
# it's probably safe to assume that the visitor is brand new
visitor = Visitor(**attrs)
log.debug('Created a new visitor: %s' % attrs)
        except Exception:
            # any other error while looking up the visitor: skip tracking
            # for this request rather than break the response
            return
# determine whether or not the user is logged in
user = request.user
if isinstance(user, AnonymousUser):
user = None
# update the tracking information
visitor.user = user
visitor.user_agent = user_agent
# if the visitor record is new, or the visitor hasn't been here for
# at least an hour, update their referrer URL
one_hour_ago = now - timedelta(hours=1)
if not visitor.last_update or visitor.last_update <= one_hour_ago:
visitor.referrer = utils.u_clean(request.META.get('HTTP_REFERER', 'unknown')[:255])
# reset the number of pages they've been to
visitor.page_views = 0
visitor.session_start = now
visitor.url = request.path
visitor.page_views += 1
visitor.last_update = now
try:
sid = transaction.savepoint()
visitor.save()
transaction.savepoint_commit(sid)
except IntegrityError:
transaction.savepoint_rollback(sid)
except DatabaseError:
log.error('There was a problem saving visitor information:\n%s\n\n%s' % (traceback.format_exc(), locals()))
class VisitorCleanUpMiddleware(object):
"""Clean up old visitor tracking records in the database"""
def process_request(self, request):
timeout = utils.get_cleanup_timeout()
if str(timeout).isdigit():
log.debug('Cleaning up visitors older than %s hours' % timeout)
now = utils.get_now()
timeout = now - timedelta(hours=int(timeout))
Visitor.objects.filter(last_update__lte=timeout).delete()
class BannedIPMiddleware(object):
"""
Raises an Http404 error for any page request from a banned IP. IP addresses
may be added to the list of banned IPs via the Django admin.
The banned users do not actually receive the 404 error--instead they get
an "Internal Server Error", effectively eliminating any access to the site.
"""
def process_request(self, request):
key = '_tracking_banned_ips'
ips = cache.get(key)
if ips is None:
# compile a list of all banned IP addresses
log.info('Updating banned IPs cache')
ips = [b.ip_address for b in BannedIP.objects.all()]
cache.set(key, ips, 3600)
# check to see if the current user's IP address is in that list
if utils.get_ip(request) in ips:
raise Http404
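# A minimal configuration sketch (illustrative, not part of this module)
# showing how these middleware classes are typically wired up in settings;
# the prefix value below is an assumption for the example:
#
#     MIDDLEWARE_CLASSES = (
#         'tracking.middleware.VisitorTrackingMiddleware',
#         'tracking.middleware.VisitorCleanUpMiddleware',
#         'tracking.middleware.BannedIPMiddleware',
#     )
#     NO_TRACKING_PREFIXES = ['/private/']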
| mit |
BT-astauder/odoo | addons/sale_crm/wizard/__init__.py | 443 | 1077 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import crm_make_sale
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
3quarterstack/simple_blog | django/contrib/gis/tests/geoadmin/tests.py | 70 | 2832 | from __future__ import absolute_import
from django.contrib.gis import admin
from django.contrib.gis.geos import GEOSGeometry, Point
from django.test import TestCase
from django.test.utils import override_settings
from .models import City
GOOGLE_MAPS_API_KEY = 'XXXX'
class GeoAdminTest(TestCase):
urls = 'django.contrib.gis.tests.geoadmin.urls'
def test_ensure_geographic_media(self):
geoadmin = admin.site._registry[City]
admin_js = geoadmin.media.render_js()
self.assertTrue(any([geoadmin.openlayers_url in js for js in admin_js]))
def test_olmap_OSM_rendering(self):
geoadmin = admin.site._registry[City]
result = geoadmin.get_map_widget(City._meta.get_field('point'))(
).render('point', Point(-79.460734, 40.18476))
self.assertIn(
"""geodjango_point.layers.base = new OpenLayers.Layer.OSM("OpenStreetMap (Mapnik)");""",
result)
def test_olmap_WMS_rendering(self):
admin.site.unregister(City)
admin.site.register(City, admin.GeoModelAdmin)
geoadmin = admin.site._registry[City]
result = geoadmin.get_map_widget(City._meta.get_field('point'))(
).render('point', Point(-79.460734, 40.18476))
self.assertIn(
"""geodjango_point.layers.base = new OpenLayers.Layer.WMS("OpenLayers WMS", "http://vmap0.tiles.osgeo.org/wms/vmap0", {layers: \'basic\', format: 'image/jpeg'});""",
result)
def test_olwidget_has_changed(self):
"""
Check that changes are accurately noticed by OpenLayersWidget.
"""
geoadmin = admin.site._registry[City]
form = geoadmin.get_changelist_form(None)()
has_changed = form.fields['point'].widget._has_changed
initial = Point(13.4197458572965953, 52.5194108501149799, srid=4326)
data_same = "SRID=3857;POINT(1493879.2754093995 6894592.019687599)"
data_almost_same = "SRID=3857;POINT(1493879.2754093990 6894592.019687590)"
data_changed = "SRID=3857;POINT(1493884.0527237 6894593.8111804)"
self.assertTrue(has_changed(None, data_changed))
self.assertTrue(has_changed(initial, ""))
self.assertFalse(has_changed(None, ""))
self.assertFalse(has_changed(initial, data_same))
self.assertFalse(has_changed(initial, data_almost_same))
self.assertTrue(has_changed(initial, data_changed))
@override_settings(GOOGLE_MAPS_API_KEY=GOOGLE_MAPS_API_KEY)
def test_google_map_scripts(self):
"""
Testing GoogleMap.scripts() output. See #20773.
"""
from django.contrib.gis.maps.google.gmap import GoogleMap
google_map = GoogleMap()
scripts = google_map.scripts
self.assertIn(GOOGLE_MAPS_API_KEY, scripts)
self.assertIn("new GMap2", scripts)
| mit |
Djabbz/wakatime | wakatime/packages/pygments_py3/pygments/__init__.py | 29 | 2980 | # -*- coding: utf-8 -*-
"""
Pygments
~~~~~~~~
Pygments is a syntax highlighting package written in Python.
It is a generic syntax highlighter for general use in all kinds of software
such as forum systems, wikis or other applications that need to prettify
source code. Highlights are:
* a wide range of common languages and markup formats is supported
* special attention is paid to details, increasing quality by a fair amount
    * support for new languages and formats is added easily
* a number of output formats, presently HTML, LaTeX, RTF, SVG, all image
formats that PIL supports, and ANSI sequences
* it is usable as a command-line tool and as a library
* ... and it highlights even Brainfuck!
The `Pygments tip`_ is installable with ``easy_install Pygments==dev``.
.. _Pygments tip:
http://bitbucket.org/birkenfeld/pygments-main/get/tip.zip#egg=Pygments-dev
:copyright: Copyright 2006-2014 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
__version__ = '2.0.1'
__docformat__ = 'restructuredtext'
__all__ = ['lex', 'format', 'highlight']
import sys
from pygments.util import StringIO, BytesIO
def lex(code, lexer):
"""
Lex ``code`` with ``lexer`` and return an iterable of tokens.
"""
try:
return lexer.get_tokens(code)
except TypeError as err:
if isinstance(err.args[0], str) and \
'unbound method get_tokens' in err.args[0]:
raise TypeError('lex() argument must be a lexer instance, '
'not a class')
raise
def format(tokens, formatter, outfile=None):
"""
Format a tokenlist ``tokens`` with the formatter ``formatter``.
If ``outfile`` is given and a valid file object (an object
with a ``write`` method), the result will be written to it, otherwise
it is returned as a string.
"""
try:
if not outfile:
#print formatter, 'using', formatter.encoding
realoutfile = formatter.encoding and BytesIO() or StringIO()
formatter.format(tokens, realoutfile)
return realoutfile.getvalue()
else:
formatter.format(tokens, outfile)
except TypeError as err:
if isinstance(err.args[0], str) and \
'unbound method format' in err.args[0]:
raise TypeError('format() argument must be a formatter instance, '
'not a class')
raise
def highlight(code, lexer, formatter, outfile=None):
"""
Lex ``code`` with ``lexer`` and format it with the formatter ``formatter``.
If ``outfile`` is given and a valid file object (an object
with a ``write`` method), the result will be written to it, otherwise
it is returned as a string.
"""
return format(lex(code, lexer), formatter, outfile)
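# Illustrative usage (not part of the original module), assuming the stock
# PythonLexer and HtmlFormatter that ship with Pygments:
def _example_highlight():
    from pygments.lexers import PythonLexer
    from pygments.formatters import HtmlFormatter
    # lex the snippet with the Python lexer, then render the tokens as HTML
    return highlight('print("hello")', PythonLexer(), HtmlFormatter())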
if __name__ == '__main__':
from pygments.cmdline import main
sys.exit(main(sys.argv))
| bsd-3-clause |
ChinaMassClouds/copenstack-server | openstack/src/nova-2014.2/nova/api/openstack/compute/plugins/v3/flavor_access.py | 14 | 6000 | # Copyright (c) 2011 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""The flavor access extension."""
import webob
from nova.api.openstack.compute.schemas.v3 import flavor_access
from nova.api.openstack import extensions
from nova.api.openstack import wsgi
from nova.api import validation
from nova import exception
from nova.i18n import _
from nova import objects
ALIAS = 'os-flavor-access'
soft_authorize = extensions.soft_extension_authorizer('compute',
'v3:' + ALIAS)
authorize = extensions.extension_authorizer('compute', 'v3:%s' % ALIAS)
def _marshall_flavor_access(flavor):
rval = []
for project_id in flavor.projects:
rval.append({'flavor_id': flavor.flavorid,
'tenant_id': project_id})
return {'flavor_access': rval}
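# Illustrative sketch (not part of the original extension) of the structure
# the helper above produces; _FakeFlavor is hypothetical, for demonstration
# only:
def _example_marshalled_access():
    class _FakeFlavor(object):
        flavorid = '42'
        projects = ['tenant-a', 'tenant-b']
    return _marshall_flavor_access(_FakeFlavor())
    # -> {'flavor_access': [{'flavor_id': '42', 'tenant_id': 'tenant-a'},
    #                       {'flavor_id': '42', 'tenant_id': 'tenant-b'}]}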
class FlavorAccessController(object):
"""The flavor access API controller for the OpenStack API."""
def __init__(self):
super(FlavorAccessController, self).__init__()
@extensions.expected_errors(404)
def index(self, req, flavor_id):
context = req.environ['nova.context']
authorize(context)
try:
flavor = objects.Flavor.get_by_flavor_id(context, flavor_id)
except exception.FlavorNotFound as e:
raise webob.exc.HTTPNotFound(explanation=e.format_message())
# public flavor to all projects
if flavor.is_public:
explanation = _("Access list not available for public flavors.")
raise webob.exc.HTTPNotFound(explanation=explanation)
# private flavor to listed projects only
return _marshall_flavor_access(flavor)
class FlavorActionController(wsgi.Controller):
"""The flavor access API controller for the OpenStack API."""
def _get_flavor_refs(self, context):
"""Return a dictionary mapping flavorid to flavor_ref."""
flavors = objects.FlavorList.get_all(context)
rval = {}
for flavor in flavors:
rval[flavor.flavorid] = flavor
return rval
def _extend_flavor(self, flavor_rval, flavor_ref):
key = "%s:is_public" % (FlavorAccess.alias)
flavor_rval[key] = flavor_ref['is_public']
@wsgi.extends
def show(self, req, resp_obj, id):
context = req.environ['nova.context']
if soft_authorize(context):
db_flavor = req.get_db_flavor(id)
self._extend_flavor(resp_obj.obj['flavor'], db_flavor)
@wsgi.extends
def detail(self, req, resp_obj):
context = req.environ['nova.context']
if soft_authorize(context):
flavors = list(resp_obj.obj['flavors'])
for flavor_rval in flavors:
db_flavor = req.get_db_flavor(flavor_rval['id'])
self._extend_flavor(flavor_rval, db_flavor)
@wsgi.extends(action='create')
def create(self, req, body, resp_obj):
context = req.environ['nova.context']
if soft_authorize(context):
db_flavor = req.get_db_flavor(resp_obj.obj['flavor']['id'])
self._extend_flavor(resp_obj.obj['flavor'], db_flavor)
@extensions.expected_errors((400, 403, 404, 409))
@wsgi.action("addTenantAccess")
@validation.schema(flavor_access.add_tenant_access)
def _add_tenant_access(self, req, id, body):
context = req.environ['nova.context']
authorize(context, action="add_tenant_access")
vals = body['addTenantAccess']
tenant = vals['tenant']
flavor = objects.Flavor(context=context, flavorid=id)
try:
flavor.add_access(tenant)
except exception.FlavorNotFound as e:
raise webob.exc.HTTPNotFound(explanation=e.format_message())
except exception.FlavorAccessExists as err:
raise webob.exc.HTTPConflict(explanation=err.format_message())
except exception.AdminRequired as e:
raise webob.exc.HTTPForbidden(explanation=e.format_message())
return _marshall_flavor_access(flavor)
@extensions.expected_errors((400, 403, 404))
@wsgi.action("removeTenantAccess")
@validation.schema(flavor_access.remove_tenant_access)
def _remove_tenant_access(self, req, id, body):
context = req.environ['nova.context']
authorize(context, action="remove_tenant_access")
vals = body['removeTenantAccess']
tenant = vals['tenant']
flavor = objects.Flavor(context=context, flavorid=id)
try:
flavor.remove_access(tenant)
except (exception.FlavorAccessNotFound,
exception.FlavorNotFound) as e:
raise webob.exc.HTTPNotFound(explanation=e.format_message())
except exception.AdminRequired as e:
raise webob.exc.HTTPForbidden(explanation=e.format_message())
return _marshall_flavor_access(flavor)
class FlavorAccess(extensions.V3APIExtensionBase):
"""Flavor access support."""
name = "FlavorAccess"
alias = ALIAS
version = 1
def get_resources(self):
res = extensions.ResourceExtension(
ALIAS,
controller=FlavorAccessController(),
parent=dict(member_name='flavor', collection_name='flavors'))
return [res]
def get_controller_extensions(self):
extension = extensions.ControllerExtension(
self, 'flavors', FlavorActionController())
return [extension]
| gpl-2.0 |
DSLituiev/scikit-learn | doc/datasets/mldata_fixture.py | 367 | 1183 | """Fixture module to skip the datasets loading when offline
Mock urllib2 access to mldata.org and create a temporary data folder.
"""
from os import makedirs
from os.path import join
import numpy as np
import tempfile
import shutil
from sklearn import datasets
from sklearn.utils.testing import install_mldata_mock
from sklearn.utils.testing import uninstall_mldata_mock
def globs(globs):
# Create a temporary folder for the data fetcher
global custom_data_home
custom_data_home = tempfile.mkdtemp()
makedirs(join(custom_data_home, 'mldata'))
globs['custom_data_home'] = custom_data_home
return globs
def setup_module():
# setup mock urllib2 module to avoid downloading from mldata.org
install_mldata_mock({
'mnist-original': {
'data': np.empty((70000, 784)),
'label': np.repeat(np.arange(10, dtype='d'), 7000),
},
'iris': {
'data': np.empty((150, 4)),
},
'datasets-uci-iris': {
'double0': np.empty((150, 4)),
'class': np.empty((150,)),
},
})
def teardown_module():
uninstall_mldata_mock()
shutil.rmtree(custom_data_home)
| bsd-3-clause |
moble/sympy | sympy/calculus/tests/test_euler.py | 72 | 2248 | from sympy import Symbol, Function, Derivative as D, Eq, cos, sin
from sympy.utilities.pytest import raises
from sympy.calculus.euler import euler_equations as euler
def test_euler_interface():
x = Function('x')
y = Symbol('y')
t = Symbol('t')
raises(TypeError, lambda: euler())
raises(TypeError, lambda: euler(D(x(t), t)*y(t), [x(t), y]))
raises(ValueError, lambda: euler(D(x(t), t)*x(y), [x(t), x(y)]))
raises(TypeError, lambda: euler(D(x(t), t)**2, x(0)))
assert euler(D(x(t), t)**2/2, set([x(t)])) == [Eq(-D(x(t), t, t))]
assert euler(D(x(t), t)**2/2, x(t), set([t])) == [Eq(-D(x(t), t, t))]
def test_euler_pendulum():
x = Function('x')
t = Symbol('t')
L = D(x(t), t)**2/2 + cos(x(t))
assert euler(L, x(t), t) == [Eq(-sin(x(t)) - D(x(t), t, t))]
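# Worked check (added for clarity, not in the original test): the pendulum
# Lagrangian is L = x'(t)**2/2 + cos(x(t)). The Euler-Lagrange equation
#     d/dt (dL/dx') - dL/dx = 0
# gives x''(t) - (-sin(x(t))) = 0, i.e. -sin(x(t)) - x''(t) = 0, which is
# exactly the Eq(...) asserted above.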
def test_euler_henonheiles():
x = Function('x')
y = Function('y')
t = Symbol('t')
L = sum(D(z(t), t)**2/2 - z(t)**2/2 for z in [x, y])
L += -x(t)**2*y(t) + y(t)**3/3
assert euler(L, [x(t), y(t)], t) == [Eq(-2*x(t)*y(t) - x(t) -
D(x(t), t, t)),
Eq(-x(t)**2 + y(t)**2 -
y(t) - D(y(t), t, t))]
def test_euler_sineg():
psi = Function('psi')
t = Symbol('t')
x = Symbol('x')
L = D(psi(t, x), t)**2/2 - D(psi(t, x), x)**2/2 + cos(psi(t, x))
assert euler(L, psi(t, x), [t, x]) == [Eq(-sin(psi(t, x)) -
D(psi(t, x), t, t) +
D(psi(t, x), x, x))]
def test_euler_high_order():
# an example from hep-th/0309038
m = Symbol('m')
k = Symbol('k')
x = Function('x')
y = Function('y')
t = Symbol('t')
L = (m*D(x(t), t)**2/2 + m*D(y(t), t)**2/2 -
k*D(x(t), t)*D(y(t), t, t) + k*D(y(t), t)*D(x(t), t, t))
assert euler(L, [x(t), y(t)]) == [Eq(2*k*D(y(t), t, t, t) -
m*D(x(t), t, t)),
Eq(-2*k*D(x(t), t, t, t) -
m*D(y(t), t, t))]
w = Symbol('w')
L = D(x(t, w), t, w)**2/2
assert euler(L) == [Eq(D(x(t, w), t, t, w, w))]
| bsd-3-clause |
google/contentbox | third_party/django/db/models/sql/query.py | 13 | 84272 | """
Create SQL statements for QuerySets.
The code in here encapsulates all of the SQL construction so that QuerySets
themselves do not have to (and could be backed by things other than SQL
databases). The abstraction barrier only works one way: this module has to know
all about the internals of models in order to get the information it needs.
"""
import copy
from django.utils.datastructures import SortedDict
from django.utils.encoding import force_text
from django.utils.tree import Node
from django.utils import six
from django.db import connections, DEFAULT_DB_ALIAS
from django.db.models.constants import LOOKUP_SEP
from django.db.models.aggregates import refs_aggregate
from django.db.models.expressions import ExpressionNode
from django.db.models.fields import FieldDoesNotExist
from django.db.models.related import PathInfo
from django.db.models.sql import aggregates as base_aggregates_module
from django.db.models.sql.constants import (QUERY_TERMS, ORDER_DIR, SINGLE,
ORDER_PATTERN, JoinInfo, SelectInfo)
from django.db.models.sql.datastructures import EmptyResultSet, Empty, MultiJoin
from django.db.models.sql.expressions import SQLEvaluator
from django.db.models.sql.where import (WhereNode, Constraint, EverythingNode,
ExtraWhere, AND, OR, EmptyWhere)
from django.core.exceptions import FieldError
__all__ = ['Query', 'RawQuery']
class RawQuery(object):
"""
A single raw SQL query
"""
def __init__(self, sql, using, params=None):
self.params = params or ()
self.sql = sql
self.using = using
self.cursor = None
# Mirror some properties of a normal query so that
# the compiler can be used to process results.
self.low_mark, self.high_mark = 0, None # Used for offset/limit
self.extra_select = {}
self.aggregate_select = {}
def clone(self, using):
return RawQuery(self.sql, using, params=self.params)
def convert_values(self, value, field, connection):
"""Convert the database-returned value into a type that is consistent
across database backends.
By default, this defers to the underlying backend operations, but
it can be overridden by Query classes for specific backends.
"""
return connection.ops.convert_values(value, field)
def get_columns(self):
if self.cursor is None:
self._execute_query()
converter = connections[self.using].introspection.table_name_converter
return [converter(column_meta[0])
for column_meta in self.cursor.description]
def __iter__(self):
# Always execute a new query for a new iterator.
# This could be optimized with a cache at the expense of RAM.
self._execute_query()
if not connections[self.using].features.can_use_chunked_reads:
# If the database can't use chunked reads we need to make sure we
# evaluate the entire query up front.
result = list(self.cursor)
else:
result = self.cursor
return iter(result)
def __repr__(self):
return "<RawQuery: %r>" % (self.sql % tuple(self.params))
def _execute_query(self):
self.cursor = connections[self.using].cursor()
self.cursor.execute(self.sql, self.params)
class Query(object):
"""
A single SQL query.
"""
# SQL join types. These are part of the class because their string forms
# vary from database to database and can be customised by a subclass.
INNER = 'INNER JOIN'
LOUTER = 'LEFT OUTER JOIN'
alias_prefix = 'T'
query_terms = QUERY_TERMS
aggregates_module = base_aggregates_module
compiler = 'SQLCompiler'
def __init__(self, model, where=WhereNode):
self.model = model
self.alias_refcount = {}
# alias_map is the most important data structure regarding joins.
# It's used for recording which joins exist in the query and what
# type they are. The key is the alias of the joined table (possibly
# the table name) and the value is JoinInfo from constants.py.
self.alias_map = {}
self.table_map = {} # Maps table names to list of aliases.
self.join_map = {}
self.default_cols = True
self.default_ordering = True
self.standard_ordering = True
self.used_aliases = set()
self.filter_is_sticky = False
self.included_inherited_models = {}
# SQL-related attributes
# Select and related select clauses as SelectInfo instances.
# The select is used for cases where we want to set up the select
# clause to contain other than default fields (values(), annotate(),
# subqueries...)
self.select = []
# The related_select_cols is used for columns needed for
# select_related - this is populated in compile stage.
self.related_select_cols = []
self.tables = [] # Aliases in the order they are created.
self.where = where()
self.where_class = where
self.group_by = None
self.having = where()
self.order_by = []
self.low_mark, self.high_mark = 0, None # Used for offset/limit
self.distinct = False
self.distinct_fields = []
self.select_for_update = False
self.select_for_update_nowait = False
self.select_related = False
# SQL aggregate-related attributes
self.aggregates = SortedDict() # Maps alias -> SQL aggregate function
self.aggregate_select_mask = None
self._aggregate_select_cache = None
# Arbitrary maximum limit for select_related. Prevents infinite
# recursion. Can be changed by the depth parameter to select_related().
self.max_depth = 5
# These are for extensions. The contents are more or less appended
# verbatim to the appropriate clause.
self.extra = SortedDict() # Maps col_alias -> (col_sql, params).
self.extra_select_mask = None
self._extra_select_cache = None
self.extra_tables = ()
self.extra_order_by = ()
# A tuple that is a set of model field names and either True, if these
# are the fields to defer, or False if these are the only fields to
# load.
self.deferred_loading = (set(), True)
def __str__(self):
"""
Returns the query as a string of SQL with the parameter values
substituted in (use sql_with_params() to see the unsubstituted string).
Parameter values won't necessarily be quoted correctly, since that is
done by the database interface at execution time.
"""
sql, params = self.sql_with_params()
return sql % params
def sql_with_params(self):
"""
Returns the query as an SQL string and the parameters that will be
        substituted into the query.
"""
return self.get_compiler(DEFAULT_DB_ALIAS).as_sql()
def __deepcopy__(self, memo):
result = self.clone(memo=memo)
memo[id(self)] = result
return result
def prepare(self):
return self
def get_compiler(self, using=None, connection=None):
if using is None and connection is None:
raise ValueError("Need either using or connection")
if using:
connection = connections[using]
# Check that the compiler will be able to execute the query
for alias, aggregate in self.aggregate_select.items():
connection.ops.check_aggregate_support(aggregate)
return connection.ops.compiler(self.compiler)(self, connection, using)
def get_meta(self):
"""
Returns the Options instance (the model._meta) from which to start
processing. Normally, this is self.model._meta, but it can be changed
by subclasses.
"""
return self.model._meta
def clone(self, klass=None, memo=None, **kwargs):
"""
Creates a copy of the current instance. The 'kwargs' parameter can be
used by clients to update attributes after copying has taken place.
"""
obj = Empty()
obj.__class__ = klass or self.__class__
obj.model = self.model
obj.alias_refcount = self.alias_refcount.copy()
obj.alias_map = self.alias_map.copy()
obj.table_map = self.table_map.copy()
obj.join_map = self.join_map.copy()
obj.default_cols = self.default_cols
obj.default_ordering = self.default_ordering
obj.standard_ordering = self.standard_ordering
obj.included_inherited_models = self.included_inherited_models.copy()
obj.select = self.select[:]
obj.related_select_cols = []
obj.tables = self.tables[:]
obj.where = self.where.clone()
obj.where_class = self.where_class
if self.group_by is None:
obj.group_by = None
else:
obj.group_by = self.group_by[:]
obj.having = self.having.clone()
obj.order_by = self.order_by[:]
obj.low_mark, obj.high_mark = self.low_mark, self.high_mark
obj.distinct = self.distinct
obj.distinct_fields = self.distinct_fields[:]
obj.select_for_update = self.select_for_update
obj.select_for_update_nowait = self.select_for_update_nowait
obj.select_related = self.select_related
obj.related_select_cols = []
obj.aggregates = self.aggregates.copy()
if self.aggregate_select_mask is None:
obj.aggregate_select_mask = None
else:
obj.aggregate_select_mask = self.aggregate_select_mask.copy()
# _aggregate_select_cache cannot be copied, as doing so breaks the
# (necessary) state in which both aggregates and
# _aggregate_select_cache point to the same underlying objects.
# It will get re-populated in the cloned queryset the next time it's
# used.
obj._aggregate_select_cache = None
obj.max_depth = self.max_depth
obj.extra = self.extra.copy()
if self.extra_select_mask is None:
obj.extra_select_mask = None
else:
obj.extra_select_mask = self.extra_select_mask.copy()
if self._extra_select_cache is None:
obj._extra_select_cache = None
else:
obj._extra_select_cache = self._extra_select_cache.copy()
obj.extra_tables = self.extra_tables
obj.extra_order_by = self.extra_order_by
obj.deferred_loading = copy.copy(self.deferred_loading[0]), self.deferred_loading[1]
if self.filter_is_sticky and self.used_aliases:
obj.used_aliases = self.used_aliases.copy()
else:
obj.used_aliases = set()
obj.filter_is_sticky = False
obj.__dict__.update(kwargs)
if hasattr(obj, '_setup_query'):
obj._setup_query()
return obj
def convert_values(self, value, field, connection):
"""Convert the database-returned value into a type that is consistent
across database backends.
By default, this defers to the underlying backend operations, but
it can be overridden by Query classes for specific backends.
"""
return connection.ops.convert_values(value, field)
def resolve_aggregate(self, value, aggregate, connection):
"""Resolve the value of aggregates returned by the database to
consistent (and reasonable) types.
This is required because of the predisposition of certain backends
to return Decimal and long types when they are not needed.
"""
if value is None:
if aggregate.is_ordinal:
return 0
# Return None as-is
return value
elif aggregate.is_ordinal:
# Any ordinal aggregate (e.g., count) returns an int
return int(value)
elif aggregate.is_computed:
# Any computed aggregate (e.g., avg) returns a float
return float(value)
else:
# Return value depends on the type of the field being processed.
return self.convert_values(value, aggregate.field, connection)
def get_aggregation(self, using):
"""
Returns the dictionary with the values of the existing aggregations.
"""
if not self.aggregate_select:
return {}
# If there is a group by clause, aggregating does not add useful
# information but retrieves only the first row. Aggregate
# over the subquery instead.
if self.group_by is not None:
from django.db.models.sql.subqueries import AggregateQuery
query = AggregateQuery(self.model)
obj = self.clone()
# Remove any aggregates marked for reduction from the subquery
# and move them to the outer AggregateQuery.
for alias, aggregate in self.aggregate_select.items():
if aggregate.is_summary:
query.aggregate_select[alias] = aggregate
del obj.aggregate_select[alias]
try:
query.add_subquery(obj, using)
except EmptyResultSet:
return dict(
(alias, None)
for alias in query.aggregate_select
)
else:
query = self
self.select = []
self.default_cols = False
self.extra = {}
self.remove_inherited_models()
query.clear_ordering(True)
query.clear_limits()
query.select_for_update = False
query.select_related = False
query.related_select_cols = []
result = query.get_compiler(using).execute_sql(SINGLE)
if result is None:
result = [None for q in query.aggregate_select.items()]
return dict([
(alias, self.resolve_aggregate(val, aggregate, connection=connections[using]))
for (alias, aggregate), val
in zip(query.aggregate_select.items(), result)
])
def get_count(self, using):
"""
Performs a COUNT() query using the current filter constraints.
"""
obj = self.clone()
if len(self.select) > 1 or self.aggregate_select or (self.distinct and self.distinct_fields):
# If a select clause exists, then the query has already started to
# specify the columns that are to be returned.
# In this case, we need to use a subquery to evaluate the count.
from django.db.models.sql.subqueries import AggregateQuery
subquery = obj
subquery.clear_ordering(True)
subquery.clear_limits()
obj = AggregateQuery(obj.model)
try:
obj.add_subquery(subquery, using=using)
except EmptyResultSet:
                # add_subquery evaluates the query; if it raises
                # EmptyResultSet then there can be no results, and
                # therefore the count is 0
return 0
obj.add_count_column()
number = obj.get_aggregation(using=using)[None]
# Apply offset and limit constraints manually, since using LIMIT/OFFSET
# in SQL (in variants that provide them) doesn't change the COUNT
# output.
number = max(0, number - self.low_mark)
if self.high_mark is not None:
number = min(number, self.high_mark - self.low_mark)
return number
def has_results(self, using):
q = self.clone()
q.clear_select_clause()
q.clear_ordering(True)
q.set_limits(high=1)
compiler = q.get_compiler(using=using)
return compiler.has_results()
def combine(self, rhs, connector):
"""
Merge the 'rhs' query into the current one (with any 'rhs' effects
being applied *after* (that is, "to the right of") anything in the
current query. 'rhs' is not modified during a call to this function.
The 'connector' parameter describes how to connect filters from the
'rhs' query.
"""
assert self.model == rhs.model, \
"Cannot combine queries on two different base models."
assert self.can_filter(), \
"Cannot combine queries once a slice has been taken."
assert self.distinct == rhs.distinct, \
"Cannot combine a unique query with a non-unique query."
assert self.distinct_fields == rhs.distinct_fields, \
"Cannot combine queries with different distinct fields."
self.remove_inherited_models()
# Work out how to relabel the rhs aliases, if necessary.
change_map = {}
conjunction = (connector == AND)
# Determine which existing joins can be reused. When combining the
# query with AND we must recreate all joins for m2m filters. When
        # combining with OR we can reuse joins. The reason is that in the
        # AND case a single row can't fulfill a condition like:
        # revrel__col=1 & revrel__col=2
        # But, there might be two different related rows matching this
        # condition. In the OR case a single True is enough, so a single
        # row is enough, too.
#
# Note that we will be creating duplicate joins for non-m2m joins in
# the AND case. The results will be correct but this creates too many
# joins. This is something that could be fixed later on.
reuse = set() if conjunction else set(self.tables)
# Base table must be present in the query - this is the same
# table on both sides.
self.get_initial_alias()
# Now, add the joins from rhs query into the new query (skipping base
# table).
for alias in rhs.tables[1:]:
table, _, join_type, lhs, join_cols, nullable, join_field = rhs.alias_map[alias]
promote = (join_type == self.LOUTER)
# If the left side of the join was already relabeled, use the
# updated alias.
lhs = change_map.get(lhs, lhs)
new_alias = self.join(
(lhs, table, join_cols), reuse=reuse,
outer_if_first=not conjunction, nullable=nullable,
join_field=join_field)
if promote:
self.promote_joins([new_alias])
# We can't reuse the same join again in the query. If we have two
# distinct joins for the same connection in rhs query, then the
# combined query must have two joins, too.
reuse.discard(new_alias)
change_map[alias] = new_alias
if not rhs.alias_refcount[alias]:
# The alias was unused in the rhs query. Unref it so that it
# will be unused in the new query, too. We have to add and
# unref the alias so that join promotion has information of
# the join type for the unused alias.
self.unref_alias(new_alias)
# So that we don't exclude valid results in an OR query combination,
# all joins exclusive to either the lhs or the rhs must be converted
# to an outer join. RHS joins were already set to outer joins above,
# so check which joins were used only in the lhs query.
if not conjunction:
rhs_used_joins = set(change_map.values())
to_promote = [alias for alias in self.tables
if alias not in rhs_used_joins]
self.promote_joins(to_promote, True)
# Now relabel a copy of the rhs where-clause and add it to the current
# one.
if rhs.where:
w = rhs.where.clone()
w.relabel_aliases(change_map)
if not self.where:
# Since 'self' matches everything, add an explicit "include
# everything" where-constraint so that connections between the
# where clauses won't exclude valid results.
self.where.add(EverythingNode(), AND)
elif self.where:
# rhs has an empty where clause.
w = self.where_class()
w.add(EverythingNode(), AND)
else:
w = self.where_class()
self.where.add(w, connector)
# Selection columns and extra extensions are those provided by 'rhs'.
self.select = []
for col, field in rhs.select:
if isinstance(col, (list, tuple)):
new_col = change_map.get(col[0], col[0]), col[1]
self.select.append(SelectInfo(new_col, field))
else:
new_col = col.relabeled_clone(change_map)
self.select.append(SelectInfo(new_col, field))
if connector == OR:
# It would be nice to be able to handle this, but the queries don't
# really make sense (or return consistent value sets). Not worth
# the extra complexity when you can write a real query instead.
if self.extra and rhs.extra:
raise ValueError("When merging querysets using 'or', you "
"cannot have extra(select=...) on both sides.")
self.extra.update(rhs.extra)
extra_select_mask = set()
if self.extra_select_mask is not None:
extra_select_mask.update(self.extra_select_mask)
if rhs.extra_select_mask is not None:
extra_select_mask.update(rhs.extra_select_mask)
if extra_select_mask:
self.set_extra_mask(extra_select_mask)
self.extra_tables += rhs.extra_tables
# Ordering uses the 'rhs' ordering, unless it has none, in which case
# the current ordering is used.
self.order_by = rhs.order_by[:] if rhs.order_by else self.order_by
self.extra_order_by = rhs.extra_order_by or self.extra_order_by
def deferred_to_data(self, target, callback):
"""
Converts the self.deferred_loading data structure to an alternate data
        structure, describing the fields that *will* be loaded. This is used to
compute the columns to select from the database and also by the
QuerySet class to work out which fields are being initialised on each
model. Models that have all their fields included aren't mentioned in
the result, only those that have field restrictions in place.
The "target" parameter is the instance that is populated (in place).
The "callback" is a function that is called whenever a (model, field)
pair need to be added to "target". It accepts three parameters:
"target", and the model and list of fields being added for that model.
"""
field_names, defer = self.deferred_loading
if not field_names:
return
orig_opts = self.get_meta()
seen = {}
must_include = {orig_opts.concrete_model: set([orig_opts.pk])}
for field_name in field_names:
parts = field_name.split(LOOKUP_SEP)
cur_model = self.model
opts = orig_opts
for name in parts[:-1]:
old_model = cur_model
source = opts.get_field_by_name(name)[0]
if is_reverse_o2o(source):
cur_model = source.model
else:
cur_model = source.rel.to
opts = cur_model._meta
# Even if we're "just passing through" this model, we must add
# both the current model's pk and the related reference field
# (if it's not a reverse relation) to the things we select.
if not is_reverse_o2o(source):
must_include[old_model].add(source)
add_to_dict(must_include, cur_model, opts.pk)
field, model, _, _ = opts.get_field_by_name(parts[-1])
if model is None:
model = cur_model
if not is_reverse_o2o(field):
add_to_dict(seen, model, field)
if defer:
# We need to load all fields for each model, except those that
# appear in "seen" (for all models that appear in "seen"). The only
# slight complexity here is handling fields that exist on parent
# models.
workset = {}
for model, values in six.iteritems(seen):
for field, m in model._meta.get_fields_with_model():
if field in values:
continue
add_to_dict(workset, m or model, field)
for model, values in six.iteritems(must_include):
# If we haven't included a model in workset, we don't add the
# corresponding must_include fields for that model, since an
# empty set means "include all fields". That's why there's no
# "else" branch here.
if model in workset:
workset[model].update(values)
for model, values in six.iteritems(workset):
callback(target, model, values)
else:
for model, values in six.iteritems(must_include):
if model in seen:
seen[model].update(values)
else:
# As we've passed through this model, but not explicitly
# included any fields, we have to make sure it's mentioned
# so that only the "must include" fields are pulled in.
seen[model] = values
# Now ensure that every model in the inheritance chain is mentioned
# in the parent list. Again, it must be mentioned to ensure that
# only "must include" fields are pulled in.
for model in orig_opts.get_parent_list():
if model not in seen:
seen[model] = set()
for model, values in six.iteritems(seen):
callback(target, model, values)
def deferred_to_columns_cb(self, target, model, fields):
"""
Callback used by deferred_to_columns(). The "target" parameter should
be a set instance.
"""
table = model._meta.db_table
if table not in target:
target[table] = set()
for field in fields:
target[table].add(field.column)
def table_alias(self, table_name, create=False):
"""
Returns a table alias for the given table_name and whether this is a
new alias or not.
If 'create' is true, a new alias is always created. Otherwise, the
most recently created alias for the table (if one exists) is reused.
"""
current = self.table_map.get(table_name)
if not create and current:
alias = current[0]
self.alias_refcount[alias] += 1
return alias, False
# Create a new alias for this table.
if current:
alias = '%s%d' % (self.alias_prefix, len(self.alias_map) + 1)
current.append(alias)
else:
            # The first occurrence of a table uses the table name directly.
alias = table_name
self.table_map[alias] = [alias]
self.alias_refcount[alias] = 1
self.tables.append(alias)
return alias, True
def ref_alias(self, alias):
""" Increases the reference count for this alias. """
self.alias_refcount[alias] += 1
def unref_alias(self, alias, amount=1):
""" Decreases the reference count for this alias. """
self.alias_refcount[alias] -= amount
def promote_joins(self, aliases, unconditional=False):
"""
        Recursively promotes the join type of the given aliases and their
        children to an outer join. If 'unconditional' is False, a join is only
        promoted if it is nullable or its parent join is an outer join.
Note about join promotion: When promoting any alias, we make sure all
joins which start from that alias are promoted, too. When adding a join
in join(), we make sure any join added to already existing LOUTER join
is generated as LOUTER. This ensures we don't ever have broken join
chains which contain first a LOUTER join, then an INNER JOIN, that is
this kind of join should never be generated: a LOUTER b INNER c. The
reason for avoiding this type of join chain is that the INNER after
the LOUTER will effectively remove any effect the LOUTER had.
"""
aliases = list(aliases)
while aliases:
alias = aliases.pop(0)
if self.alias_map[alias].join_cols[0][1] is None:
# This is the base table (first FROM entry) - this table
# isn't really joined at all in the query, so we should not
# alter its join type.
continue
parent_alias = self.alias_map[alias].lhs_alias
parent_louter = (parent_alias
and self.alias_map[parent_alias].join_type == self.LOUTER)
already_louter = self.alias_map[alias].join_type == self.LOUTER
if ((unconditional or self.alias_map[alias].nullable
or parent_louter) and not already_louter):
data = self.alias_map[alias]._replace(join_type=self.LOUTER)
self.alias_map[alias] = data
# Join type of 'alias' changed, so re-examine all aliases that
# refer to this one.
aliases.extend(
join for join in self.alias_map.keys()
if (self.alias_map[join].lhs_alias == alias
and join not in aliases))
def reset_refcounts(self, to_counts):
"""
This method will reset reference counts for aliases so that they match
the value passed in :param to_counts:.
"""
for alias, cur_refcount in self.alias_refcount.copy().items():
unref_amount = cur_refcount - to_counts.get(alias, 0)
self.unref_alias(alias, unref_amount)
def promote_disjunction(self, aliases_before, alias_usage_counts,
num_childs):
"""
This method is to be used for promoting joins in ORed filters.
        The principle for promotion is: any alias that is used (it appears in
        alias_usage_counts), is not used by every child of the ORed filter,
        and isn't pre-existing must be promoted to a LOUTER join.
"""
for alias, use_count in alias_usage_counts.items():
if use_count < num_childs and alias not in aliases_before:
self.promote_joins([alias])
def change_aliases(self, change_map):
"""
Changes the aliases in change_map (which maps old-alias -> new-alias),
relabelling any references to them in select columns and the where
clause.
"""
assert set(change_map.keys()).intersection(set(change_map.values())) == set()
def relabel_column(col):
if isinstance(col, (list, tuple)):
old_alias = col[0]
return (change_map.get(old_alias, old_alias), col[1])
else:
return col.relabeled_clone(change_map)
# 1. Update references in "select" (normal columns plus aliases),
# "group by", "where" and "having".
self.where.relabel_aliases(change_map)
self.having.relabel_aliases(change_map)
if self.group_by:
self.group_by = [relabel_column(col) for col in self.group_by]
self.select = [SelectInfo(relabel_column(s.col), s.field)
for s in self.select]
self.aggregates = SortedDict(
(key, relabel_column(col)) for key, col in self.aggregates.items())
# 2. Rename the alias in the internal table/alias datastructures.
for ident, aliases in self.join_map.items():
del self.join_map[ident]
aliases = tuple([change_map.get(a, a) for a in aliases])
ident = (change_map.get(ident[0], ident[0]),) + ident[1:]
self.join_map[ident] = aliases
for old_alias, new_alias in six.iteritems(change_map):
alias_data = self.alias_map[old_alias]
alias_data = alias_data._replace(rhs_alias=new_alias)
self.alias_refcount[new_alias] = self.alias_refcount[old_alias]
del self.alias_refcount[old_alias]
self.alias_map[new_alias] = alias_data
del self.alias_map[old_alias]
table_aliases = self.table_map[alias_data.table_name]
for pos, alias in enumerate(table_aliases):
if alias == old_alias:
table_aliases[pos] = new_alias
break
for pos, alias in enumerate(self.tables):
if alias == old_alias:
self.tables[pos] = new_alias
break
for key, alias in self.included_inherited_models.items():
if alias in change_map:
self.included_inherited_models[key] = change_map[alias]
# 3. Update any joins that refer to the old alias.
for alias, data in six.iteritems(self.alias_map):
lhs = data.lhs_alias
if lhs in change_map:
data = data._replace(lhs_alias=change_map[lhs])
self.alias_map[alias] = data
# 4. Update the temporary _lookup_joins list
if hasattr(self, '_lookup_joins'):
self._lookup_joins = [change_map.get(lj, lj) for lj in self._lookup_joins]
def bump_prefix(self, exceptions=()):
"""
Changes the alias prefix to the next letter in the alphabet and
relabels all the aliases. Even tables that previously had no alias will
get an alias after this call (it's mostly used for nested queries and
the outer query will already be using the non-aliased table name).
Subclasses who create their own prefix should override this method to
produce a similar result (a new prefix and relabelled aliases).
The 'exceptions' parameter is a container that holds alias names which
should not be changed.
"""
current = ord(self.alias_prefix)
assert current < ord('Z')
prefix = chr(current + 1)
self.alias_prefix = prefix
change_map = SortedDict()
for pos, alias in enumerate(self.tables):
if alias in exceptions:
continue
new_alias = '%s%d' % (prefix, pos)
change_map[alias] = new_alias
self.tables[pos] = new_alias
self.change_aliases(change_map)
def get_initial_alias(self):
"""
Returns the first alias for this query, after increasing its reference
count.
"""
if self.tables:
alias = self.tables[0]
self.ref_alias(alias)
else:
alias = self.join((None, self.get_meta().db_table, None))
return alias
def count_active_tables(self):
"""
Returns the number of tables in this query with a non-zero reference
count. Note that after execution, the reference counts are zeroed, so
tables added in compiler will not be seen by this method.
"""
return len([1 for count in self.alias_refcount.values() if count])
def join(self, connection, reuse=None, outer_if_first=False,
nullable=False, join_field=None):
"""
Returns an alias for the join in 'connection', either reusing an
existing alias for that join or creating a new one. 'connection' is a
tuple (lhs, table, join_cols) where 'lhs' is either an existing
table alias or a table name. 'join_cols' is a tuple of tuples containing
columns to join on ((l_id1, r_id1), (l_id2, r_id2)). The join corresponds
to the SQL equivalent of::
lhs.l_id1 = table.r_id1 AND lhs.l_id2 = table.r_id2
The 'reuse' parameter can be either None which means all joins
(matching the connection) are reusable, or it can be a set containing
the aliases that can be reused.
If 'outer_if_first' is True and a new join is created, it will have the
LOUTER join type.
A join is always created as LOUTER if the lhs alias is LOUTER to make
sure we do not generate chains like t1 LOUTER t2 INNER t3.
If 'nullable' is True, the join can potentially involve NULL values and
is a candidate for promotion (to "left outer") when combining querysets.
The 'join_field' is the field we are joining along (if any).
"""
lhs, table, join_cols = connection
assert lhs is None or join_field is not None
existing = self.join_map.get(connection, ())
if reuse is None:
reuse = existing
else:
reuse = [a for a in existing if a in reuse]
for alias in reuse:
if join_field and self.alias_map[alias].join_field != join_field:
                # The join_map doesn't contain join_field (mainly because
                # fields in Query structs are problematic in pickling), so
                # check that the existing join was created using the same
                # join_field as the join currently being constructed.
continue
self.ref_alias(alias)
return alias
# No reuse is possible, so we need a new alias.
alias, _ = self.table_alias(table, True)
if not lhs:
# Not all tables need to be joined to anything. No join type
# means the later columns are ignored.
join_type = None
elif outer_if_first or self.alias_map[lhs].join_type == self.LOUTER:
# We need to use LOUTER join if asked by outer_if_first or if the
# LHS table is left-joined in the query.
join_type = self.LOUTER
else:
join_type = self.INNER
join = JoinInfo(table, alias, join_type, lhs, join_cols or ((None, None),), nullable,
join_field)
self.alias_map[alias] = join
if connection in self.join_map:
self.join_map[connection] += (alias,)
else:
self.join_map[connection] = (alias,)
return alias
def setup_inherited_models(self):
"""
If the model that is the basis for this QuerySet inherits other models,
we need to ensure that those other models have their tables included in
the query.
We do this as a separate step so that subclasses know which
tables are going to be active in the query, without needing to compute
all the select columns (this method is called from pre_sql_setup(),
whereas column determination is a later part, and side-effect, of
as_sql()).
"""
opts = self.get_meta()
root_alias = self.tables[0]
seen = {None: root_alias}
for field, model in opts.get_fields_with_model():
if model not in seen:
self.join_parent_model(opts, model, root_alias, seen)
self.included_inherited_models = seen
def join_parent_model(self, opts, model, alias, seen):
"""
Makes sure the given 'model' is joined in the query. If 'model' isn't
a parent of 'opts' or if it is None this method is a no-op.
The 'alias' is the root alias for starting the join, 'seen' is a dict
of model -> alias of existing joins. It must also contain a mapping
of None -> some alias. This will be returned in the no-op case.
"""
if model in seen:
return seen[model]
chain = opts.get_base_chain(model)
if chain is None:
return alias
curr_opts = opts
for int_model in chain:
if int_model in seen:
return seen[int_model]
            # Proxy models have elements in the base chain with no
            # parents; assign the new options object and skip to the
            # next base in that case.
if not curr_opts.parents[int_model]:
curr_opts = int_model._meta
continue
link_field = curr_opts.get_ancestor_link(int_model)
_, _, _, joins, _ = self.setup_joins(
[link_field.name], curr_opts, alias)
curr_opts = int_model._meta
alias = seen[int_model] = joins[-1]
return alias or seen[None]
def remove_inherited_models(self):
"""
Undoes the effects of setup_inherited_models(). Should be called
whenever select columns (self.select) are set explicitly.
"""
for key, alias in self.included_inherited_models.items():
if key:
self.unref_alias(alias)
self.included_inherited_models = {}
def add_aggregate(self, aggregate, model, alias, is_summary):
"""
Adds a single aggregate expression to the Query
"""
opts = model._meta
field_list = aggregate.lookup.split(LOOKUP_SEP)
if len(field_list) == 1 and aggregate.lookup in self.aggregates:
# Aggregate is over an annotation
field_name = field_list[0]
col = field_name
source = self.aggregates[field_name]
if not is_summary:
raise FieldError("Cannot compute %s('%s'): '%s' is an aggregate" % (
aggregate.name, field_name, field_name))
elif ((len(field_list) > 1) or
(field_list[0] not in [i.name for i in opts.fields]) or
self.group_by is None or
not is_summary):
# If:
# - the field descriptor has more than one part (foo__bar), or
# - the field descriptor is referencing an m2m/m2o field, or
# - this is a reference to a model field (possibly inherited), or
# - this is an annotation over a model field
# then we need to explore the joins that are required.
field, sources, opts, join_list, path = self.setup_joins(
field_list, opts, self.get_initial_alias())
# Process the join chain to see if it can be trimmed
targets, _, join_list = self.trim_joins(sources, join_list, path)
# If the aggregate references a model or field that requires a join,
# those joins must be LEFT OUTER - empty join rows must be returned
# in order for zeros to be returned for those aggregates.
self.promote_joins(join_list, True)
col = targets[0].column
source = sources[0]
col = (join_list[-1], col)
else:
# The simplest cases. No joins required -
# just reference the provided column alias.
field_name = field_list[0]
source = opts.get_field(field_name)
col = field_name
# Add the aggregate to the query
aggregate.add_to_query(self, alias, col=col, source=source, is_summary=is_summary)
def build_filter(self, filter_expr, branch_negated=False, current_negated=False,
can_reuse=None):
"""
Builds a WhereNode for a single filter clause, but doesn't add it
to this Query. Query.add_q() will then add this filter to the where
or having Node.
The 'branch_negated' tells us if the current branch contains any
negations. This will be used to determine if subqueries are needed.
The 'current_negated' is used to determine if the current filter is
negated or not and this will be used to determine if IS NULL filtering
is needed.
        The difference between current_negated and branch_negated is that
        branch_negated is set on the first negation, but current_negated is
        flipped for each negation.
        Note that add_filter will not do any negating itself; that is done
        higher up in the code, by add_q().
The 'can_reuse' is a set of reusable joins for multijoins.
The method will create a filter clause that can be added to the current
query. However, if the filter isn't added to the query then the caller
is responsible for unreffing the joins used.
"""
arg, value = filter_expr
parts = arg.split(LOOKUP_SEP)
if not parts:
raise FieldError("Cannot parse keyword query %r" % arg)
# Work out the lookup type and remove it from the end of 'parts',
# if necessary.
lookup_type = 'exact' # Default lookup type
num_parts = len(parts)
if (len(parts) > 1 and parts[-1] in self.query_terms
and arg not in self.aggregates):
# Traverse the lookup query to distinguish related fields from
# lookup types.
lookup_model = self.model
for counter, field_name in enumerate(parts):
try:
lookup_field = lookup_model._meta.get_field(field_name)
except FieldDoesNotExist:
# Not a field. Bail out.
lookup_type = parts.pop()
break
# Unless we're at the end of the list of lookups, let's attempt
# to continue traversing relations.
if (counter + 1) < num_parts:
try:
lookup_model = lookup_field.rel.to
except AttributeError:
# Not a related field. Bail out.
lookup_type = parts.pop()
break
clause = self.where_class()
# Interpret '__exact=None' as the sql 'is NULL'; otherwise, reject all
# uses of None as a query value.
if value is None:
if lookup_type != 'exact':
raise ValueError("Cannot use None as a query value")
lookup_type = 'isnull'
value = True
elif callable(value):
value = value()
elif isinstance(value, ExpressionNode):
# If value is a query expression, evaluate it
value = SQLEvaluator(value, self, reuse=can_reuse)
# For Oracle '' is equivalent to null. The check needs to be done
# at this stage because join promotion can't be done at compiler
# stage. Using DEFAULT_DB_ALIAS isn't nice, but it is the best we
# can do here. Similar thing is done in is_nullable(), too.
if (connections[DEFAULT_DB_ALIAS].features.interprets_empty_strings_as_nulls and
lookup_type == 'exact' and value == ''):
value = True
lookup_type = 'isnull'
for alias, aggregate in self.aggregates.items():
if alias in (parts[0], LOOKUP_SEP.join(parts)):
clause.add((aggregate, lookup_type, value), AND)
return clause
opts = self.get_meta()
alias = self.get_initial_alias()
allow_many = not branch_negated
try:
field, sources, opts, join_list, path = self.setup_joins(
parts, opts, alias, can_reuse, allow_many,
allow_explicit_fk=True)
if can_reuse is not None:
can_reuse.update(join_list)
# split_exclude() needs to know which joins were generated for the
# lookup parts
self._lookup_joins = join_list
except MultiJoin as e:
return self.split_exclude(filter_expr, LOOKUP_SEP.join(parts[:e.level]),
can_reuse, e.names_with_path)
if (lookup_type == 'isnull' and value is True and not current_negated and
len(join_list) > 1):
# If the comparison is against NULL, we may need to use some left
# outer joins when creating the join chain. This is only done when
# needed, as it's less efficient at the database level.
self.promote_joins(join_list)
# Process the join list to see if we can remove any inner joins from
# the far end (fewer tables in a query is better). Note that join
# promotion must happen before join trimming to have the join type
# information available when reusing joins.
targets, alias, join_list = self.trim_joins(sources, join_list, path)
if hasattr(field, 'get_lookup_constraint'):
constraint = field.get_lookup_constraint(self.where_class, alias, targets, sources,
lookup_type, value)
else:
constraint = (Constraint(alias, targets[0].column, field), lookup_type, value)
clause.add(constraint, AND)
if current_negated and (lookup_type != 'isnull' or value is False):
self.promote_joins(join_list)
if (lookup_type != 'isnull' and (
self.is_nullable(targets[0]) or
self.alias_map[join_list[-1]].join_type == self.LOUTER)):
# The condition added here will be SQL like this:
# NOT (col IS NOT NULL), where the first NOT is added in
# upper layers of code. The reason for addition is that if col
# is null, then col != someval will result in SQL "unknown"
# which isn't the same as in Python. The Python None handling
# is wanted, and it can be gotten by
# (col IS NULL OR col != someval)
# <=>
# NOT (col IS NOT NULL AND col = someval).
clause.add((Constraint(alias, targets[0].column, None), 'isnull', False), AND)
return clause
def add_filter(self, filter_clause):
self.where.add(self.build_filter(filter_clause), 'AND')
def need_having(self, obj):
"""
Returns whether or not all elements of this q_object need to be put
together in the HAVING clause.
"""
if not isinstance(obj, Node):
return (refs_aggregate(obj[0].split(LOOKUP_SEP), self.aggregates)
or (hasattr(obj[1], 'contains_aggregate')
and obj[1].contains_aggregate(self.aggregates)))
return any(self.need_having(c) for c in obj.children)
def split_having_parts(self, q_object, negated=False):
"""
Returns a list of q_objects which need to go into the having clause
        instead of the where clause. Removes the split-out nodes from the
given q_object. Note that the q_object is altered, so cloning it is
needed.
"""
having_parts = []
for c in q_object.children[:]:
            # When constructing the having nodes we need to take care to
# preserve the negation status from the upper parts of the tree
if isinstance(c, Node):
# For each negated child, flip the in_negated flag.
in_negated = c.negated ^ negated
if c.connector == OR and self.need_having(c):
# A subtree starting from OR clause must go into having in
# whole if any part of that tree references an aggregate.
q_object.children.remove(c)
having_parts.append(c)
c.negated = in_negated
else:
having_parts.extend(
self.split_having_parts(c, in_negated)[1])
elif self.need_having(c):
q_object.children.remove(c)
new_q = self.where_class(children=[c], negated=negated)
having_parts.append(new_q)
return q_object, having_parts
def add_q(self, q_object):
"""
A preprocessor for the internal _add_q(). Responsible for
splitting the given q_object into where and having parts and
setting up some internal variables.
"""
if not self.need_having(q_object):
where_part, having_parts = q_object, []
else:
where_part, having_parts = self.split_having_parts(
q_object.clone(), q_object.negated)
used_aliases = self.used_aliases
clause = self._add_q(where_part, used_aliases)
self.where.add(clause, AND)
for hp in having_parts:
clause = self._add_q(hp, used_aliases)
self.having.add(clause, AND)
if self.filter_is_sticky:
self.used_aliases = used_aliases
def _add_q(self, q_object, used_aliases, branch_negated=False,
current_negated=False):
"""
Adds a Q-object to the current filter.
"""
connector = q_object.connector
current_negated = current_negated ^ q_object.negated
branch_negated = branch_negated or q_object.negated
target_clause = self.where_class(connector=connector,
negated=q_object.negated)
# Treat case NOT (a AND b) like case ((NOT a) OR (NOT b)) for join
# promotion. See ticket #21748.
effective_connector = connector
if current_negated:
effective_connector = OR if effective_connector == AND else AND
if effective_connector == OR:
alias_usage_counts = dict()
aliases_before = set(self.tables)
for child in q_object.children:
if effective_connector == OR:
refcounts_before = self.alias_refcount.copy()
if isinstance(child, Node):
child_clause = self._add_q(
child, used_aliases, branch_negated,
current_negated)
else:
child_clause = self.build_filter(
child, can_reuse=used_aliases, branch_negated=branch_negated,
current_negated=current_negated)
target_clause.add(child_clause, connector)
if effective_connector == OR:
used = alias_diff(refcounts_before, self.alias_refcount)
for alias in used:
alias_usage_counts[alias] = alias_usage_counts.get(alias, 0) + 1
if effective_connector == OR:
self.promote_disjunction(aliases_before, alias_usage_counts,
len(q_object.children))
return target_clause
def names_to_path(self, names, opts, allow_many, allow_explicit_fk):
"""
        Walks the names path and turns them into PathInfo tuples. Note that a
single name in 'names' can generate multiple PathInfos (m2m for
example).
        'names' is the path of names to travel, 'opts' is the model Options we
start the name resolving from, 'allow_many' and 'allow_explicit_fk'
are as for setup_joins().
Returns a list of PathInfo tuples. In addition returns the final field
(the last used join field), and target (which is a field guaranteed to
contain the same value as the final field).
"""
path, names_with_path = [], []
for pos, name in enumerate(names):
cur_names_with_path = (name, [])
if name == 'pk':
name = opts.pk.name
try:
field, model, direct, m2m = opts.get_field_by_name(name)
except FieldDoesNotExist:
for f in opts.fields:
if allow_explicit_fk and name == f.attname:
# XXX: A hack to allow foo_id to work in values() for
# backwards compatibility purposes. If we dropped that
# feature, this could be removed.
field, model, direct, m2m = opts.get_field_by_name(f.name)
break
else:
available = opts.get_all_field_names() + list(self.aggregate_select)
raise FieldError("Cannot resolve keyword %r into field. "
"Choices are: %s" % (name, ", ".join(available)))
# Check if we need any joins for concrete inheritance cases (the
# field lives in parent, but we are currently in one of its
# children)
if model:
# The field lives on a base class of the current model.
                # Skip the chain of proxy models to the concrete proxied model
proxied_model = opts.concrete_model
for int_model in opts.get_base_chain(model):
if int_model is proxied_model:
opts = int_model._meta
else:
final_field = opts.parents[int_model]
targets = (final_field.rel.get_related_field(),)
opts = int_model._meta
path.append(PathInfo(final_field.model._meta, opts, targets, final_field, False, True))
cur_names_with_path[1].append(PathInfo(final_field.model._meta, opts, targets, final_field, False, True))
if hasattr(field, 'get_path_info'):
pathinfos = field.get_path_info()
if not allow_many:
for inner_pos, p in enumerate(pathinfos):
if p.m2m:
cur_names_with_path[1].extend(pathinfos[0:inner_pos + 1])
names_with_path.append(cur_names_with_path)
raise MultiJoin(pos + 1, names_with_path)
last = pathinfos[-1]
path.extend(pathinfos)
final_field = last.join_field
opts = last.to_opts
targets = last.target_fields
cur_names_with_path[1].extend(pathinfos)
names_with_path.append(cur_names_with_path)
else:
# Local non-relational field.
final_field = field
targets = (field,)
break
if pos != len(names) - 1:
if pos == len(names) - 2:
raise FieldError(
"Join on field %r not permitted. Did you misspell %r for "
"the lookup type?" % (name, names[pos + 1]))
else:
raise FieldError("Join on field %r not permitted." % name)
return path, final_field, targets
def setup_joins(self, names, opts, alias, can_reuse=None, allow_many=True,
allow_explicit_fk=False, outer_if_first=False):
"""
Compute the necessary table joins for the passage through the fields
given in 'names'. 'opts' is the Options class for the current model
(which gives the table we are starting from), 'alias' is the alias for
the table to start the joining from.
The 'can_reuse' defines the reverse foreign key joins we can reuse. It
can be None in which case all joins are reusable or a set of aliases
that can be reused. Note that non-reverse foreign keys are always
reusable when using setup_joins().
If 'allow_many' is False, then any reverse foreign key seen will
generate a MultiJoin exception.
The 'allow_explicit_fk' controls if field.attname is allowed in the
lookups.
Returns the final field involved in the joins, the target field (used
for any 'where' constraint), the final 'opts' value, the joins and the
field path travelled to generate the joins.
The target field is the field containing the concrete value. Final
field can be something different, for example foreign key pointing to
that value. Final field is needed for example in some value
conversions (convert 'obj' in fk__id=obj to pk val using the foreign
key field for example).
"""
joins = [alias]
# First, generate the path for the names
path, final_field, targets = self.names_to_path(
names, opts, allow_many, allow_explicit_fk)
# Then, add the path to the query's joins. Note that we can't trim
# joins at this stage - we will need the information about join type
# of the trimmed joins.
for pos, join in enumerate(path):
opts = join.to_opts
if join.direct:
nullable = self.is_nullable(join.join_field)
else:
nullable = True
connection = alias, opts.db_table, join.join_field.get_joining_columns()
reuse = can_reuse if join.m2m else None
alias = self.join(
connection, reuse=reuse, nullable=nullable, join_field=join.join_field,
outer_if_first=outer_if_first)
joins.append(alias)
if hasattr(final_field, 'field'):
final_field = final_field.field
return final_field, targets, opts, joins, path
def trim_joins(self, targets, joins, path):
"""
The 'target' parameter is the final field being joined to, 'joins'
is the full list of join aliases. The 'path' contain the PathInfos
used to create the joins.
Returns the final target field and table alias and the new active
joins.
We will always trim any direct join if we have the target column
available already in the previous table. Reverse joins can't be
trimmed as we don't know if there is anything on the other side of
the join.
"""
for pos, info in enumerate(reversed(path)):
if len(joins) == 1 or not info.direct:
break
join_targets = set(t.column for t in info.join_field.foreign_related_fields)
cur_targets = set(t.column for t in targets)
if not cur_targets.issubset(join_targets):
break
targets = tuple(r[0] for r in info.join_field.related_fields if r[1].column in cur_targets)
self.unref_alias(joins.pop())
return targets, joins[-1], joins
def split_exclude(self, filter_expr, prefix, can_reuse, names_with_path):
"""
When doing an exclude against any kind of N-to-many relation, we need
to use a subquery. This method constructs the nested query, given the
original exclude filter (filter_expr) and the portion up to the first
N-to-many relation field.
As an example we could have original filter ~Q(child__name='foo').
We would get here with filter_expr = child__name, prefix = child and
can_reuse is a set of joins usable for filters in the original query.
We will turn this into equivalent of:
WHERE NOT (pk IN (SELECT parent_id FROM thetable
WHERE name = 'foo' AND parent_id IS NOT NULL))
It might be worth it to consider using WHERE NOT EXISTS as that has
saner null handling, and is easier for the backend's optimizer to
handle.
"""
# Generate the inner query.
query = Query(self.model)
query.where.add(query.build_filter(filter_expr), AND)
query.bump_prefix()
query.clear_ordering(True)
# Try to have as simple as possible subquery -> trim leading joins from
# the subquery.
trimmed_prefix, contains_louter = query.trim_start(names_with_path)
query.remove_inherited_models()
# Add extra check to make sure the selected field will not be null
# since we are adding a IN <subquery> clause. This prevents the
# database from tripping over IN (...,NULL,...) selects and returning
# nothing
if self.is_nullable(query.select[0].field):
alias, col = query.select[0].col
query.where.add((Constraint(alias, col, query.select[0].field), 'isnull', False), AND)
condition = self.build_filter(
('%s__in' % trimmed_prefix, query),
current_negated=True, branch_negated=True, can_reuse=can_reuse)
if contains_louter:
or_null_condition = self.build_filter(
('%s__isnull' % trimmed_prefix, True),
current_negated=True, branch_negated=True, can_reuse=can_reuse)
condition.add(or_null_condition, OR)
# Note that the end result will be:
# (outercol NOT IN innerq AND outercol IS NOT NULL) OR outercol IS NULL.
# This might look crazy but due to how IN works, this seems to be
# correct. If the IS NOT NULL check is removed then outercol NOT
# IN will return UNKNOWN. If the IS NULL check is removed, then if
# outercol IS NULL we will not match the row.
return condition
def set_empty(self):
self.where = EmptyWhere()
self.having = EmptyWhere()
def is_empty(self):
return isinstance(self.where, EmptyWhere) or isinstance(self.having, EmptyWhere)
def set_limits(self, low=None, high=None):
"""
Adjusts the limits on the rows retrieved. We use low/high to set these,
as it makes it more Pythonic to read and write. When the SQL query is
created, they are converted to the appropriate offset and limit values.
Any limits passed in here are applied relative to the existing
constraints. So low is added to the current low value and both will be
clamped to any existing high value.
"""
if high is not None:
if self.high_mark is not None:
self.high_mark = min(self.high_mark, self.low_mark + high)
else:
self.high_mark = self.low_mark + high
if low is not None:
if self.high_mark is not None:
self.low_mark = min(self.high_mark, self.low_mark + low)
else:
self.low_mark = self.low_mark + low
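    # A worked example of the clamping above: starting from low_mark=0 and
    # high_mark=None, set_limits(5, 15) gives (5, 15); a further
    # set_limits(2, 4) yields high_mark = min(15, 5 + 4) = 9 and
    # low_mark = min(9, 5 + 2) = 7, i.e. qs[5:15][2:4] becomes OFFSET 7
    # LIMIT 2.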
def clear_limits(self):
"""
Clears any existing limits.
"""
self.low_mark, self.high_mark = 0, None
def can_filter(self):
"""
Returns True if adding filters to this instance is still possible.
Typically, this means no limits or offsets have been put on the results.
"""
return not self.low_mark and self.high_mark is None
def clear_select_clause(self):
"""
Removes all fields from SELECT clause.
"""
self.select = []
self.default_cols = False
self.select_related = False
self.set_extra_mask(())
self.set_aggregate_mask(())
def clear_select_fields(self):
"""
Clears the list of fields to select (but not extra_select columns).
Some queryset types completely replace any existing list of select
columns.
"""
self.select = []
def add_distinct_fields(self, *field_names):
"""
Adds and resolves the given fields to the query's "distinct on" clause.
"""
self.distinct_fields = field_names
self.distinct = True
def add_fields(self, field_names, allow_m2m=True):
"""
Adds the given (model) fields to the select set. The field names are
added in the order specified.
"""
alias = self.get_initial_alias()
opts = self.get_meta()
try:
for name in field_names:
field, targets, u2, joins, path = self.setup_joins(
name.split(LOOKUP_SEP), opts, alias, None, allow_m2m,
allow_explicit_fk=True, outer_if_first=True)
# Trim last join if possible
targets, final_alias, remaining_joins = self.trim_joins(targets, joins[-2:], path)
joins = joins[:-2] + remaining_joins
self.promote_joins(joins[1:])
for target in targets:
self.select.append(SelectInfo((final_alias, target.column), target))
except MultiJoin:
raise FieldError("Invalid field name: '%s'" % name)
except FieldError:
if LOOKUP_SEP in name:
# For lookups spanning over relationships, show the error
# from the model on which the lookup failed.
raise
else:
names = sorted(opts.get_all_field_names() + list(self.extra)
+ list(self.aggregate_select))
raise FieldError("Cannot resolve keyword %r into field. "
"Choices are: %s" % (name, ", ".join(names)))
self.remove_inherited_models()
def add_ordering(self, *ordering):
"""
Adds items from the 'ordering' sequence to the query's "order by"
clause. These items are either field names (not column names) --
possibly with a direction prefix ('-' or '?') -- or ordinals,
corresponding to column positions in the 'select' list.
If 'ordering' is empty, all ordering is cleared from the query.
"""
errors = []
for item in ordering:
if not ORDER_PATTERN.match(item):
errors.append(item)
if errors:
raise FieldError('Invalid order_by arguments: %s' % errors)
if ordering:
self.order_by.extend(ordering)
else:
self.default_ordering = False
def clear_ordering(self, force_empty):
"""
Removes any ordering settings. If 'force_empty' is True, there will be
no ordering in the resulting query (not even the model's default).
"""
self.order_by = []
self.extra_order_by = ()
if force_empty:
self.default_ordering = False
def set_group_by(self):
"""
Expands the GROUP BY clause required by the query.
This will usually be the set of all non-aggregate fields in the
return data. If the database backend supports grouping by the
primary key, and the query would be equivalent, the optimization
will be made automatically.
"""
self.group_by = []
for col, _ in self.select:
self.group_by.append(col)
def add_count_column(self):
"""
Converts the query to do count(...) or count(distinct(pk)) in order to
get its size.
"""
if not self.distinct:
if not self.select:
count = self.aggregates_module.Count('*', is_summary=True)
else:
assert len(self.select) == 1, \
"Cannot add count col with multiple cols in 'select': %r" % self.select
count = self.aggregates_module.Count(self.select[0].col)
else:
opts = self.get_meta()
if not self.select:
count = self.aggregates_module.Count(
(self.join((None, opts.db_table, None)), opts.pk.column),
is_summary=True, distinct=True)
else:
# Because of SQL portability issues, multi-column, distinct
# counts need a sub-query -- see get_count() for details.
assert len(self.select) == 1, \
"Cannot add count col with multiple cols in 'select'."
count = self.aggregates_module.Count(self.select[0].col, distinct=True)
# Distinct handling is done in Count(), so don't do it at this
# level.
self.distinct = False
# Set only aggregate to be the count column.
# Clear out the select cache to reflect the new unmasked aggregates.
self.aggregates = {None: count}
self.set_aggregate_mask(None)
self.group_by = None
def add_select_related(self, fields):
"""
Sets up the select_related data structure so that we only select
certain related models (as opposed to all models, when
self.select_related=True).
"""
field_dict = {}
for field in fields:
d = field_dict
for part in field.split(LOOKUP_SEP):
d = d.setdefault(part, {})
self.select_related = field_dict
self.related_select_cols = []
def add_extra(self, select, select_params, where, params, tables, order_by):
"""
Adds data to the various extra_* attributes for user-created additions
to the query.
"""
if select:
# We need to pair any placeholder markers in the 'select'
# dictionary with their parameters in 'select_params' so that
# subsequent updates to the select dictionary also adjust the
# parameters appropriately.
select_pairs = SortedDict()
if select_params:
param_iter = iter(select_params)
else:
param_iter = iter([])
for name, entry in select.items():
entry = force_text(entry)
entry_params = []
pos = entry.find("%s")
while pos != -1:
entry_params.append(next(param_iter))
pos = entry.find("%s", pos + 2)
select_pairs[name] = (entry, entry_params)
# This is order preserving, since self.extra_select is a SortedDict.
self.extra.update(select_pairs)
if where or params:
self.where.add(ExtraWhere(where, params), AND)
if tables:
self.extra_tables += tuple(tables)
if order_by:
self.extra_order_by = order_by
def clear_deferred_loading(self):
"""
Remove any fields from the deferred loading set.
"""
self.deferred_loading = (set(), True)
def add_deferred_loading(self, field_names):
"""
Add the given list of model field names to the set of fields to
exclude from loading from the database when automatic column selection
is done. The new field names are added to any existing field names that
are deferred (or removed from any existing field names that are marked
as the only ones for immediate loading).
"""
# Fields on related models are stored in the literal double-underscore
        # format, so that we can use a set data structure. We do the foo__bar
        # splitting and handling when computing the SQL column names (as part of
# get_columns()).
existing, defer = self.deferred_loading
if defer:
# Add to existing deferred names.
self.deferred_loading = existing.union(field_names), True
else:
# Remove names from the set of any existing "immediate load" names.
self.deferred_loading = existing.difference(field_names), False
def add_immediate_loading(self, field_names):
"""
Add the given list of model field names to the set of fields to
retrieve when the SQL is executed ("immediate loading" fields). The
field names replace any existing immediate loading field names. If
there are field names already specified for deferred loading, those
names are removed from the new field_names before storing the new names
for immediate loading. (That is, immediate loading overrides any
existing immediate values, but respects existing deferrals.)
"""
existing, defer = self.deferred_loading
field_names = set(field_names)
if 'pk' in field_names:
field_names.remove('pk')
field_names.add(self.get_meta().pk.name)
if defer:
# Remove any existing deferred names from the current set before
# setting the new names.
self.deferred_loading = field_names.difference(existing), False
else:
# Replace any existing "immediate load" field names.
self.deferred_loading = field_names, False
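    # A sketch of how the two methods compose (they back defer()/only() in
    # the public API): add_deferred_loading(set(['body'])) stores
    # (set(['body']), True); a following add_immediate_loading(['headline'])
    # replaces that with the new names minus any still-deferred ones, giving
    # (set(['headline']), False).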
def get_loaded_field_names(self):
"""
If any fields are marked to be deferred, returns a dictionary mapping
models to a set of names in those fields that will be loaded. If a
        model is not in the returned dictionary, none of its fields are
deferred.
If no fields are marked for deferral, returns an empty dictionary.
"""
# We cache this because we call this function multiple times
# (compiler.fill_related_selections, query.iterator)
try:
return self._loaded_field_names_cache
except AttributeError:
collection = {}
self.deferred_to_data(collection, self.get_loaded_field_names_cb)
self._loaded_field_names_cache = collection
return collection
def get_loaded_field_names_cb(self, target, model, fields):
"""
        Callback used by get_loaded_field_names().
"""
target[model] = set([f.name for f in fields])
def set_aggregate_mask(self, names):
"Set the mask of aggregates that will actually be returned by the SELECT"
if names is None:
self.aggregate_select_mask = None
else:
self.aggregate_select_mask = set(names)
self._aggregate_select_cache = None
def set_extra_mask(self, names):
"""
        Set the mask of extra select items that will be returned by SELECT.
        We don't actually remove them from the Query since they might be used
later
"""
if names is None:
self.extra_select_mask = None
else:
self.extra_select_mask = set(names)
self._extra_select_cache = None
def _aggregate_select(self):
"""The SortedDict of aggregate columns that are not masked, and should
be used in the SELECT clause.
This result is cached for optimization purposes.
"""
if self._aggregate_select_cache is not None:
return self._aggregate_select_cache
elif self.aggregate_select_mask is not None:
self._aggregate_select_cache = SortedDict([
                (k, v) for k, v in self.aggregates.items()
if k in self.aggregate_select_mask
])
return self._aggregate_select_cache
else:
return self.aggregates
aggregate_select = property(_aggregate_select)
def _extra_select(self):
if self._extra_select_cache is not None:
return self._extra_select_cache
elif self.extra_select_mask is not None:
self._extra_select_cache = SortedDict([
                (k, v) for k, v in self.extra.items()
if k in self.extra_select_mask
])
return self._extra_select_cache
else:
return self.extra
extra_select = property(_extra_select)
def trim_start(self, names_with_path):
"""
Trims joins from the start of the join path. The candidates for trim
are the PathInfos in names_with_path structure that are m2m joins.
Also sets the select column so the start matches the join.
This method is meant to be used for generating the subquery joins &
cols in split_exclude().
Returns a lookup usable for doing outerq.filter(lookup=self). Returns
also if the joins in the prefix contain a LEFT OUTER join.
_"""
all_paths = []
for _, paths in names_with_path:
all_paths.extend(paths)
contains_louter = False
# Trim and operate only on tables that were generated for
# the lookup part of the query. That is, avoid trimming
# joins generated for F() expressions.
lookup_tables = [t for t in self.tables if t in self._lookup_joins or t == self.tables[0]]
for trimmed_paths, path in enumerate(all_paths):
if path.m2m:
break
if self.alias_map[lookup_tables[trimmed_paths + 1]].join_type == self.LOUTER:
contains_louter = True
self.unref_alias(lookup_tables[trimmed_paths])
        # The path.join_field is a Rel, let's get the other side's field
join_field = path.join_field.field
# Build the filter prefix.
paths_in_prefix = trimmed_paths
trimmed_prefix = []
for name, path in names_with_path:
if paths_in_prefix - len(path) < 0:
break
trimmed_prefix.append(name)
paths_in_prefix -= len(path)
trimmed_prefix.append(
join_field.foreign_related_fields[0].name)
trimmed_prefix = LOOKUP_SEP.join(trimmed_prefix)
        # Let's still see if we can trim the first join from the inner query
# (that is, self). We can't do this for LEFT JOINs because we would
# miss those rows that have nothing on the outer side.
if self.alias_map[lookup_tables[trimmed_paths + 1]].join_type != self.LOUTER:
select_fields = [r[0] for r in join_field.related_fields]
select_alias = lookup_tables[trimmed_paths + 1]
self.unref_alias(lookup_tables[trimmed_paths])
extra_restriction = join_field.get_extra_restriction(
self.where_class, None, lookup_tables[trimmed_paths + 1])
if extra_restriction:
self.where.add(extra_restriction, AND)
else:
# TODO: It might be possible to trim more joins from the start of the
# inner query if it happens to have a longer join chain containing the
            # values in select_fields. Let's punt this one for now.
select_fields = [r[1] for r in join_field.related_fields]
select_alias = lookup_tables[trimmed_paths]
self.select = [SelectInfo((select_alias, f.column), f) for f in select_fields]
return trimmed_prefix, contains_louter
def is_nullable(self, field):
"""
A helper to check if the given field should be treated as nullable.
Some backends treat '' as null and Django treats such fields as
nullable for those backends. In such situations field.null can be
False even if we should treat the field as nullable.
"""
# We need to use DEFAULT_DB_ALIAS here, as QuerySet does not have
# (nor should it have) knowledge of which connection is going to be
# used. The proper fix would be to defer all decisions where
# is_nullable() is needed to the compiler stage, but that is not easy
# to do currently.
if ((connections[DEFAULT_DB_ALIAS].features.interprets_empty_strings_as_nulls)
and field.empty_strings_allowed):
return True
else:
return field.null
def get_order_dir(field, default='ASC'):
"""
Returns the field name and direction for an order specification. For
example, '-foo' is returned as ('foo', 'DESC').
The 'default' param is used to indicate which way no prefix (or a '+'
prefix) should sort. The '-' prefix always sorts the opposite way.
"""
dirn = ORDER_DIR[default]
if field[0] == '-':
return field[1:], dirn[1]
return field, dirn[0]
def add_to_dict(data, key, value):
"""
A helper function to add "value" to the set of values for "key", whether or
not "key" already exists.
"""
if key in data:
data[key].add(value)
else:
data[key] = set([value])
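# For example: d = {}; add_to_dict(d, 'k', 1); add_to_dict(d, 'k', 2)
# leaves d == {'k': set([1, 2])}.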
def is_reverse_o2o(field):
"""
A little helper to check if the given field is reverse-o2o. The field is
expected to be some sort of relation field or related object.
"""
return not hasattr(field, 'rel') and field.field.unique
def alias_diff(refcounts_before, refcounts_after):
"""
Given the before and after copies of refcounts works out which aliases
have been added to the after copy.
"""
# Use -1 as default value so that any join that is created, then trimmed
# is seen as added.
return set(t for t in refcounts_after
if refcounts_after[t] > refcounts_before.get(t, -1))
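# For example: alias_diff({'T1': 1}, {'T1': 1, 'T2': 1}) == set(['T2']), and
# thanks to the -1 default an alias that was created and then trimmed back to
# a refcount of 0 still counts as added.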
| apache-2.0 |
Phoenix-Silver/Zte-Blade-New-35-kernel | arch/ia64/scripts/unwcheck.py | 13143 | 1714 | #!/usr/bin/python
#
# Usage: unwcheck.py FILE
#
# This script checks the unwind info of each function in file FILE
# and verifies that the sum of the region-lengths matches the total
# length of the function.
#
# Based on a shell/awk script originally written by Harish Patil,
# which was converted to Perl by Matthew Chapman, which was converted
# to Python by David Mosberger.
#
import os
import re
import sys
if len(sys.argv) != 2:
print "Usage: %s FILE" % sys.argv[0]
sys.exit(2)
readelf = os.getenv("READELF", "readelf")
start_pattern = re.compile("<([^>]*)>: \[0x([0-9a-f]+)-0x([0-9a-f]+)\]")
rlen_pattern = re.compile(".*rlen=([0-9]+)")
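# For illustration (made-up values), start_pattern matches "readelf -u" lines
# such as:
#   <my_func>: [0x4000000000000040-0x40000000000000c0]
# and rlen_pattern matches region lines containing e.g. "rlen=12".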
def check_func (func, slots, rlen_sum):
if slots != rlen_sum:
global num_errors
num_errors += 1
if not func: func = "[%#x-%#x]" % (start, end)
print "ERROR: %s: %lu slots, total region length = %lu" % (func, slots, rlen_sum)
return
num_funcs = 0
num_errors = 0
func = False
slots = 0
rlen_sum = 0
for line in os.popen("%s -u %s" % (readelf, sys.argv[1])):
m = start_pattern.match(line)
if m:
check_func(func, slots, rlen_sum)
func = m.group(1)
start = long(m.group(2), 16)
end = long(m.group(3), 16)
slots = 3 * (end - start) / 16
rlen_sum = 0L
num_funcs += 1
else:
m = rlen_pattern.match(line)
if m:
rlen_sum += long(m.group(1))
check_func(func, slots, rlen_sum)
if num_errors == 0:
print "No errors detected in %u functions." % num_funcs
else:
if num_errors > 1:
err="errors"
else:
err="error"
print "%u %s detected in %u functions." % (num_errors, err, num_funcs)
sys.exit(1)
| gpl-2.0 |
Diwahars/pycon | pycon/finaid/tests/test_views.py | 1 | 23603 | # coding=utf-8
from cStringIO import StringIO
import csv
import datetime
from decimal import Decimal
from django.conf import settings
from django.core import mail
from django.core.urlresolvers import reverse
from django.test import TestCase
from mock import patch
from pycon.finaid.models import FinancialAidApplication, \
FinancialAidApplicationPeriod, FinancialAidMessage, \
FinancialAidEmailTemplate, STATUS_SUBMITTED, FinancialAidReviewData, \
STATUS_INFO_NEEDED, STATUS_OFFERED, STATUS_ACCEPTED, STATUS_DECLINED, STATUS_WITHDRAWN, \
STATUS_NEED_MORE
from pycon.finaid.utils import email_address
from .utils import TestMixin, create_application, ReviewTestMixin
from symposion.conference.models import Conference
today = datetime.date.today()
now = datetime.datetime.now()
one_day = datetime.timedelta(days=1)
class TestFinaidApplicationView(TestCase, TestMixin):
def setUp(self):
self.edit_url = reverse('finaid_edit')
self.dashboard_url = reverse('dashboard')
self.login_url = reverse('account_login')
self.user = self.create_user()
# financial aid applications are open
self.period = FinancialAidApplicationPeriod.objects.create(
start=today - one_day,
end=today + one_day
)
Conference.objects.get_or_create(id=settings.CONFERENCE_ID)
def test_not_logged_in(self):
# If not logged in, view redirects to login
expected_url = self.login_url + "?next=" + self.edit_url
rsp = self.client.get(self.edit_url)
self.assertRedirects(rsp, expected_url)
rsp = self.client.post(self.edit_url)
self.assertRedirects(rsp, expected_url)
def test_logged_in_applications_open(self):
# If logged in and applications open, we can view the view
self.login()
rsp = self.client.get(self.edit_url)
self.assertEqual(200, rsp.status_code)
# and context has a form
form = rsp.context['form']
# the form is set up to do an application for the current user
self.assertEqual(self.user, form.instance.user)
def test_logged_in_applications_closed(self):
# If logged in and applications closed, we redirect to dashboard
# We also display a message
self.login()
# Applications ended long ago
self.period.end = datetime.datetime(1972, 1, 1)
self.period.save()
rsp = self.client.get(self.edit_url)
self.assertRedirects(rsp, self.dashboard_url)
# And a message was displayed
# Need to tell the test client to follow the redirect if we want
# to see the message
rsp = self.client.get(self.edit_url, follow=True)
context = rsp.context
self.assertIn('messages', context)
self.assertEqual(1, len(context['messages']))
def test_submit(self):
# Submit an application
self.login()
data = dict(
profession="Foo",
experience_level="lots",
what_you_want="money",
use_of_python="fun",
presenting='1',
amount_requested="0.00",
travel_plans="get there",
)
self.assertEqual(0, len(mail.outbox))
rsp = self.client.post(self.edit_url, data)
self.assertRedirects(rsp, self.dashboard_url)
# There's an application for this user now
app = FinancialAidApplication.objects.get(user=self.user)
self.assertEqual("Foo", app.profession)
# And an email was sent to both user and committee
self.assertEqual(2, len(mail.outbox))
msg = mail.outbox[0]
# print("From: %s\nTo: %s\nSubject: %s\n\n%s" %
# (msg.from_email, ", ".join(msg.recipients()),
# msg.subject, msg.body))
self.assertIn(app.user.email, msg.recipients())
self.assertEqual(email_address(), msg.from_email)
self.assertIn("received", msg.body)
msg = mail.outbox[1]
self.assertIn(email_address(), msg.recipients())
self.assertEqual(app.user.email, msg.from_email)
self.assertIn("submitted", msg.body)
# And a message was displayed
# Need to tell the test client to follow the redirect if we want
# to see the message
rsp = self.client.post(self.edit_url, data, follow=True)
context = rsp.context
self.assertIn('messages', context)
self.assertEqual(1, len(context['messages']))
def test_edit(self):
# Edit an application
self.login()
# Existing application
FinancialAidApplication.objects.create(
user=self.user,
profession="Foo",
experience_level="lots",
what_you_want="money",
use_of_python="fun",
presenting=1,
travel_plans="get there",
)
# New data
data = dict(
profession="Gourmet",
experience_level="none",
what_you_want="money",
use_of_python="fun",
presenting='1',
amount_requested="0.00",
travel_plans="get there quickly",
)
self.assertEqual(0, len(mail.outbox))
rsp = self.client.post(self.edit_url, data)
self.assertRedirects(rsp, self.dashboard_url)
# And the application now has new data
app = FinancialAidApplication.objects.get(user=self.user)
self.assertEqual("Gourmet", app.profession)
self.assertEqual("none", app.experience_level)
# And an email was sent to user and committee
self.assertEqual(2, len(mail.outbox))
msg = mail.outbox[0]
self.assertIn("edited", msg.body)
self.assertIn(email_address(), msg.from_email)
self.assertIn(app.user.email, msg.recipients())
self.assertIn(app.fa_app_url(), msg.body)
msg = mail.outbox[1]
self.assertIn("edited", msg.body)
self.assertIn(email_address(), msg.recipients())
self.assertIn(app.user.email, msg.from_email)
self.assertIn(app.fa_app_url(), msg.body)
# And a message was displayed
# Need to tell the test client to follow the redirect if we want
# to see the message
rsp = self.client.post(self.edit_url, data, follow=True)
context = rsp.context
self.assertIn('messages', context)
self.assertEqual(1, len(context['messages']))
class TestFinaidStatusView(TestCase, TestMixin):
def setUp(self):
self.edit_url = reverse('finaid_edit')
self.dashboard_url = reverse('dashboard')
self.login_url = reverse('account_login')
self.user = self.create_user()
# financial aid applications are open
self.period = FinancialAidApplicationPeriod.objects.create(
start=today - one_day,
end=today + one_day
)
Conference.objects.get_or_create(id=settings.CONFERENCE_ID)
def test_applicant_cant_see_private_messages(self):
self.login()
application = create_application(user=self.user)
application.save()
# Create a 2nd user to make a message
user2 = self.create_user(username="fred", email="fred@example.com")
# Make message
FinancialAidMessage.objects.create(user=user2,
application=application,
visible=False,
message="Burma Shave!")
# Make visible message, just to be sure we're seeing some messages
FinancialAidMessage.objects.create(user=user2,
application=application,
visible=True,
message="Star Trek!")
# Status view
url = reverse("finaid_status")
rsp = self.client.get(url)
self.assertContains(rsp, "Star Trek!")
self.assertNotContains(rsp, "Burma Shave!")
class TestFinaidEmailView(TestCase, TestMixin, ReviewTestMixin):
def setUp(self):
self.user = self.create_user()
self.make_reviewer(self.user)
self.login()
self.application = create_application(user=self.user)
self.application.save()
self.url = reverse('finaid_email', kwargs={'pks': self.application.pk})
# Create 2nd user and application, just to make sure we're only
# using the ones that were asked for and not all of them.
self.user2 = self.create_user(username="jill",
email="jill@example.com")
self.application2 = create_application(user=self.user2)
self.application2.save()
def test_email_view(self):
# Just look at the email view, check the context
rsp = self.client.get(self.url)
if rsp.status_code == 302:
self.fail(rsp['Location'])
self.assertEqual(200, rsp.status_code)
context = rsp.context
self.assertEqual([self.user], context['users'])
@patch('django.template.Template.render')
@patch('pycon.finaid.views.send_mass_mail')
def test_email_submit(self, mock_send_mass_mail, mock_render):
# Actually submit the thing
# Create review record
# Most fields are optional
data = {
'application': self.application,
'status': STATUS_SUBMITTED,
'amount': Decimal('0.00'),
}
review = FinancialAidReviewData(**data)
review.save()
subject = 'TEST SUBJECT'
template_text = 'THE TEMPLATE'
FinancialAidEmailTemplate.objects.create(
name='template',
template="wrong template"
)
template2 = FinancialAidEmailTemplate.objects.create(
name='template',
template=template_text,
)
data = {
'template': template2.pk,
'subject': subject,
}
mock_render.return_value = template_text
rsp = self.client.post(self.url, data)
self.assertEqual(302, rsp.status_code, rsp.content)
# we tried to send the right emails
expected_msgs = [(subject, template_text, email_address(),
[self.user.email])]
mock_send_mass_mail.assert_called_with(expected_msgs)
# the template was rendered with a good context
context = mock_render.call_args[0][0]
self.assertEqual(self.application, context['application'])
self.assertEqual(review, context['review'])
class TestFinaidMessageView(TestCase, TestMixin, ReviewTestMixin):
def setUp(self):
self.user = self.create_user()
self.make_reviewer(self.user)
self.login()
Conference.objects.get_or_create(id=settings.CONFERENCE_ID)
def test_reviewers_only(self):
self.make_not_reviewer(self.user)
user1 = self.create_user("bob", "bob@example.com", "snoopy")
application1 = create_application(user1)
application1.save()
url = reverse('finaid_message', kwargs={'pks': str(application1.pk)})
rsp = self.client.get(url)
self.assertEqual(403, rsp.status_code)
rsp = self.client.post(url)
self.assertEqual(403, rsp.status_code)
def test_no_applications(self):
# No applications selected - redirect back to reviewing page
# (select a non-existing application to get past the URL pattern)
url = reverse('finaid_message', kwargs={'pks': '999'})
rsp = self.client.get(url)
self.assertEqual(302, rsp.status_code)
def test_messaging(self):
# Create a couple users and applications
user1 = self.create_user("bob", "bob@example.com", "snoopy")
user2 = self.create_user("fred", "fred@example.com", "linus")
application1 = create_application(user1)
application1.save()
application2 = create_application(user2)
application2.save()
# We can display the page prompting for a message to send them
pks = ','.join(str(a.pk) for a in FinancialAidApplication.objects.all())
url = reverse('finaid_message', kwargs={'pks': pks})
rsp = self.client.get(url)
self.assertEqual(200, rsp.status_code)
context = rsp.context
applications = context['applications']
self.assertEqual(2, len(applications))
self.assertIn(application1, applications)
self.assertIn(application2, applications)
# "Send" a message to those two applications
test_message = 'One if by land and two if by sea'
data = {
'visible': 'checked',
'message': test_message,
}
rsp = self.client.post(url, data=data)
self.assertEqual(302, rsp.status_code)
msg1 = FinancialAidMessage.objects.get(application=application1)
self.assertEqual(test_message, msg1.message)
msg2 = FinancialAidMessage.objects.get(application=application2)
self.assertEqual(test_message, msg2.message)
# For each message, it's visible, so it should have been emailed to
# both the applicant and the reviewers. Total: 4 messages
self.assertEqual(4, len(mail.outbox))
class TestCSVExport(TestMixin, ReviewTestMixin, TestCase):
def setUp(self):
super(TestCSVExport, self).setUp()
self.url = reverse('finaid_download_csv')
self.login_url = reverse('account_login')
self.user = self.create_user()
self.make_reviewer(self.user)
def get_csv(self):
# Call the URL, get the response, parse it strictly as CSV,
# and return the list of dictionaries
rsp = self.client.get(self.url)
self.assertEqual(200, rsp.status_code)
dialect = csv.excel()
dialect.strict = True
reader = csv.DictReader(StringIO(rsp.content), dialect=dialect)
result = []
for item in reader:
for k, v in item.iteritems():
item[k] = v.decode('utf-8')
result.append(item)
return result
def test_not_logged_in(self):
# If not logged in, view redirects to login
expected_url = self.login_url + "?next=" + self.url
rsp = self.client.get(self.url)
self.assertRedirects(rsp, expected_url)
rsp = self.client.post(self.url)
self.assertRedirects(rsp, expected_url)
def test_reviewers_only(self):
# Only reviewers can download the data
self.make_not_reviewer(self.user)
self.login()
rsp = self.client.get(self.url)
self.assertEqual(403, rsp.status_code)
# and non-reviewers don't see the download link on their dashboard
rsp = self.client.get(reverse('dashboard'))
self.assertEqual(200, rsp.status_code)
self.assertNotContains(rsp, self.url)
def test_link_on_dashboard(self):
# Reviewers get a link on their dashboard
self.login()
rsp = self.client.get(reverse('dashboard'))
self.assertEqual(200, rsp.status_code)
self.assertContains(rsp, self.url, msg_prefix=rsp.content.decode('utf-8'))
def test_empty_data(self):
# No data, should be able to get a CSV response anyway
self.login()
result = self.get_csv()
self.assertEqual(0, len(result))
def test_one_application(self):
# One application that has review data
# Include non-ASCII to be sure that doesn't break anything
application = FinancialAidApplication.objects.create(
user=self.user,
profession=u"Föo",
experience_level="lots",
what_you_want=u"money\nand\n'lóts' of it.",
use_of_python="fun",
presenting=1,
)
FinancialAidReviewData.objects.create(
application=application,
status=STATUS_INFO_NEEDED,
amount=Decimal('2.45'),
)
self.login()
result = self.get_csv()
self.assertEqual(1, len(result))
app = result[0]
self.assertEqual(unicode(application.user), app['user'])
self.assertEqual(application.experience_level, app['experience_level'])
self.assertEqual("Yes", app['presenting'])
self.assertEqual("Information needed", app['status'])
self.assertEqual(self.user.email, app['email'])
def test_two_applications(self):
# A couple users and applications, without review data
user1 = self.create_user("bob", "bob@example.com", "snoopy")
user2 = self.create_user("fred", "fred@example.com", "linus")
application1 = create_application(user1,
experience_level="foo\nbar")
application1.save()
application2 = create_application(user2)
application2.save()
self.login()
result = self.get_csv()
self.assertEqual(2, len(result))
app = result[0]
self.assertEqual(unicode(application1.user), app['user'])
self.assertEqual(application1.experience_level, app['experience_level'])
self.assertEqual('Submitted', app['status'])
self.assertEqual(user1.email, app['email'])
class TestFinaidDashboardButtons(TestCase, TestMixin):
def setUp(self):
super(TestFinaidDashboardButtons, self).setUp()
self.dashboard_url = reverse('dashboard')
self.login_url = reverse('account_login')
self.user = self.create_user()
self.login()
# financial aid applications are open
self.period = FinancialAidApplicationPeriod.objects.create(
start=today - one_day,
end=today + one_day
)
Conference.objects.get_or_create(id=settings.CONFERENCE_ID)
def assert_buttons(self, button_list):
"""
Assert that the buttons named in the list are displayed, and
any not named are not.
Button names are the corresponding URL name, e.g. 'finaid_edit'
or 'finaid_withdraw'.
"""
rsp = self.client.get(self.dashboard_url)
self.assertEqual(200, rsp.status_code)
all_buttons = [
'finaid_apply', 'finaid_withdraw',
'finaid_decline', 'finaid_accept', 'finaid_request_more',
'finaid_edit', 'finaid_status', 'finaid_review',
'finaid_download_csv', 'finaid_provide_info'
]
buttons_in_page = set()
for name in all_buttons:
text_repr, real_count, msg_prefix = self._assert_contains(
rsp, reverse(name), 200, '', False)
if real_count:
buttons_in_page.add(name)
self.assertEqual(set(button_list), buttons_in_page,
msg="Expected %r on page, but found %r." % (button_list, buttons_in_page))
def test_applications_not_open_no_application(self):
self.period.delete()
self.assert_buttons([])
def test_applications_not_open_with_application(self):
self.period.delete()
application = create_application(user=self.user, save=True)
self.assert_buttons(['finaid_edit', 'finaid_status', 'finaid_withdraw'])
def test_not_applied(self):
self.assert_buttons(['finaid_apply'])
def test_just_submitted(self):
application = create_application(user=self.user, save=True)
self.assert_buttons(['finaid_edit', 'finaid_status', 'finaid_withdraw'])
def test_offered(self):
application = create_application(user=self.user, save=True)
application.set_status(STATUS_OFFERED, save=True)
self.assert_buttons(['finaid_accept', 'finaid_decline', 'finaid_request_more',
'finaid_status'])
def test_accepted(self):
application = create_application(user=self.user, save=True)
application.set_status(STATUS_ACCEPTED, save=True)
self.assert_buttons(['finaid_status'])
def test_declined(self):
application = create_application(user=self.user, save=True)
application.set_status(STATUS_DECLINED, save=True)
self.assert_buttons(['finaid_status'])
def test_withdrawn(self):
application = create_application(user=self.user, save=True)
application.set_status(STATUS_WITHDRAWN, save=True)
self.assert_buttons(['finaid_apply'])
def test_info_needed(self):
application = create_application(user=self.user, save=True)
application.set_status(STATUS_INFO_NEEDED, save=True)
self.assert_buttons(['finaid_status', 'finaid_provide_info', 'finaid_withdraw'])
class FinaidViewTestMixin(object):
post_kwargs = {}
def setUp(self):
super(FinaidViewTestMixin, self).setUp()
self.dashboard_url = reverse('dashboard')
self.login_url = reverse('account_login')
self.user = self.create_user()
self.login()
# financial aid applications are open
self.period = FinancialAidApplicationPeriod.objects.create(
start=today - one_day,
end=today + one_day
)
Conference.objects.get_or_create(id=settings.CONFERENCE_ID)
self.url = reverse(self.url_name)
application = create_application(user=self.user, save=True)
application.set_status(self.initial_status, save=True)
def test_get_page(self):
rsp = self.client.get(self.url)
self.assertEqual(200, rsp.status_code)
application = FinancialAidApplication.objects.get(user=self.user)
self.assertEqual(self.initial_status, application.status)
def test_submit(self):
rsp = self.client.post(self.url, self.post_kwargs)
self.assertRedirects(rsp, reverse('dashboard'))
application = FinancialAidApplication.objects.get(user=self.user)
self.assertEqual(self.final_status, application.status)
self.assertEqual(len(mail.outbox), 1)
self.assertEqual(mail.outbox[0].subject, self.subject)
class TestFinaidAccept(FinaidViewTestMixin, TestMixin, TestCase):
url_name = 'finaid_accept'
initial_status = STATUS_OFFERED
final_status = STATUS_ACCEPTED
subject = 'Joe Smith has accepted their financial aid offer'
class TestFinaidDecline(FinaidViewTestMixin, TestMixin, TestCase):
url_name = 'finaid_decline'
initial_status = STATUS_OFFERED
final_status = STATUS_DECLINED
subject = 'Joe Smith has declined their financial aid offer'
class TestFinaidWithdraw(FinaidViewTestMixin, TestMixin, TestCase):
url_name = 'finaid_withdraw'
initial_status = STATUS_SUBMITTED
final_status = STATUS_WITHDRAWN
subject = 'Joe Smith has withdrawn their financial aid application'
class TestFinaidWithdrawWhenInfoNeeded(FinaidViewTestMixin, TestMixin, TestCase):
url_name = 'finaid_withdraw'
initial_status = STATUS_INFO_NEEDED
final_status = STATUS_WITHDRAWN
subject = 'Joe Smith has withdrawn their financial aid application'
class TestProvideInfoNeeded(FinaidViewTestMixin, TestMixin, TestCase):
url_name = 'finaid_provide_info'
initial_status = STATUS_INFO_NEEDED
final_status = STATUS_SUBMITTED
post_kwargs = {'message': 'Here you go'}
subject = 'Message from Joe Smith providing requested information.'
class TestRequestMore(FinaidViewTestMixin, TestMixin, TestCase):
url_name = 'finaid_request_more'
initial_status = STATUS_OFFERED
final_status = STATUS_NEED_MORE
post_kwargs = {'message': 'I am greed'}
subject = 'Message from Joe Smith requesting more assistance.'
| bsd-3-clause |
MattOates/pgcli | pgcli/packages/pgspecial/namedqueries.py | 15 | 2062 | # -*- coding: utf-8 -*-
class NamedQueries(object):
section_name = 'named queries'
usage = u'''Named Queries are a way to save frequently used queries
with a short name. Think of them as favorites.
Examples:
# Save a new named query.
> \\ns simple select * from abc where a is not Null;
# List all named queries.
> \\n
╒════════╤═══════════════════════════════════════╕
│ Name │ Query │
╞════════╪═══════════════════════════════════════╡
│ simple │ SELECT * FROM abc where a is not NULL │
╘════════╧═══════════════════════════════════════╛
# Run a named query.
> \\n simple
╒════════╤════════╕
│ a │ b │
╞════════╪════════╡
│ 日本語 │ 日本語 │
╘════════╧════════╛
# Delete a named query.
> \\nd simple
simple: Deleted
'''
def __init__(self, config):
self.config = config
def list(self):
return self.config.get(self.section_name, [])
def get(self, name):
return self.config.get(self.section_name, {}).get(name, None)
def save(self, name, query):
if self.section_name not in self.config:
self.config[self.section_name] = {}
self.config[self.section_name][name] = query
self.config.write()
def delete(self, name):
try:
del self.config[self.section_name][name]
except KeyError:
return '%s: Not Found.' % name
self.config.write()
return '%s: Deleted' % name
from ...config import load_config
namedqueries = NamedQueries(load_config('~/.pgclirc'))
| bsd-3-clause |
Jamlum/pytomo | pytomo/web/wsgiserver/__init__.py | 77 | 85282 | """A high-speed, production ready, thread pooled, generic HTTP server.
Simplest example on how to use this module directly
(without using CherryPy's application machinery)::
from cherrypy import wsgiserver
def my_crazy_app(environ, start_response):
status = '200 OK'
response_headers = [('Content-type','text/plain')]
start_response(status, response_headers)
return ['Hello world!']
server = wsgiserver.CherryPyWSGIServer(
('0.0.0.0', 8070), my_crazy_app,
server_name='www.cherrypy.example')
server.start()
The CherryPy WSGI server can serve as many WSGI applications
as you want in one instance by using a WSGIPathInfoDispatcher::
d = WSGIPathInfoDispatcher({'/': my_crazy_app, '/blog': my_blog_app})
server = wsgiserver.CherryPyWSGIServer(('0.0.0.0', 80), d)
Want SSL support? Just set server.ssl_adapter to an SSLAdapter instance.
This won't call the CherryPy engine (application side) at all, only the
HTTP server, which is independent from the rest of CherryPy. Don't
let the name "CherryPyWSGIServer" throw you; the name merely reflects
its origin, not its coupling.
For those of you wanting to understand internals of this module, here's the
basic call flow. The server's listening thread runs a very tight loop,
sticking incoming connections onto a Queue::
server = CherryPyWSGIServer(...)
server.start()
while True:
tick()
# This blocks until a request comes in:
child = socket.accept()
conn = HTTPConnection(child, ...)
server.requests.put(conn)
Worker threads are kept in a pool and poll the Queue, popping off and then
handling each connection in turn. Each connection can consist of an arbitrary
number of requests and their responses, so we run a nested loop::
while True:
conn = server.requests.get()
conn.communicate()
-> while True:
req = HTTPRequest(...)
req.parse_request()
-> # Read the Request-Line, e.g. "GET /page HTTP/1.1"
req.rfile.readline()
read_headers(req.rfile, req.inheaders)
req.respond()
-> response = app(...)
try:
for chunk in response:
if chunk:
req.write(chunk)
finally:
if hasattr(response, "close"):
response.close()
if req.close_connection:
return
"""
CRLF = '\r\n'
import os
import Queue
import re
quoted_slash = re.compile("(?i)%2F")
import rfc822
import socket
import sys
if 'win' in sys.platform and not hasattr(socket, 'IPPROTO_IPV6'):
socket.IPPROTO_IPV6 = 41
try:
import cStringIO as StringIO
except ImportError:
import StringIO
DEFAULT_BUFFER_SIZE = -1
_fileobject_uses_str_type = isinstance(socket._fileobject(None)._rbuf, basestring)
import threading
import time
import traceback
def format_exc(limit=None):
"""Like print_exc() but return a string. Backport for Python 2.3."""
try:
etype, value, tb = sys.exc_info()
return ''.join(traceback.format_exception(etype, value, tb, limit))
finally:
etype = value = tb = None
from urllib import unquote
from urlparse import urlparse
import warnings
import errno
def plat_specific_errors(*errnames):
"""Return error numbers for all errors in errnames on this platform.
The 'errno' module contains different global constants depending on
the specific platform (OS). This function will return the list of
numeric values for a given list of potential names.
"""
errno_names = dir(errno)
nums = [getattr(errno, k) for k in errnames if k in errno_names]
# de-dupe the list
return dict.fromkeys(nums).keys()
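# For example, on a POSIX platform:
#   plat_specific_errors("EPIPE", "WSAEBADF") -> [errno.EPIPE]
# since the WSA* names only exist in the 'errno' module on Windows.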
socket_error_eintr = plat_specific_errors("EINTR", "WSAEINTR")
socket_errors_to_ignore = plat_specific_errors(
"EPIPE",
"EBADF", "WSAEBADF",
"ENOTSOCK", "WSAENOTSOCK",
"ETIMEDOUT", "WSAETIMEDOUT",
"ECONNREFUSED", "WSAECONNREFUSED",
"ECONNRESET", "WSAECONNRESET",
"ECONNABORTED", "WSAECONNABORTED",
"ENETRESET", "WSAENETRESET",
"EHOSTDOWN", "EHOSTUNREACH",
)
socket_errors_to_ignore.append("timed out")
socket_errors_to_ignore.append("The read operation timed out")
socket_errors_nonblocking = plat_specific_errors(
'EAGAIN', 'EWOULDBLOCK', 'WSAEWOULDBLOCK')
comma_separated_headers = ['Accept', 'Accept-Charset', 'Accept-Encoding',
'Accept-Language', 'Accept-Ranges', 'Allow', 'Cache-Control',
'Connection', 'Content-Encoding', 'Content-Language', 'Expect',
'If-Match', 'If-None-Match', 'Pragma', 'Proxy-Authenticate', 'TE',
'Trailer', 'Transfer-Encoding', 'Upgrade', 'Vary', 'Via', 'Warning',
'WWW-Authenticate']
import logging
if not hasattr(logging, 'statistics'): logging.statistics = {}
def read_headers(rfile, hdict=None):
"""Read headers from the given stream into the given header dict.
If hdict is None, a new header dict is created. Returns the populated
header dict.
Headers which are repeated are folded together using a comma if their
specification so dictates.
This function raises ValueError when the read bytes violate the HTTP spec.
You should probably return "400 Bad Request" if this happens.
"""
if hdict is None:
hdict = {}
while True:
line = rfile.readline()
if not line:
# No more data--illegal end of headers
raise ValueError("Illegal end of headers.")
if line == CRLF:
# Normal end of headers
break
if not line.endswith(CRLF):
raise ValueError("HTTP requires CRLF terminators")
if line[0] in ' \t':
# It's a continuation line.
v = line.strip()
else:
try:
k, v = line.split(":", 1)
except ValueError:
raise ValueError("Illegal header line.")
# TODO: what about TE and WWW-Authenticate?
k = k.strip().title()
v = v.strip()
hname = k
if k in comma_separated_headers:
existing = hdict.get(hname)
if existing:
v = ", ".join((existing, v))
hdict[hname] = v
return hdict
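# A minimal usage sketch (hypothetical input): with rfile positioned just
# past the Request-Line,
#   rfile = StringIO.StringIO("Accept: text/html\r\nAccept: text/plain\r\n\r\n")
#   read_headers(rfile)
# returns {'Accept': 'text/html, text/plain'}, folding the repeated header
# because 'Accept' appears in comma_separated_headers.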
class MaxSizeExceeded(Exception):
pass
class SizeCheckWrapper(object):
"""Wraps a file-like object, raising MaxSizeExceeded if too large."""
def __init__(self, rfile, maxlen):
self.rfile = rfile
self.maxlen = maxlen
self.bytes_read = 0
def _check_length(self):
if self.maxlen and self.bytes_read > self.maxlen:
raise MaxSizeExceeded()
def read(self, size=None):
data = self.rfile.read(size)
self.bytes_read += len(data)
self._check_length()
return data
def readline(self, size=None):
if size is not None:
data = self.rfile.readline(size)
self.bytes_read += len(data)
self._check_length()
return data
# User didn't specify a size ...
        # We read the line in chunks to make sure it's not a 100MB line!
res = []
while True:
data = self.rfile.readline(256)
self.bytes_read += len(data)
self._check_length()
res.append(data)
# See http://www.cherrypy.org/ticket/421
if len(data) < 256 or data[-1:] == "\n":
return ''.join(res)
def readlines(self, sizehint=0):
# Shamelessly stolen from StringIO
total = 0
lines = []
line = self.readline()
while line:
lines.append(line)
total += len(line)
if 0 < sizehint <= total:
break
line = self.readline()
return lines
def close(self):
self.rfile.close()
def __iter__(self):
return self
def next(self):
data = self.rfile.next()
self.bytes_read += len(data)
self._check_length()
return data
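# A minimal usage sketch: wrap a connection's rfile while parsing the request
# line and headers, and translate the overflow into an error response:
#   rfile = SizeCheckWrapper(conn.rfile, max_header_size)  # hypothetical limit
#   try:
#       line = rfile.readline()
#   except MaxSizeExceeded:
#       pass  # e.g. respond with an appropriate 4xx status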
class KnownLengthRFile(object):
"""Wraps a file-like object, returning an empty string when exhausted."""
def __init__(self, rfile, content_length):
self.rfile = rfile
self.remaining = content_length
def read(self, size=None):
if self.remaining == 0:
return ''
if size is None:
size = self.remaining
else:
size = min(size, self.remaining)
data = self.rfile.read(size)
self.remaining -= len(data)
return data
def readline(self, size=None):
if self.remaining == 0:
return ''
if size is None:
size = self.remaining
else:
size = min(size, self.remaining)
data = self.rfile.readline(size)
self.remaining -= len(data)
return data
def readlines(self, sizehint=0):
# Shamelessly stolen from StringIO
total = 0
lines = []
line = self.readline(sizehint)
while line:
lines.append(line)
total += len(line)
if 0 < sizehint <= total:
break
line = self.readline(sizehint)
return lines
def close(self):
self.rfile.close()
def __iter__(self):
return self
    def next(self):
data = next(self.rfile)
self.remaining -= len(data)
return data
class ChunkedRFile(object):
"""Wraps a file-like object, returning an empty string when exhausted.
This class is intended to provide a conforming wsgi.input value for
request entities that have been encoded with the 'chunked' transfer
encoding.
"""
def __init__(self, rfile, maxlen, bufsize=8192):
self.rfile = rfile
self.maxlen = maxlen
self.bytes_read = 0
self.buffer = ''
self.bufsize = bufsize
self.closed = False
def _fetch(self):
if self.closed:
return
line = self.rfile.readline()
self.bytes_read += len(line)
if self.maxlen and self.bytes_read > self.maxlen:
raise MaxSizeExceeded("Request Entity Too Large", self.maxlen)
line = line.strip().split(";", 1)
try:
chunk_size = line.pop(0)
chunk_size = int(chunk_size, 16)
except ValueError:
raise ValueError("Bad chunked transfer size: " + repr(chunk_size))
if chunk_size <= 0:
self.closed = True
return
## if line: chunk_extension = line[0]
if self.maxlen and self.bytes_read + chunk_size > self.maxlen:
raise IOError("Request Entity Too Large")
chunk = self.rfile.read(chunk_size)
self.bytes_read += len(chunk)
self.buffer += chunk
crlf = self.rfile.read(2)
if crlf != CRLF:
raise ValueError(
"Bad chunked transfer coding (expected '\\r\\n', "
"got " + repr(crlf) + ")")
def read(self, size=None):
data = ''
while True:
if size and len(data) >= size:
return data
if not self.buffer:
self._fetch()
if not self.buffer:
# EOF
return data
if size:
remaining = size - len(data)
data += self.buffer[:remaining]
self.buffer = self.buffer[remaining:]
            else:
                data += self.buffer
                self.buffer = ''
def readline(self, size=None):
data = ''
while True:
if size and len(data) >= size:
return data
if not self.buffer:
self._fetch()
if not self.buffer:
# EOF
return data
            newline_pos = self.buffer.find('\n')
            if size:
                if newline_pos == -1:
                    remaining = size - len(data)
                    data += self.buffer[:remaining]
                    self.buffer = self.buffer[remaining:]
                else:
                    # Consume through the newline (never more than 'size'
                    # bytes total) and return once the line is complete.
                    remaining = min(size - len(data), newline_pos + 1)
                    data += self.buffer[:remaining]
                    self.buffer = self.buffer[remaining:]
                    if data.endswith('\n'):
                        return data
            else:
                if newline_pos == -1:
                    # No newline buffered yet; consume everything and refill.
                    data += self.buffer
                    self.buffer = ''
                else:
                    # Consume through the newline and return the complete line.
                    data += self.buffer[:newline_pos + 1]
                    self.buffer = self.buffer[newline_pos + 1:]
                    return data
def readlines(self, sizehint=0):
# Shamelessly stolen from StringIO
total = 0
lines = []
line = self.readline(sizehint)
while line:
lines.append(line)
total += len(line)
if 0 < sizehint <= total:
break
line = self.readline(sizehint)
return lines
def read_trailer_lines(self):
if not self.closed:
raise ValueError(
"Cannot read trailers until the request body has been read.")
while True:
line = self.rfile.readline()
if not line:
# No more data--illegal end of headers
raise ValueError("Illegal end of headers.")
self.bytes_read += len(line)
if self.maxlen and self.bytes_read > self.maxlen:
raise IOError("Request Entity Too Large")
if line == CRLF:
# Normal end of headers
break
if not line.endswith(CRLF):
raise ValueError("HTTP requires CRLF terminators")
yield line
def close(self):
self.rfile.close()
    def __iter__(self):
        # Yield whole lines until EOF; iteration takes no size hint.
        line = self.readline()
        while line:
            yield line
            line = self.readline()
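# A minimal runnable sketch (the demo function and 'wire' value are
# illustrative, not part of the original module) of the chunked wire format
# this class consumes: each chunk is "<hex size>\r\n<data>\r\n", and a
# zero-size chunk plus a blank line terminates the body.
def _demo_chunked_rfile():
    import StringIO
    wire = "5\r\nhello\r\n6\r\n world\r\n0\r\n\r\n"
    rfile = ChunkedRFile(StringIO.StringIO(wire), maxlen=0)
    return rfile.read()  # -> "hello world"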
class HTTPRequest(object):
"""An HTTP Request (and response).
A single HTTP connection may consist of multiple request/response pairs.
"""
server = None
"""The HTTPServer object which is receiving this request."""
conn = None
"""The HTTPConnection object on which this request connected."""
inheaders = {}
"""A dict of request headers."""
outheaders = []
"""A list of header tuples to write in the response."""
ready = False
"""When True, the request has been parsed and is ready to begin generating
the response. When False, signals the calling Connection that the response
should not be generated and the connection should close."""
close_connection = False
"""Signals the calling Connection that the request should close. This does
not imply an error! The client and/or server may each request that the
connection be closed."""
chunked_write = False
"""If True, output will be encoded with the "chunked" transfer-coding.
This value is set automatically inside send_headers."""
def __init__(self, server, conn):
        self.server = server
self.conn = conn
self.ready = False
self.started_request = False
self.scheme = "http"
if self.server.ssl_adapter is not None:
self.scheme = "https"
# Use the lowest-common protocol in case read_request_line errors.
self.response_protocol = 'HTTP/1.0'
self.inheaders = {}
self.status = ""
self.outheaders = []
self.sent_headers = False
self.close_connection = self.__class__.close_connection
self.chunked_read = False
self.chunked_write = self.__class__.chunked_write
def parse_request(self):
"""Parse the next HTTP request start-line and message-headers."""
self.rfile = SizeCheckWrapper(self.conn.rfile,
self.server.max_request_header_size)
try:
self.read_request_line()
except MaxSizeExceeded:
self.simple_response("414 Request-URI Too Long",
"The Request-URI sent with the request exceeds the maximum "
"allowed bytes.")
return
try:
success = self.read_request_headers()
except MaxSizeExceeded:
self.simple_response("413 Request Entity Too Large",
"The headers sent with the request exceed the maximum "
"allowed bytes.")
return
else:
if not success:
return
self.ready = True
def read_request_line(self):
# HTTP/1.1 connections are persistent by default. If a client
# requests a page, then idles (leaves the connection open),
# then rfile.readline() will raise socket.error("timed out").
# Note that it does this based on the value given to settimeout(),
# and doesn't need the client to request or acknowledge the close
# (although your TCP stack might suffer for it: cf Apache's history
# with FIN_WAIT_2).
request_line = self.rfile.readline()
# Set started_request to True so communicate() knows to send 408
# from here on out.
self.started_request = True
if not request_line:
# Force self.ready = False so the connection will close.
self.ready = False
return
if request_line == CRLF:
# RFC 2616 sec 4.1: "...if the server is reading the protocol
# stream at the beginning of a message and receives a CRLF
# first, it should ignore the CRLF."
# But only ignore one leading line! else we enable a DoS.
request_line = self.rfile.readline()
if not request_line:
self.ready = False
return
if not request_line.endswith(CRLF):
self.simple_response("400 Bad Request", "HTTP requires CRLF terminators")
return
try:
method, uri, req_protocol = request_line.strip().split(" ", 2)
rp = int(req_protocol[5]), int(req_protocol[7])
except (ValueError, IndexError):
self.simple_response("400 Bad Request", "Malformed Request-Line")
return
self.uri = uri
self.method = method
# uri may be an abs_path (including "http://host.domain.tld");
scheme, authority, path = self.parse_request_uri(uri)
if '#' in path:
self.simple_response("400 Bad Request",
"Illegal #fragment in Request-URI.")
return
if scheme:
self.scheme = scheme
qs = ''
if '?' in path:
path, qs = path.split('?', 1)
# Unquote the path+params (e.g. "/this%20path" -> "/this path").
# http://www.w3.org/Protocols/rfc2616/rfc2616-sec5.html#sec5.1.2
#
# But note that "...a URI must be separated into its components
# before the escaped characters within those components can be
# safely decoded." http://www.ietf.org/rfc/rfc2396.txt, sec 2.4.2
# Therefore, "/this%2Fpath" becomes "/this%2Fpath", not "/this/path".
try:
atoms = [unquote(x) for x in quoted_slash.split(path)]
except ValueError, ex:
self.simple_response("400 Bad Request", ex.args[0])
return
path = "%2F".join(atoms)
self.path = path
# Note that, like wsgiref and most other HTTP servers,
# we "% HEX HEX"-unquote the path but not the query string.
self.qs = qs
# Compare request and server HTTP protocol versions, in case our
# server does not support the requested protocol. Limit our output
# to min(req, server). We want the following output:
# request server actual written supported response
# protocol protocol response protocol feature set
# a 1.0 1.0 1.0 1.0
# b 1.0 1.1 1.1 1.0
# c 1.1 1.0 1.0 1.0
# d 1.1 1.1 1.1 1.1
# Notice that, in (b), the response will be "HTTP/1.1" even though
# the client only understands 1.0. RFC 2616 10.5.6 says we should
# only return 505 if the _major_ version is different.
sp = int(self.server.protocol[5]), int(self.server.protocol[7])
if sp[0] != rp[0]:
self.simple_response("505 HTTP Version Not Supported")
return
self.request_protocol = req_protocol
self.response_protocol = "HTTP/%s.%s" % min(rp, sp)
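        # Worked example (illustrative): an HTTP/1.0 request to this HTTP/1.1
        # server gives rp == (1, 0) and sp == (1, 1). The major versions
        # match, so no 505 is sent; the feature set is limited to
        # min(rp, sp) == (1, 0), although send_headers still writes the
        # server's own protocol string in the Status-Line.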
def read_request_headers(self):
"""Read self.rfile into self.inheaders. Return success."""
# then all the http headers
try:
read_headers(self.rfile, self.inheaders)
except ValueError, ex:
self.simple_response("400 Bad Request", ex.args[0])
return False
mrbs = self.server.max_request_body_size
if mrbs and int(self.inheaders.get("Content-Length", 0)) > mrbs:
self.simple_response("413 Request Entity Too Large",
"The entity sent with the request exceeds the maximum "
"allowed bytes.")
return False
# Persistent connection support
if self.response_protocol == "HTTP/1.1":
# Both server and client are HTTP/1.1
if self.inheaders.get("Connection", "") == "close":
self.close_connection = True
else:
# Either the server or client (or both) are HTTP/1.0
if self.inheaders.get("Connection", "") != "Keep-Alive":
self.close_connection = True
# Transfer-Encoding support
te = None
if self.response_protocol == "HTTP/1.1":
te = self.inheaders.get("Transfer-Encoding")
if te:
te = [x.strip().lower() for x in te.split(",") if x.strip()]
self.chunked_read = False
if te:
for enc in te:
if enc == "chunked":
self.chunked_read = True
else:
# Note that, even if we see "chunked", we must reject
# if there is an extension we don't recognize.
self.simple_response("501 Unimplemented")
self.close_connection = True
return False
# From PEP 333:
# "Servers and gateways that implement HTTP 1.1 must provide
# transparent support for HTTP 1.1's "expect/continue" mechanism.
# This may be done in any of several ways:
# 1. Respond to requests containing an Expect: 100-continue request
# with an immediate "100 Continue" response, and proceed normally.
# 2. Proceed with the request normally, but provide the application
# with a wsgi.input stream that will send the "100 Continue"
# response if/when the application first attempts to read from
# the input stream. The read request must then remain blocked
# until the client responds.
# 3. Wait until the client decides that the server does not support
# expect/continue, and sends the request body on its own.
# (This is suboptimal, and is not recommended.)
#
# We used to do 3, but are now doing 1. Maybe we'll do 2 someday,
# but it seems like it would be a big slowdown for such a rare case.
if self.inheaders.get("Expect", "") == "100-continue":
# Don't use simple_response here, because it emits headers
# we don't want. See http://www.cherrypy.org/ticket/951
msg = self.server.protocol + " 100 Continue\r\n\r\n"
try:
self.conn.wfile.sendall(msg)
except socket.error, x:
if x.args[0] not in socket_errors_to_ignore:
raise
return True
def parse_request_uri(self, uri):
"""Parse a Request-URI into (scheme, authority, path).
Note that Request-URI's must be one of::
Request-URI = "*" | absoluteURI | abs_path | authority
Therefore, a Request-URI which starts with a double forward-slash
cannot be a "net_path"::
net_path = "//" authority [ abs_path ]
Instead, it must be interpreted as an "abs_path" with an empty first
path segment::
abs_path = "/" path_segments
path_segments = segment *( "/" segment )
segment = *pchar *( ";" param )
param = *pchar
"""
if uri == "*":
return None, None, uri
i = uri.find('://')
if i > 0 and '?' not in uri[:i]:
# An absoluteURI.
# If there's a scheme (and it must be http or https), then:
# http_URL = "http:" "//" host [ ":" port ] [ abs_path [ "?" query ]]
scheme, remainder = uri[:i].lower(), uri[i + 3:]
authority, path = remainder.split("/", 1)
return scheme, authority, path
if uri.startswith('/'):
# An abs_path.
return None, None, uri
else:
# An authority.
return None, uri, None
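    # Illustrative mappings (assumed examples, not from the original code):
    #   parse_request_uri('*')                      -> (None, None, '*')
    #   parse_request_uri('http://h:8080/a/b?q=1')  -> ('http', 'h:8080', 'a/b?q=1')
    #   parse_request_uri('/a/b')                   -> (None, None, '/a/b')
    #   parse_request_uri('example.com:443')        -> (None, 'example.com:443', None)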
def respond(self):
"""Call the gateway and write its iterable output."""
mrbs = self.server.max_request_body_size
if self.chunked_read:
self.rfile = ChunkedRFile(self.conn.rfile, mrbs)
else:
cl = int(self.inheaders.get("Content-Length", 0))
if mrbs and mrbs < cl:
if not self.sent_headers:
self.simple_response("413 Request Entity Too Large",
"The entity sent with the request exceeds the maximum "
"allowed bytes.")
return
self.rfile = KnownLengthRFile(self.conn.rfile, cl)
self.server.gateway(self).respond()
if (self.ready and not self.sent_headers):
self.sent_headers = True
self.send_headers()
if self.chunked_write:
self.conn.wfile.sendall("0\r\n\r\n")
def simple_response(self, status, msg=""):
"""Write a simple response back to the client."""
status = str(status)
buf = [self.server.protocol + " " +
status + CRLF,
"Content-Length: %s\r\n" % len(msg),
"Content-Type: text/plain\r\n"]
if status[:3] in ("413", "414"):
# Request Entity Too Large / Request-URI Too Long
self.close_connection = True
if self.response_protocol == 'HTTP/1.1':
# This will not be true for 414, since read_request_line
# usually raises 414 before reading the whole line, and we
# therefore cannot know the proper response_protocol.
buf.append("Connection: close\r\n")
else:
# HTTP/1.0 had no 413/414 status nor Connection header.
# Emit 400 instead and trust the message body is enough.
status = "400 Bad Request"
buf.append(CRLF)
if msg:
if isinstance(msg, unicode):
msg = msg.encode("ISO-8859-1")
buf.append(msg)
try:
self.conn.wfile.sendall("".join(buf))
except socket.error, x:
if x.args[0] not in socket_errors_to_ignore:
raise
def write(self, chunk):
"""Write unbuffered data to the client."""
if self.chunked_write and chunk:
buf = [hex(len(chunk))[2:], CRLF, chunk, CRLF]
self.conn.wfile.sendall("".join(buf))
else:
self.conn.wfile.sendall(chunk)
def send_headers(self):
"""Assert, process, and send the HTTP response message-headers.
You must set self.status, and self.outheaders before calling this.
"""
hkeys = [key.lower() for key, value in self.outheaders]
status = int(self.status[:3])
if status == 413:
# Request Entity Too Large. Close conn to avoid garbage.
self.close_connection = True
elif "content-length" not in hkeys:
# "All 1xx (informational), 204 (no content),
# and 304 (not modified) responses MUST NOT
# include a message-body." So no point chunking.
if status < 200 or status in (204, 205, 304):
pass
else:
if (self.response_protocol == 'HTTP/1.1'
and self.method != 'HEAD'):
# Use the chunked transfer-coding
self.chunked_write = True
self.outheaders.append(("Transfer-Encoding", "chunked"))
else:
# Closing the conn is the only way to determine len.
self.close_connection = True
if "connection" not in hkeys:
if self.response_protocol == 'HTTP/1.1':
# Both server and client are HTTP/1.1 or better
if self.close_connection:
self.outheaders.append(("Connection", "close"))
else:
# Server and/or client are HTTP/1.0
if not self.close_connection:
self.outheaders.append(("Connection", "Keep-Alive"))
if (not self.close_connection) and (not self.chunked_read):
# Read any remaining request body data on the socket.
# "If an origin server receives a request that does not include an
# Expect request-header field with the "100-continue" expectation,
# the request includes a request body, and the server responds
# with a final status code before reading the entire request body
# from the transport connection, then the server SHOULD NOT close
# the transport connection until it has read the entire request,
# or until the client closes the connection. Otherwise, the client
# might not reliably receive the response message. However, this
            # requirement is not to be construed as preventing a server from
# defending itself against denial-of-service attacks, or from
# badly broken client implementations."
remaining = getattr(self.rfile, 'remaining', 0)
if remaining > 0:
self.rfile.read(remaining)
if "date" not in hkeys:
self.outheaders.append(("Date", rfc822.formatdate()))
if "server" not in hkeys:
self.outheaders.append(("Server", self.server.server_name))
buf = [self.server.protocol + " " + self.status + CRLF]
for k, v in self.outheaders:
buf.append(k + ": " + v + CRLF)
buf.append(CRLF)
self.conn.wfile.sendall("".join(buf))
class NoSSLError(Exception):
"""Exception raised when a client speaks HTTP to an HTTPS socket."""
pass
class FatalSSLAlert(Exception):
"""Exception raised when the SSL implementation signals a fatal alert."""
pass
class CP_fileobject(socket._fileobject):
"""Faux file object attached to a socket object."""
def __init__(self, *args, **kwargs):
self.bytes_read = 0
self.bytes_written = 0
socket._fileobject.__init__(self, *args, **kwargs)
def sendall(self, data):
"""Sendall for non-blocking sockets."""
while data:
try:
bytes_sent = self.send(data)
data = data[bytes_sent:]
except socket.error, e:
if e.args[0] not in socket_errors_nonblocking:
raise
def send(self, data):
bytes_sent = self._sock.send(data)
self.bytes_written += bytes_sent
return bytes_sent
def flush(self):
if self._wbuf:
buffer = "".join(self._wbuf)
self._wbuf = []
self.sendall(buffer)
def recv(self, size):
while True:
try:
data = self._sock.recv(size)
self.bytes_read += len(data)
return data
except socket.error, e:
if (e.args[0] not in socket_errors_nonblocking
and e.args[0] not in socket_error_eintr):
raise
if not _fileobject_uses_str_type:
def read(self, size=-1):
# Use max, disallow tiny reads in a loop as they are very inefficient.
# We never leave read() with any leftover data from a new recv() call
# in our internal buffer.
rbufsize = max(self._rbufsize, self.default_bufsize)
# Our use of StringIO rather than lists of string objects returned by
# recv() minimizes memory usage and fragmentation that occurs when
# rbufsize is large compared to the typical return value of recv().
buf = self._rbuf
buf.seek(0, 2) # seek end
if size < 0:
# Read until EOF
self._rbuf = StringIO.StringIO() # reset _rbuf. we consume it via buf.
while True:
data = self.recv(rbufsize)
if not data:
break
buf.write(data)
return buf.getvalue()
else:
# Read until size bytes or EOF seen, whichever comes first
buf_len = buf.tell()
if buf_len >= size:
# Already have size bytes in our buffer? Extract and return.
buf.seek(0)
rv = buf.read(size)
self._rbuf = StringIO.StringIO()
self._rbuf.write(buf.read())
return rv
self._rbuf = StringIO.StringIO() # reset _rbuf. we consume it via buf.
while True:
left = size - buf_len
# recv() will malloc the amount of memory given as its
# parameter even though it often returns much less data
# than that. The returned data string is short lived
# as we copy it into a StringIO and free it. This avoids
# fragmentation issues on many platforms.
data = self.recv(left)
if not data:
break
n = len(data)
if n == size and not buf_len:
# Shortcut. Avoid buffer data copies when:
# - We have no data in our buffer.
# AND
# - Our call to recv returned exactly the
# number of bytes we were asked to read.
return data
if n == left:
buf.write(data)
del data # explicit free
break
assert n <= left, "recv(%d) returned %d bytes" % (left, n)
buf.write(data)
buf_len += n
del data # explicit free
#assert buf_len == buf.tell()
return buf.getvalue()
def readline(self, size=-1):
buf = self._rbuf
buf.seek(0, 2) # seek end
if buf.tell() > 0:
# check if we already have it in our buffer
buf.seek(0)
bline = buf.readline(size)
if bline.endswith('\n') or len(bline) == size:
self._rbuf = StringIO.StringIO()
self._rbuf.write(buf.read())
return bline
del bline
if size < 0:
# Read until \n or EOF, whichever comes first
if self._rbufsize <= 1:
# Speed up unbuffered case
buf.seek(0)
buffers = [buf.read()]
self._rbuf = StringIO.StringIO() # reset _rbuf. we consume it via buf.
data = None
recv = self.recv
while data != "\n":
data = recv(1)
if not data:
break
buffers.append(data)
return "".join(buffers)
buf.seek(0, 2) # seek end
self._rbuf = StringIO.StringIO() # reset _rbuf. we consume it via buf.
while True:
data = self.recv(self._rbufsize)
if not data:
break
nl = data.find('\n')
if nl >= 0:
nl += 1
buf.write(data[:nl])
self._rbuf.write(data[nl:])
del data
break
buf.write(data)
return buf.getvalue()
else:
# Read until size bytes or \n or EOF seen, whichever comes first
buf.seek(0, 2) # seek end
buf_len = buf.tell()
if buf_len >= size:
buf.seek(0)
rv = buf.read(size)
self._rbuf = StringIO.StringIO()
self._rbuf.write(buf.read())
return rv
self._rbuf = StringIO.StringIO() # reset _rbuf. we consume it via buf.
while True:
data = self.recv(self._rbufsize)
if not data:
break
left = size - buf_len
# did we just receive a newline?
nl = data.find('\n', 0, left)
if nl >= 0:
nl += 1
# save the excess data to _rbuf
self._rbuf.write(data[nl:])
if buf_len:
buf.write(data[:nl])
break
else:
# Shortcut. Avoid data copy through buf when returning
# a substring of our first recv().
return data[:nl]
n = len(data)
if n == size and not buf_len:
# Shortcut. Avoid data copy through buf when
# returning exactly all of our first recv().
return data
if n >= left:
buf.write(data[:left])
self._rbuf.write(data[left:])
break
buf.write(data)
buf_len += n
#assert buf_len == buf.tell()
return buf.getvalue()
else:
def read(self, size=-1):
if size < 0:
# Read until EOF
buffers = [self._rbuf]
self._rbuf = ""
if self._rbufsize <= 1:
recv_size = self.default_bufsize
else:
recv_size = self._rbufsize
while True:
data = self.recv(recv_size)
if not data:
break
buffers.append(data)
return "".join(buffers)
else:
# Read until size bytes or EOF seen, whichever comes first
data = self._rbuf
buf_len = len(data)
if buf_len >= size:
self._rbuf = data[size:]
return data[:size]
buffers = []
if data:
buffers.append(data)
self._rbuf = ""
while True:
left = size - buf_len
recv_size = max(self._rbufsize, left)
data = self.recv(recv_size)
if not data:
break
buffers.append(data)
n = len(data)
if n >= left:
self._rbuf = data[left:]
buffers[-1] = data[:left]
break
buf_len += n
return "".join(buffers)
def readline(self, size=-1):
data = self._rbuf
if size < 0:
# Read until \n or EOF, whichever comes first
if self._rbufsize <= 1:
# Speed up unbuffered case
assert data == ""
buffers = []
while data != "\n":
data = self.recv(1)
if not data:
break
buffers.append(data)
return "".join(buffers)
nl = data.find('\n')
if nl >= 0:
nl += 1
self._rbuf = data[nl:]
return data[:nl]
buffers = []
if data:
buffers.append(data)
self._rbuf = ""
while True:
data = self.recv(self._rbufsize)
if not data:
break
buffers.append(data)
nl = data.find('\n')
if nl >= 0:
nl += 1
self._rbuf = data[nl:]
buffers[-1] = data[:nl]
break
return "".join(buffers)
else:
# Read until size bytes or \n or EOF seen, whichever comes first
nl = data.find('\n', 0, size)
if nl >= 0:
nl += 1
self._rbuf = data[nl:]
return data[:nl]
buf_len = len(data)
if buf_len >= size:
self._rbuf = data[size:]
return data[:size]
buffers = []
if data:
buffers.append(data)
self._rbuf = ""
while True:
data = self.recv(self._rbufsize)
if not data:
break
buffers.append(data)
left = size - buf_len
nl = data.find('\n', 0, left)
if nl >= 0:
nl += 1
self._rbuf = data[nl:]
buffers[-1] = data[:nl]
break
n = len(data)
if n >= left:
self._rbuf = data[left:]
buffers[-1] = data[:left]
break
buf_len += n
return "".join(buffers)
class HTTPConnection(object):
"""An HTTP connection (active socket).
server: the Server object which received this connection.
socket: the raw socket object (usually TCP) for this connection.
makefile: a fileobject class for reading from the socket.
"""
remote_addr = None
remote_port = None
ssl_env = None
rbufsize = DEFAULT_BUFFER_SIZE
wbufsize = DEFAULT_BUFFER_SIZE
RequestHandlerClass = HTTPRequest
def __init__(self, server, sock, makefile=CP_fileobject):
self.server = server
self.socket = sock
self.rfile = makefile(sock, "rb", self.rbufsize)
self.wfile = makefile(sock, "wb", self.wbufsize)
self.requests_seen = 0
def communicate(self):
"""Read each request and respond appropriately."""
request_seen = False
try:
while True:
# (re)set req to None so that if something goes wrong in
# the RequestHandlerClass constructor, the error doesn't
# get written to the previous request.
req = None
req = self.RequestHandlerClass(self.server, self)
# This order of operations should guarantee correct pipelining.
req.parse_request()
if self.server.stats['Enabled']:
self.requests_seen += 1
if not req.ready:
# Something went wrong in the parsing (and the server has
# probably already made a simple_response). Return and
# let the conn close.
return
request_seen = True
req.respond()
if req.close_connection:
return
except socket.error, e:
errnum = e.args[0]
# sadly SSL sockets return a different (longer) time out string
if errnum == 'timed out' or errnum == 'The read operation timed out':
# Don't error if we're between requests; only error
# if 1) no request has been started at all, or 2) we're
# in the middle of a request.
# See http://www.cherrypy.org/ticket/853
if (not request_seen) or (req and req.started_request):
# Don't bother writing the 408 if the response
# has already started being written.
if req and not req.sent_headers:
try:
req.simple_response("408 Request Timeout")
except FatalSSLAlert:
# Close the connection.
return
elif errnum not in socket_errors_to_ignore:
if req and not req.sent_headers:
try:
req.simple_response("500 Internal Server Error",
format_exc())
except FatalSSLAlert:
# Close the connection.
return
return
except (KeyboardInterrupt, SystemExit):
raise
except FatalSSLAlert:
# Close the connection.
return
except NoSSLError:
if req and not req.sent_headers:
# Unwrap our wfile
self.wfile = CP_fileobject(self.socket._sock, "wb", self.wbufsize)
req.simple_response("400 Bad Request",
"The client sent a plain HTTP request, but "
"this server only speaks HTTPS on this port.")
self.linger = True
except Exception:
if req and not req.sent_headers:
try:
req.simple_response("500 Internal Server Error", format_exc())
except FatalSSLAlert:
# Close the connection.
return
linger = False
def close(self):
"""Close the socket underlying this connection."""
self.rfile.close()
if not self.linger:
# Python's socket module does NOT call close on the kernel socket
# when you call socket.close(). We do so manually here because we
# want this server to send a FIN TCP segment immediately. Note this
# must be called *before* calling socket.close(), because the latter
# drops its reference to the kernel socket.
if hasattr(self.socket, '_sock'):
self.socket._sock.close()
self.socket.close()
else:
# On the other hand, sometimes we want to hang around for a bit
# to make sure the client has a chance to read our entire
# response. Skipping the close() calls here delays the FIN
# packet until the socket object is garbage-collected later.
# Someday, perhaps, we'll do the full lingering_close that
# Apache does, but not today.
pass
_SHUTDOWNREQUEST = None
class WorkerThread(threading.Thread):
"""Thread which continuously polls a Queue for Connection objects.
Due to the timing issues of polling a Queue, a WorkerThread does not
check its own 'ready' flag after it has started. To stop the thread,
it is necessary to stick a _SHUTDOWNREQUEST object onto the Queue
(one for each running WorkerThread).
"""
conn = None
"""The current connection pulled off the Queue, or None."""
server = None
"""The HTTP Server which spawned this thread, and which owns the
Queue and is placing active connections into it."""
ready = False
"""A simple flag for the calling server to know when this thread
has begun polling the Queue."""
def __init__(self, server):
self.ready = False
self.server = server
self.requests_seen = 0
self.bytes_read = 0
self.bytes_written = 0
self.start_time = None
self.work_time = 0
self.stats = {
'Requests': lambda s: self.requests_seen + ((self.start_time is None) and 0 or self.conn.requests_seen),
'Bytes Read': lambda s: self.bytes_read + ((self.start_time is None) and 0 or self.conn.rfile.bytes_read),
'Bytes Written': lambda s: self.bytes_written + ((self.start_time is None) and 0 or self.conn.wfile.bytes_written),
'Work Time': lambda s: self.work_time + ((self.start_time is None) and 0 or time.time() - self.start_time),
'Read Throughput': lambda s: s['Bytes Read'](s) / (s['Work Time'](s) or 1e-6),
'Write Throughput': lambda s: s['Bytes Written'](s) / (s['Work Time'](s) or 1e-6),
}
threading.Thread.__init__(self)
def run(self):
self.server.stats['Worker Threads'][self.getName()] = self.stats
try:
self.ready = True
while True:
conn = self.server.requests.get()
if conn is _SHUTDOWNREQUEST:
return
self.conn = conn
if self.server.stats['Enabled']:
self.start_time = time.time()
try:
conn.communicate()
finally:
conn.close()
if self.server.stats['Enabled']:
self.requests_seen += self.conn.requests_seen
self.bytes_read += self.conn.rfile.bytes_read
self.bytes_written += self.conn.wfile.bytes_written
self.work_time += time.time() - self.start_time
self.start_time = None
self.conn = None
except (KeyboardInterrupt, SystemExit), exc:
self.server.interrupt = exc
class ThreadPool(object):
"""A Request Queue for the CherryPyWSGIServer which pools threads.
ThreadPool objects must provide min, get(), put(obj), start()
and stop(timeout) attributes.
"""
def __init__(self, server, min=10, max=-1):
self.server = server
self.min = min
self.max = max
self._threads = []
self._queue = Queue.Queue()
self.get = self._queue.get
def start(self):
"""Start the pool of threads."""
for i in range(self.min):
self._threads.append(WorkerThread(self.server))
for worker in self._threads:
worker.setName("CP Server " + worker.getName())
worker.start()
for worker in self._threads:
while not worker.ready:
time.sleep(.1)
def _get_idle(self):
"""Number of worker threads which are idle. Read-only."""
return len([t for t in self._threads if t.conn is None])
idle = property(_get_idle, doc=_get_idle.__doc__)
def put(self, obj):
self._queue.put(obj)
if obj is _SHUTDOWNREQUEST:
return
def grow(self, amount):
"""Spawn new worker threads (not above self.max)."""
for i in range(amount):
if self.max > 0 and len(self._threads) >= self.max:
break
worker = WorkerThread(self.server)
worker.setName("CP Server " + worker.getName())
self._threads.append(worker)
worker.start()
def shrink(self, amount):
"""Kill off worker threads (not below self.min)."""
# Grow/shrink the pool if necessary.
# Remove any dead threads from our list
for t in self._threads:
if not t.isAlive():
self._threads.remove(t)
amount -= 1
if amount > 0:
for i in range(min(amount, len(self._threads) - self.min)):
# Put a number of shutdown requests on the queue equal
# to 'amount'. Once each of those is processed by a worker,
# that worker will terminate and be culled from our list
# in self.put.
self._queue.put(_SHUTDOWNREQUEST)
def stop(self, timeout=5):
# Must shut down threads here so the code that calls
# this method can know when all threads are stopped.
for worker in self._threads:
self._queue.put(_SHUTDOWNREQUEST)
# Don't join currentThread (when stop is called inside a request).
current = threading.currentThread()
if timeout and timeout >= 0:
endtime = time.time() + timeout
while self._threads:
worker = self._threads.pop()
if worker is not current and worker.isAlive():
try:
if timeout is None or timeout < 0:
worker.join()
else:
remaining_time = endtime - time.time()
if remaining_time > 0:
worker.join(remaining_time)
if worker.isAlive():
# We exhausted the timeout.
# Forcibly shut down the socket.
c = worker.conn
if c and not c.rfile.closed:
try:
c.socket.shutdown(socket.SHUT_RD)
except TypeError:
# pyOpenSSL sockets don't take an arg
c.socket.shutdown()
worker.join()
except (AssertionError,
# Ignore repeated Ctrl-C.
# See http://www.cherrypy.org/ticket/691.
KeyboardInterrupt), exc1:
pass
def _get_qsize(self):
return self._queue.qsize()
qsize = property(_get_qsize)
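# A minimal usage sketch (illustrative; 'server' is any HTTPServer):
#
#     pool = ThreadPool(server, min=10, max=50)
#     pool.start()             # spawn workers and wait until they're ready
#     pool.put(conn)           # queue an HTTPConnection for a worker
#     pool.stop(timeout=5)     # request shutdown and join the workers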
try:
import fcntl
except ImportError:
try:
from ctypes import windll, WinError
except ImportError:
def prevent_socket_inheritance(sock):
"""Dummy function, since neither fcntl nor ctypes are available."""
pass
else:
def prevent_socket_inheritance(sock):
"""Mark the given socket fd as non-inheritable (Windows)."""
if not windll.kernel32.SetHandleInformation(sock.fileno(), 1, 0):
raise WinError()
else:
def prevent_socket_inheritance(sock):
"""Mark the given socket fd as non-inheritable (POSIX)."""
fd = sock.fileno()
old_flags = fcntl.fcntl(fd, fcntl.F_GETFD)
fcntl.fcntl(fd, fcntl.F_SETFD, old_flags | fcntl.FD_CLOEXEC)
class SSLAdapter(object):
"""Base class for SSL driver library adapters.
Required methods:
* ``wrap(sock) -> (wrapped socket, ssl environ dict)``
* ``makefile(sock, mode='r', bufsize=DEFAULT_BUFFER_SIZE) -> socket file object``
"""
def __init__(self, certificate, private_key, certificate_chain=None):
self.certificate = certificate
self.private_key = private_key
self.certificate_chain = certificate_chain
    def wrap(self, sock):
        raise NotImplementedError
    def makefile(self, sock, mode='r', bufsize=DEFAULT_BUFFER_SIZE):
        raise NotImplementedError
class HTTPServer(object):
"""An HTTP server."""
_bind_addr = "127.0.0.1"
_interrupt = None
gateway = None
"""A Gateway instance."""
minthreads = None
"""The minimum number of worker threads to create (default 10)."""
maxthreads = None
"""The maximum number of worker threads to create (default -1 = no limit)."""
server_name = None
"""The name of the server; defaults to socket.gethostname()."""
protocol = "HTTP/1.1"
"""The version string to write in the Status-Line of all HTTP responses.
For example, "HTTP/1.1" is the default. This also limits the supported
features used in the response."""
request_queue_size = 5
"""The 'backlog' arg to socket.listen(); max queued connections (default 5)."""
shutdown_timeout = 5
"""The total time, in seconds, to wait for worker threads to cleanly exit."""
timeout = 10
"""The timeout in seconds for accepted connections (default 10)."""
version = "CherryPy/3.2.0"
"""A version string for the HTTPServer."""
software = None
"""The value to set for the SERVER_SOFTWARE entry in the WSGI environ.
If None, this defaults to ``'%s Server' % self.version``."""
ready = False
"""An internal flag which marks whether the socket is accepting connections."""
max_request_header_size = 0
"""The maximum size, in bytes, for request headers, or 0 for no limit."""
max_request_body_size = 0
"""The maximum size, in bytes, for request bodies, or 0 for no limit."""
nodelay = True
"""If True (the default since 3.1), sets the TCP_NODELAY socket option."""
ConnectionClass = HTTPConnection
"""The class to use for handling HTTP connections."""
ssl_adapter = None
"""An instance of SSLAdapter (or a subclass).
You must have the corresponding SSL driver library installed."""
def __init__(self, bind_addr, gateway, minthreads=10, maxthreads=-1,
server_name=None):
self.bind_addr = bind_addr
self.gateway = gateway
self.requests = ThreadPool(self, min=minthreads or 1, max=maxthreads)
if not server_name:
server_name = socket.gethostname()
self.server_name = server_name
self.clear_stats()
def clear_stats(self):
self._start_time = None
self._run_time = 0
self.stats = {
'Enabled': False,
'Bind Address': lambda s: repr(self.bind_addr),
'Run time': lambda s: (not s['Enabled']) and 0 or self.runtime(),
'Accepts': 0,
'Accepts/sec': lambda s: s['Accepts'] / self.runtime(),
'Queue': lambda s: getattr(self.requests, "qsize", None),
'Threads': lambda s: len(getattr(self.requests, "_threads", [])),
'Threads Idle': lambda s: getattr(self.requests, "idle", None),
'Socket Errors': 0,
'Requests': lambda s: (not s['Enabled']) and 0 or sum([w['Requests'](w) for w
in s['Worker Threads'].values()], 0),
'Bytes Read': lambda s: (not s['Enabled']) and 0 or sum([w['Bytes Read'](w) for w
in s['Worker Threads'].values()], 0),
'Bytes Written': lambda s: (not s['Enabled']) and 0 or sum([w['Bytes Written'](w) for w
in s['Worker Threads'].values()], 0),
'Work Time': lambda s: (not s['Enabled']) and 0 or sum([w['Work Time'](w) for w
in s['Worker Threads'].values()], 0),
'Read Throughput': lambda s: (not s['Enabled']) and 0 or sum(
[w['Bytes Read'](w) / (w['Work Time'](w) or 1e-6)
for w in s['Worker Threads'].values()], 0),
'Write Throughput': lambda s: (not s['Enabled']) and 0 or sum(
[w['Bytes Written'](w) / (w['Work Time'](w) or 1e-6)
for w in s['Worker Threads'].values()], 0),
'Worker Threads': {},
}
logging.statistics["CherryPy HTTPServer %d" % id(self)] = self.stats
def runtime(self):
if self._start_time is None:
return self._run_time
else:
return self._run_time + (time.time() - self._start_time)
def __str__(self):
return "%s.%s(%r)" % (self.__module__, self.__class__.__name__,
self.bind_addr)
def _get_bind_addr(self):
return self._bind_addr
def _set_bind_addr(self, value):
if isinstance(value, tuple) and value[0] in ('', None):
# Despite the socket module docs, using '' does not
# allow AI_PASSIVE to work. Passing None instead
# returns '0.0.0.0' like we want. In other words:
# host AI_PASSIVE result
# '' Y 192.168.x.y
# '' N 192.168.x.y
# None Y 0.0.0.0
# None N 127.0.0.1
# But since you can get the same effect with an explicit
# '0.0.0.0', we deny both the empty string and None as values.
raise ValueError("Host values of '' or None are not allowed. "
"Use '0.0.0.0' (IPv4) or '::' (IPv6) instead "
"to listen on all active interfaces.")
self._bind_addr = value
bind_addr = property(_get_bind_addr, _set_bind_addr,
doc="""The interface on which to listen for connections.
For TCP sockets, a (host, port) tuple. Host values may be any IPv4
or IPv6 address, or any valid hostname. The string 'localhost' is a
synonym for '127.0.0.1' (or '::1', if your hosts file prefers IPv6).
The string '0.0.0.0' is a special IPv4 entry meaning "any active
interface" (INADDR_ANY), and '::' is the similar IN6ADDR_ANY for
IPv6. The empty string or None are not allowed.
For UNIX sockets, supply the filename as a string.""")
def start(self):
"""Run the server forever."""
# We don't have to trap KeyboardInterrupt or SystemExit here,
        # because cherrypy.server already does so, calling self.stop() for us.
# If you're using this server with another framework, you should
# trap those exceptions in whatever code block calls start().
self._interrupt = None
if self.software is None:
self.software = "%s Server" % self.version
# SSL backward compatibility
if (self.ssl_adapter is None and
getattr(self, 'ssl_certificate', None) and
getattr(self, 'ssl_private_key', None)):
warnings.warn(
"SSL attributes are deprecated in CherryPy 3.2, and will "
"be removed in CherryPy 3.3. Use an ssl_adapter attribute "
"instead.",
DeprecationWarning
)
try:
from cherrypy.wsgiserver.ssl_pyopenssl import pyOpenSSLAdapter
except ImportError:
pass
else:
self.ssl_adapter = pyOpenSSLAdapter(
self.ssl_certificate, self.ssl_private_key,
getattr(self, 'ssl_certificate_chain', None))
# Select the appropriate socket
if isinstance(self.bind_addr, basestring):
# AF_UNIX socket
# So we can reuse the socket...
try: os.unlink(self.bind_addr)
except: pass
# So everyone can access the socket...
try: os.chmod(self.bind_addr, 0777)
except: pass
info = [(socket.AF_UNIX, socket.SOCK_STREAM, 0, "", self.bind_addr)]
else:
# AF_INET or AF_INET6 socket
# Get the correct address family for our host (allows IPv6 addresses)
host, port = self.bind_addr
try:
info = socket.getaddrinfo(host, port, socket.AF_UNSPEC,
socket.SOCK_STREAM, 0, socket.AI_PASSIVE)
except socket.gaierror:
if ':' in self.bind_addr[0]:
info = [(socket.AF_INET6, socket.SOCK_STREAM,
0, "", self.bind_addr + (0, 0))]
else:
info = [(socket.AF_INET, socket.SOCK_STREAM,
0, "", self.bind_addr)]
self.socket = None
msg = "No socket could be created"
for res in info:
af, socktype, proto, canonname, sa = res
try:
self.bind(af, socktype, proto)
except socket.error:
if self.socket:
self.socket.close()
self.socket = None
continue
break
if not self.socket:
raise socket.error(msg)
# Timeout so KeyboardInterrupt can be caught on Win32
self.socket.settimeout(1)
self.socket.listen(self.request_queue_size)
# Create worker threads
self.requests.start()
self.ready = True
self._start_time = time.time()
while self.ready:
self.tick()
if self.interrupt:
while self.interrupt is True:
# Wait for self.stop() to complete. See _set_interrupt.
time.sleep(0.1)
if self.interrupt:
raise self.interrupt
def bind(self, family, type, proto=0):
"""Create (or recreate) the actual socket object."""
self.socket = socket.socket(family, type, proto)
prevent_socket_inheritance(self.socket)
self.socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
if self.nodelay and not isinstance(self.bind_addr, str):
self.socket.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1)
if self.ssl_adapter is not None:
self.socket = self.ssl_adapter.bind(self.socket)
# If listening on the IPV6 any address ('::' = IN6ADDR_ANY),
# activate dual-stack. See http://www.cherrypy.org/ticket/871.
if (hasattr(socket, 'AF_INET6') and family == socket.AF_INET6
and self.bind_addr[0] in ('::', '::0', '::0.0.0.0')):
try:
self.socket.setsockopt(socket.IPPROTO_IPV6, socket.IPV6_V6ONLY, 0)
except (AttributeError, socket.error):
# Apparently, the socket option is not available in
# this machine's TCP stack
pass
self.socket.bind(self.bind_addr)
def tick(self):
"""Accept a new connection and put it on the Queue."""
try:
s, addr = self.socket.accept()
if self.stats['Enabled']:
self.stats['Accepts'] += 1
if not self.ready:
return
prevent_socket_inheritance(s)
if hasattr(s, 'settimeout'):
s.settimeout(self.timeout)
makefile = CP_fileobject
ssl_env = {}
# if ssl cert and key are set, we try to be a secure HTTP server
if self.ssl_adapter is not None:
try:
s, ssl_env = self.ssl_adapter.wrap(s)
except NoSSLError:
msg = ("The client sent a plain HTTP request, but "
"this server only speaks HTTPS on this port.")
buf = ["%s 400 Bad Request\r\n" % self.protocol,
"Content-Length: %s\r\n" % len(msg),
"Content-Type: text/plain\r\n\r\n",
msg]
wfile = CP_fileobject(s, "wb", DEFAULT_BUFFER_SIZE)
try:
wfile.sendall("".join(buf))
except socket.error, x:
if x.args[0] not in socket_errors_to_ignore:
raise
return
if not s:
return
makefile = self.ssl_adapter.makefile
# Re-apply our timeout since we may have a new socket object
if hasattr(s, 'settimeout'):
s.settimeout(self.timeout)
conn = self.ConnectionClass(self, s, makefile)
if not isinstance(self.bind_addr, basestring):
# optional values
# Until we do DNS lookups, omit REMOTE_HOST
if addr is None: # sometimes this can happen
# figure out if AF_INET or AF_INET6.
if len(s.getsockname()) == 2:
# AF_INET
addr = ('0.0.0.0', 0)
else:
# AF_INET6
addr = ('::', 0)
conn.remote_addr = addr[0]
conn.remote_port = addr[1]
conn.ssl_env = ssl_env
self.requests.put(conn)
except socket.timeout:
# The only reason for the timeout in start() is so we can
# notice keyboard interrupts on Win32, which don't interrupt
# accept() by default
return
except socket.error, x:
if self.stats['Enabled']:
self.stats['Socket Errors'] += 1
if x.args[0] in socket_error_eintr:
# I *think* this is right. EINTR should occur when a signal
# is received during the accept() call; all docs say retry
# the call, and I *think* I'm reading it right that Python
# will then go ahead and poll for and handle the signal
# elsewhere. See http://www.cherrypy.org/ticket/707.
return
if x.args[0] in socket_errors_nonblocking:
# Just try again. See http://www.cherrypy.org/ticket/479.
return
if x.args[0] in socket_errors_to_ignore:
# Our socket was closed.
# See http://www.cherrypy.org/ticket/686.
return
raise
def _get_interrupt(self):
return self._interrupt
def _set_interrupt(self, interrupt):
self._interrupt = True
self.stop()
self._interrupt = interrupt
interrupt = property(_get_interrupt, _set_interrupt,
doc="Set this to an Exception instance to "
"interrupt the server.")
def stop(self):
"""Gracefully shutdown a server that is serving forever."""
self.ready = False
if self._start_time is not None:
self._run_time += (time.time() - self._start_time)
self._start_time = None
sock = getattr(self, "socket", None)
if sock:
if not isinstance(self.bind_addr, basestring):
# Touch our own socket to make accept() return immediately.
try:
host, port = sock.getsockname()[:2]
except socket.error, x:
if x.args[0] not in socket_errors_to_ignore:
# Changed to use error code and not message
# See http://www.cherrypy.org/ticket/860.
raise
else:
# Note that we're explicitly NOT using AI_PASSIVE,
# here, because we want an actual IP to touch.
# localhost won't work if we've bound to a public IP,
# but it will if we bound to '0.0.0.0' (INADDR_ANY).
for res in socket.getaddrinfo(host, port, socket.AF_UNSPEC,
socket.SOCK_STREAM):
af, socktype, proto, canonname, sa = res
s = None
try:
s = socket.socket(af, socktype, proto)
# See http://groups.google.com/group/cherrypy-users/
# browse_frm/thread/bbfe5eb39c904fe0
s.settimeout(1.0)
s.connect((host, port))
s.close()
except socket.error:
if s:
s.close()
if hasattr(sock, "close"):
sock.close()
self.socket = None
self.requests.stop(self.shutdown_timeout)
class Gateway(object):
def __init__(self, req):
self.req = req
def respond(self):
        raise NotImplementedError
# These may either be wsgiserver.SSLAdapter subclasses or the string names
# of such classes (in which case they will be lazily loaded).
ssl_adapters = {
'builtin': 'cherrypy.wsgiserver.ssl_builtin.BuiltinSSLAdapter',
'pyopenssl': 'cherrypy.wsgiserver.ssl_pyopenssl.pyOpenSSLAdapter',
}
def get_ssl_adapter_class(name='pyopenssl'):
adapter = ssl_adapters[name.lower()]
if isinstance(adapter, basestring):
last_dot = adapter.rfind(".")
attr_name = adapter[last_dot + 1:]
mod_path = adapter[:last_dot]
try:
mod = sys.modules[mod_path]
if mod is None:
raise KeyError()
except KeyError:
# The last [''] is important.
mod = __import__(mod_path, globals(), locals(), [''])
# Let an AttributeError propagate outward.
try:
adapter = getattr(mod, attr_name)
except AttributeError:
raise AttributeError("'%s' object has no attribute '%s'"
% (mod_path, attr_name))
return adapter
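# Illustrative usage (assumed certificate paths):
#
#     adapter_cls = get_ssl_adapter_class('builtin')
#     server.ssl_adapter = adapter_cls('/path/cert.pem', '/path/privkey.pem')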
# -------------------------------- WSGI Stuff -------------------------------- #
class CherryPyWSGIServer(HTTPServer):
wsgi_version = (1, 0)
def __init__(self, bind_addr, wsgi_app, numthreads=10, server_name=None,
max=-1, request_queue_size=5, timeout=10, shutdown_timeout=5):
self.requests = ThreadPool(self, min=numthreads or 1, max=max)
self.wsgi_app = wsgi_app
self.gateway = wsgi_gateways[self.wsgi_version]
self.bind_addr = bind_addr
if not server_name:
server_name = socket.gethostname()
self.server_name = server_name
self.request_queue_size = request_queue_size
self.timeout = timeout
self.shutdown_timeout = shutdown_timeout
self.clear_stats()
def _get_numthreads(self):
return self.requests.min
def _set_numthreads(self, value):
self.requests.min = value
numthreads = property(_get_numthreads, _set_numthreads)
class WSGIGateway(Gateway):
def __init__(self, req):
self.req = req
self.started_response = False
self.env = self.get_environ()
self.remaining_bytes_out = None
def get_environ(self):
"""Return a new environ dict targeting the given wsgi.version"""
        raise NotImplementedError
def respond(self):
response = self.req.server.wsgi_app(self.env, self.start_response)
try:
for chunk in response:
# "The start_response callable must not actually transmit
# the response headers. Instead, it must store them for the
# server or gateway to transmit only after the first
# iteration of the application return value that yields
# a NON-EMPTY string, or upon the application's first
# invocation of the write() callable." (PEP 333)
if chunk:
if isinstance(chunk, unicode):
chunk = chunk.encode('ISO-8859-1')
self.write(chunk)
finally:
if hasattr(response, "close"):
response.close()
def start_response(self, status, headers, exc_info = None):
"""WSGI callable to begin the HTTP response."""
# "The application may call start_response more than once,
# if and only if the exc_info argument is provided."
if self.started_response and not exc_info:
raise AssertionError("WSGI start_response called a second "
"time with no exc_info.")
self.started_response = True
# "if exc_info is provided, and the HTTP headers have already been
# sent, start_response must raise an error, and should raise the
# exc_info tuple."
if self.req.sent_headers:
try:
raise exc_info[0], exc_info[1], exc_info[2]
finally:
exc_info = None
self.req.status = status
for k, v in headers:
if not isinstance(k, str):
raise TypeError("WSGI response header key %r is not a byte string." % k)
if not isinstance(v, str):
raise TypeError("WSGI response header value %r is not a byte string." % v)
if k.lower() == 'content-length':
self.remaining_bytes_out = int(v)
self.req.outheaders.extend(headers)
return self.write
def write(self, chunk):
"""WSGI callable to write unbuffered data to the client.
This method is also used internally by start_response (to write
data from the iterable returned by the WSGI application).
"""
if not self.started_response:
raise AssertionError("WSGI write called before start_response.")
chunklen = len(chunk)
rbo = self.remaining_bytes_out
if rbo is not None and chunklen > rbo:
if not self.req.sent_headers:
# Whew. We can send a 500 to the client.
self.req.simple_response("500 Internal Server Error",
"The requested resource returned more bytes than the "
"declared Content-Length.")
else:
# Dang. We have probably already sent data. Truncate the chunk
# to fit (so the client doesn't hang) and raise an error later.
chunk = chunk[:rbo]
if not self.req.sent_headers:
self.req.sent_headers = True
self.req.send_headers()
self.req.write(chunk)
if rbo is not None:
rbo -= chunklen
if rbo < 0:
raise ValueError(
"Response body exceeds the declared Content-Length.")
class WSGIGateway_10(WSGIGateway):
def get_environ(self):
"""Return a new environ dict targeting the given wsgi.version"""
req = self.req
env = {
# set a non-standard environ entry so the WSGI app can know what
# the *real* server protocol is (and what features to support).
# See http://www.faqs.org/rfcs/rfc2145.html.
'ACTUAL_SERVER_PROTOCOL': req.server.protocol,
'PATH_INFO': req.path,
'QUERY_STRING': req.qs,
'REMOTE_ADDR': req.conn.remote_addr or '',
'REMOTE_PORT': str(req.conn.remote_port or ''),
'REQUEST_METHOD': req.method,
'REQUEST_URI': req.uri,
'SCRIPT_NAME': '',
'SERVER_NAME': req.server.server_name,
# Bah. "SERVER_PROTOCOL" is actually the REQUEST protocol.
'SERVER_PROTOCOL': req.request_protocol,
'SERVER_SOFTWARE': req.server.software,
'wsgi.errors': sys.stderr,
'wsgi.input': req.rfile,
'wsgi.multiprocess': False,
'wsgi.multithread': True,
'wsgi.run_once': False,
'wsgi.url_scheme': req.scheme,
'wsgi.version': (1, 0),
}
if isinstance(req.server.bind_addr, basestring):
# AF_UNIX. This isn't really allowed by WSGI, which doesn't
# address unix domain sockets. But it's better than nothing.
env["SERVER_PORT"] = ""
else:
env["SERVER_PORT"] = str(req.server.bind_addr[1])
# Request headers
for k, v in req.inheaders.iteritems():
env["HTTP_" + k.upper().replace("-", "_")] = v
# CONTENT_TYPE/CONTENT_LENGTH
ct = env.pop("HTTP_CONTENT_TYPE", None)
if ct is not None:
env["CONTENT_TYPE"] = ct
cl = env.pop("HTTP_CONTENT_LENGTH", None)
if cl is not None:
env["CONTENT_LENGTH"] = cl
if req.conn.ssl_env:
env.update(req.conn.ssl_env)
return env
class WSGIGateway_u0(WSGIGateway_10):
def get_environ(self):
"""Return a new environ dict targeting the given wsgi.version"""
req = self.req
env_10 = WSGIGateway_10.get_environ(self)
env = dict([(k.decode('ISO-8859-1'), v) for k, v in env_10.iteritems()])
env[u'wsgi.version'] = ('u', 0)
# Request-URI
env.setdefault(u'wsgi.url_encoding', u'utf-8')
try:
for key in [u"PATH_INFO", u"SCRIPT_NAME", u"QUERY_STRING"]:
env[key] = env_10[str(key)].decode(env[u'wsgi.url_encoding'])
except UnicodeDecodeError:
# Fall back to latin 1 so apps can transcode if needed.
env[u'wsgi.url_encoding'] = u'ISO-8859-1'
for key in [u"PATH_INFO", u"SCRIPT_NAME", u"QUERY_STRING"]:
env[key] = env_10[str(key)].decode(env[u'wsgi.url_encoding'])
for k, v in sorted(env.items()):
if isinstance(v, str) and k not in ('REQUEST_URI', 'wsgi.input'):
env[k] = v.decode('ISO-8859-1')
return env
wsgi_gateways = {
(1, 0): WSGIGateway_10,
('u', 0): WSGIGateway_u0,
}
class WSGIPathInfoDispatcher(object):
"""A WSGI dispatcher for dispatch based on the PATH_INFO.
apps: a dict or list of (path_prefix, app) pairs.
"""
def __init__(self, apps):
try:
apps = apps.items()
except AttributeError:
pass
        # Sort the apps by len(path_prefix), descending.
        apps.sort(key=lambda pair: len(pair[0]), reverse=True)
# The path_prefix strings must start, but not end, with a slash.
# Use "" instead of "/".
self.apps = [(p.rstrip("/"), a) for p, a in apps]
def __call__(self, environ, start_response):
path = environ["PATH_INFO"] or "/"
for p, app in self.apps:
# The apps list should be sorted by length, descending.
if path.startswith(p + "/") or path == p:
environ = environ.copy()
environ["SCRIPT_NAME"] = environ["SCRIPT_NAME"] + p
environ["PATH_INFO"] = path[len(p):]
return app(environ, start_response)
start_response('404 Not Found', [('Content-Type', 'text/plain'),
('Content-Length', '0')])
return ['']
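# A minimal dispatch sketch (illustrative app names):
#
#     d = WSGIPathInfoDispatcher([('/blog', blog_app), ('', root_app)])
#     server = CherryPyWSGIServer(('0.0.0.0', 8080), d)
#
# A request with PATH_INFO "/blog/2010" matches the longest prefix first and
# reaches blog_app with SCRIPT_NAME "/blog" and PATH_INFO "/2010".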
| gpl-2.0 |
bak1an/django | tests/sitemaps_tests/test_utils.py | 72 | 1830 | from unittest import mock
from urllib.parse import urlencode
from django.contrib.sitemaps import (
SitemapNotFound, _get_sitemap_full_url, ping_google,
)
from django.core.exceptions import ImproperlyConfigured
from django.test import modify_settings, override_settings
from .base import SitemapTestsBase
class PingGoogleTests(SitemapTestsBase):
@mock.patch('django.contrib.sitemaps.urlopen')
def test_something(self, urlopen):
ping_google()
params = urlencode({'sitemap': 'http://example.com/sitemap-without-entries/sitemap.xml'})
full_url = 'https://www.google.com/webmasters/tools/ping?%s' % params
urlopen.assert_called_with(full_url)
def test_get_sitemap_full_url_global(self):
self.assertEqual(_get_sitemap_full_url(None), 'http://example.com/sitemap-without-entries/sitemap.xml')
@override_settings(ROOT_URLCONF='sitemaps_tests.urls.index_only')
def test_get_sitemap_full_url_index(self):
self.assertEqual(_get_sitemap_full_url(None), 'http://example.com/simple/index.xml')
@override_settings(ROOT_URLCONF='sitemaps_tests.urls.empty')
def test_get_sitemap_full_url_not_detected(self):
msg = "You didn't provide a sitemap_url, and the sitemap URL couldn't be auto-detected."
with self.assertRaisesMessage(SitemapNotFound, msg):
_get_sitemap_full_url(None)
def test_get_sitemap_full_url_exact_url(self):
self.assertEqual(_get_sitemap_full_url('/foo.xml'), 'http://example.com/foo.xml')
@modify_settings(INSTALLED_APPS={'remove': 'django.contrib.sites'})
def test_get_sitemap_full_url_no_sites(self):
msg = "ping_google requires django.contrib.sites, which isn't installed."
with self.assertRaisesMessage(ImproperlyConfigured, msg):
_get_sitemap_full_url(None)
| bsd-3-clause |
SummerLW/Perf-Insight-Report | third_party/mapreduce/mapreduce/util.py | 36 | 13063 | #!/usr/bin/env python
# Copyright 2010 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utility functions for use with the mapreduce library."""
# pylint: disable=g-bad-name
__all__ = [
"create_datastore_write_config",
"for_name",
"get_queue_name",
"get_short_name",
"handler_for_name",
"is_generator",
"parse_bool",
"total_seconds",
"try_serialize_handler",
"try_deserialize_handler",
"CALLBACK_MR_ID_TASK_HEADER",
"strip_prefix_from_items"
]
import inspect
import os
import pickle
import random
import sys
import time
import types
from google.appengine.ext import ndb
from google.appengine.datastore import datastore_rpc
from mapreduce import parameters
# Taskqueue task header for the MR id. Used internally by MR.
_MR_ID_TASK_HEADER = "AE-MR-ID"
_MR_SHARD_ID_TASK_HEADER = "AE-MR-SHARD-ID"
# Callback task MR ID task header
CALLBACK_MR_ID_TASK_HEADER = "Mapreduce-Id"
# Ridiculous future UNIX epoch time, 500 years from now.
_FUTURE_TIME = 2**34
def _get_descending_key(gettime=time.time):
"""Returns a key name lexically ordered by time descending.
This lets us have a key name for use with Datastore entities which returns
rows in time descending order when it is scanned in lexically ascending order,
allowing us to bypass index building for descending indexes.
Args:
gettime: Used for testing.
Returns:
A string with a time descending key.
"""
now_descending = int((_FUTURE_TIME - gettime()) * 100)
request_id_hash = os.environ.get("REQUEST_ID_HASH")
if not request_id_hash:
request_id_hash = str(random.getrandbits(32))
return "%d%s" % (now_descending, request_id_hash)
def _get_task_host():
"""Get the Host header value for all mr tasks.
Task Host header determines which instance this task would be routed to.
Current version id format is: v7.368834058928280579
Current module id is just the module's name. It could be "default"
Default version hostname is app_id.appspot.com
Returns:
    A complete host name of the form version.module.app_id.appspot.com, or
    just version.app_id.appspot.com if the module is the default module. The
    reason: if an app doesn't have modules enabled and the url is
    "version.default.app_id", "version" is ignored and "default" is used as
    the version; if no "default" version exists, the url is routed to the
    default version.
"""
version = os.environ["CURRENT_VERSION_ID"].split(".")[0]
default_host = os.environ["DEFAULT_VERSION_HOSTNAME"]
module = os.environ["CURRENT_MODULE_ID"]
if os.environ["CURRENT_MODULE_ID"] == "default":
return "%s.%s" % (version, default_host)
return "%s.%s.%s" % (version, module, default_host)
def _get_task_headers(map_job_id,
mr_id_header_key=_MR_ID_TASK_HEADER):
"""Get headers for all mr tasks.
Args:
map_job_id: map job id.
mr_id_header_key: the key to set mr id with.
Returns:
A dictionary of all headers.
"""
return {mr_id_header_key: map_job_id,
"Host": _get_task_host()}
def _enum(**enums):
"""Helper to create enum."""
return type("Enum", (), enums)
def get_queue_name(queue_name):
"""Determine which queue MR should run on.
How to choose the queue:
1. If user provided one, use that.
2. If we are starting a mr from taskqueue, inherit that queue.
If it's a special queue, fall back to the default queue.
3. Default queue.
If user is using any MR pipeline interface, pipeline.start takes a
"queue_name" argument. The pipeline will run on that queue and MR will
simply inherit the queue_name.
Args:
queue_name: queue_name from user. Maybe None.
Returns:
The queue name to run on.
"""
if queue_name:
return queue_name
queue_name = os.environ.get("HTTP_X_APPENGINE_QUEUENAME",
parameters.config.QUEUE_NAME)
if len(queue_name) > 1 and queue_name[0:2] == "__":
# We are currently in some special queue. E.g. __cron.
return parameters.config.QUEUE_NAME
else:
return queue_name
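# Illustrative sketch (not part of the original module): an explicit queue
# name always wins; a special "__" queue inherited from the environment
# falls back to the configured default.
def _demo_get_queue_name():
  assert get_queue_name("my-queue") == "my-queue"
  os.environ["HTTP_X_APPENGINE_QUEUENAME"] = "__cron"
  try:
    assert get_queue_name(None) == parameters.config.QUEUE_NAME
  finally:
    del os.environ["HTTP_X_APPENGINE_QUEUENAME"]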
def total_seconds(td):
"""convert a timedelta to seconds.
This is patterned after timedelta.total_seconds, which is only
available in python 27.
Args:
td: a timedelta object.
Returns:
total seconds within the timedelta, rounded up to whole seconds.
"""
secs = td.seconds + td.days * 24 * 3600
if td.microseconds:
secs += 1
return secs
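# Illustrative sketch (not part of the original module): any nonzero
# microsecond component rounds the result up to the next whole second.
def _demo_total_seconds():
  import datetime
  assert total_seconds(datetime.timedelta(days=1)) == 86400
  assert total_seconds(datetime.timedelta(seconds=1, microseconds=1)) == 2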
def for_name(fq_name, recursive=False):
"""Find class/function/method specified by its fully qualified name.
Fully qualified can be specified as:
* <module_name>.<class_name>
* <module_name>.<function_name>
* <module_name>.<class_name>.<method_name> (an unbound method will be
returned in this case).
for_name works by doing __import__ for <module_name>, and looks for
<class_name>/<function_name> in module's __dict__/attrs. If fully qualified
name doesn't contain '.', the current module will be used.
Args:
fq_name: fully qualified name of something to find.
recursive: run recursively or not.
Returns:
class object or None if fq_name is None.
Raises:
ImportError: when specified module could not be loaded or the class
was not found in the module.
"""
# if "." not in fq_name:
# raise ImportError("'%s' is not a full-qualified name" % fq_name)
if fq_name is None:
return
fq_name = str(fq_name)
module_name = __name__
short_name = fq_name
if fq_name.rfind(".") >= 0:
(module_name, short_name) = (fq_name[:fq_name.rfind(".")],
fq_name[fq_name.rfind(".") + 1:])
try:
result = __import__(module_name, None, None, [short_name])
return result.__dict__[short_name]
except KeyError:
# If we're recursively inside a for_name() chain, then we want to raise
# this error as a key error so we can report the actual source of the
# problem. If we're *not* recursively being called, that means the
# module was found and the specific item could not be loaded, and thus
# we want to raise an ImportError directly.
if recursive:
raise
else:
raise ImportError("Could not find '%s' on path '%s'" % (
short_name, module_name))
except ImportError:
# module_name is not actually a module. Try for_name for it to figure
# out what's this.
try:
module = for_name(module_name, recursive=True)
if hasattr(module, short_name):
return getattr(module, short_name)
else:
# The module was found, but the function component is missing.
raise KeyError()
except KeyError:
raise ImportError("Could not find '%s' on path '%s'" % (
short_name, module_name))
except ImportError:
# This means recursive import attempts failed, thus we will raise the
# first ImportError we encountered, since it's likely the most accurate.
pass
# Raise the original import error that caused all of this, since it is
# likely the real cause of the overall problem.
raise
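# Illustrative sketch (not part of the original module): resolving a
# stdlib function by its fully qualified name.
def _demo_for_name():
  join = for_name("os.path.join")
  assert join is os.path.join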
def handler_for_name(fq_name):
"""Resolves and instantiates handler by fully qualified name.
First resolves the name using for_name call. Then if it resolves to a class,
instantiates a class, if it resolves to a method - instantiates the class and
binds method to the instance.
Args:
fq_name: fully qualified name of something to find.
Returns:
handler instance which is ready to be called.
"""
resolved_name = for_name(fq_name)
if isinstance(resolved_name, (type, types.ClassType)):
# create new instance if this is type
return resolved_name()
elif isinstance(resolved_name, types.MethodType):
# bind the method
return getattr(resolved_name.im_class(), resolved_name.__name__)
else:
return resolved_name
def try_serialize_handler(handler):
"""Try to serialize map/reduce handler.
Args:
handler: handler function/instance. Handler can be a function or an
instance of a callable class. In the latter case, the handler will
be serialized across slices to allow users to save states.
Returns:
serialized handler string or None.
"""
if (isinstance(handler, types.InstanceType) or # old style class
(isinstance(handler, object) and # new style class
not inspect.isfunction(handler) and
not inspect.ismethod(handler)) and
hasattr(handler, "__call__")):
return pickle.dumps(handler)
return None
def try_deserialize_handler(serialized_handler):
"""Reverse function of try_serialize_handler.
Args:
serialized_handler: serialized handler str or None.
Returns:
handler instance or None.
"""
if serialized_handler:
return pickle.loads(serialized_handler)
def is_generator(obj):
"""Return true if the object is generator or generator function.
Generator function objects provide the same attributes as functions.
See isfunction.__doc__ for attributes listing.
Adapted from Python 2.6.
Args:
obj: an object to test.
Returns:
true if the object is generator function.
"""
if isinstance(obj, types.GeneratorType):
return True
CO_GENERATOR = 0x20
return bool(((inspect.isfunction(obj) or inspect.ismethod(obj)) and
obj.func_code.co_flags & CO_GENERATOR))
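# Illustrative sketch (not part of the original module).
def _demo_is_generator():
  def gen():
    yield 1
  assert is_generator(gen)      # generator function (CO_GENERATOR flag set)
  assert is_generator(gen())    # generator object
  assert not is_generator(lambda: 1)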
def get_short_name(fq_name):
"""Returns the last component of the name."""
return fq_name.split(".")[-1:][0]
def parse_bool(obj):
"""Return true if the object represents a truth value, false otherwise.
For bool and numeric objects, uses Python's built-in bool function. For
str objects, checks string against a list of possible truth values.
Args:
obj: object to determine the boolean value of.
Returns:
Boolean value according to section 5.1 of the Python docs if the object
is not a str. For str objects, returns True if the string is in
TRUTH_VALUE_SET and False otherwise.
http://docs.python.org/library/stdtypes.html
"""
if type(obj) is str:
TRUTH_VALUE_SET = ["true", "1", "yes", "t", "on"]
return obj.lower() in TRUTH_VALUE_SET
else:
return bool(obj)
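# Illustrative sketch (not part of the original module): strings are matched
# against TRUTH_VALUE_SET; every other type goes through bool().
def _demo_parse_bool():
  assert parse_bool("Yes") and parse_bool("1") and parse_bool("on")
  assert not parse_bool("no") and not parse_bool("0")
  assert parse_bool(2) and not parse_bool(0)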
def create_datastore_write_config(mapreduce_spec):
"""Creates datastore config to use in write operations.
Args:
mapreduce_spec: current mapreduce specification as MapreduceSpec.
Returns:
an instance of datastore_rpc.Configuration to use for all write
operations in the mapreduce.
"""
force_writes = parse_bool(mapreduce_spec.params.get("force_writes", "false"))
if force_writes:
return datastore_rpc.Configuration(force_writes=force_writes)
else:
# dev server doesn't support force_writes.
return datastore_rpc.Configuration()
def _set_ndb_cache_policy():
"""Tell NDB to never cache anything in memcache or in-process.
This ensures that entities fetched from Datastore input_readers via NDB
will not bloat up the request memory size and Datastore Puts will avoid
doing calls to memcache. Without this you get soft memory limit exits,
which hurts overall throughput.
"""
ndb_ctx = ndb.get_context()
ndb_ctx.set_cache_policy(lambda key: False)
ndb_ctx.set_memcache_policy(lambda key: False)
def _obj_to_path(obj):
"""Returns the fully qualified path to the object.
Args:
obj: obj must be a new style top level class, or a top level function.
No inner function or static method.
Returns:
Fully qualified path to the object.
Raises:
TypeError: when argument obj has unsupported type.
ValueError: when obj can't be discovered on the top level.
"""
if obj is None:
return obj
if inspect.isclass(obj) or inspect.isfunction(obj):
fetched = getattr(sys.modules[obj.__module__], obj.__name__, None)
if fetched is None:
raise ValueError(
"Object %r must be defined on the top level of a module." % obj)
return "%s.%s" % (obj.__module__, obj.__name__)
raise TypeError("Unexpected type %s." % type(obj))
def strip_prefix_from_items(prefix, items):
"""Strips out the prefix from each of the items if it is present.
Args:
prefix: the string for that you wish to strip from the beginning of each
of the items.
items: a list of strings that may or may not contain the prefix you want
to strip out.
Returns:
items_no_prefix: a copy of the list of items (same order) without the
prefix (if present).
"""
items_no_prefix = []
for item in items:
if item.startswith(prefix):
items_no_prefix.append(item[len(prefix):])
else:
items_no_prefix.append(item)
return items_no_prefix
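# Illustrative sketch (not part of the original module): items without the
# prefix pass through unchanged.
def _demo_strip_prefix_from_items():
  got = strip_prefix_from_items("gs://", ["gs://bucket/a", "local/b"])
  assert got == ["bucket/a", "local/b"]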
| bsd-3-clause |
ProjectSWGCore/NGECore2 | scripts/mobiles/talus/giga_flite_rasp.py | 2 | 1594 | import sys
from services.spawn import MobileTemplate
from services.spawn import WeaponTemplate
from resources.datatables import WeaponType
from resources.datatables import Difficulty
from resources.datatables import Options
from java.util import Vector
def addTemplate(core):
mobileTemplate = MobileTemplate()
mobileTemplate.setCreatureName('giga_flite_rasp')
mobileTemplate.setLevel(39)
mobileTemplate.setDifficulty(Difficulty.NORMAL)
mobileTemplate.setMinSpawnDistance(5)
mobileTemplate.setMaxSpawnDistance(10)
mobileTemplate.setDeathblow(False)
mobileTemplate.setScale(1)
mobileTemplate.setMeatType("Avian Meat")
mobileTemplate.setMeatAmount(20)
mobileTemplate.setBoneType("Avian Bone")
mobileTemplate.setHideAmount(6)
mobileTemplate.setSocialGroup("rasp")
mobileTemplate.setAssistRange(6)
mobileTemplate.setStalker(False)
mobileTemplate.setOptionsBitmask(Options.ATTACKABLE)
templates = Vector()
templates.add('object/mobile/shared_flite_rasp.iff')
mobileTemplate.setTemplates(templates)
weaponTemplates = Vector()
weapontemplate = WeaponTemplate('object/weapon/melee/unarmed/shared_unarmed_default.iff', WeaponType.UNARMED, 1.0, 6, 'kinetic')
weaponTemplates.add(weapontemplate)
mobileTemplate.setWeaponTemplateVector(weaponTemplates)
attacks = Vector()
attacks.add('bm_claw_3')
attacks.add('bm_slash_3')
attacks.add('bm_wing_buffet_3')
mobileTemplate.setDefaultAttack('creatureMeleeAttack')
mobileTemplate.setAttacks(attacks)
core.spawnService.addMobileTemplate('giga_flite_rasp', mobileTemplate)
return | lgpl-3.0 |
sinpantuflas/aubio | python/demos/demo_sink_create_woodblock.py | 10 | 1580 | #! /usr/bin/env python
import sys
from math import pi, e
from aubio import sink
from numpy import arange, resize, sin, exp, zeros
if len(sys.argv) < 2:
print 'usage: %s <outputfile> [samplerate]' % sys.argv[0]
sys.exit(1)
samplerate = 44100 # samplerate in Hz
if len( sys.argv ) > 2: samplerate = int(sys.argv[2])
pitch = 2200 # in Hz
blocksize = 256 # in samples
duration = 0.02 # in seconds
twopi = pi * 2.
duration = int ( samplerate * duration ) # convert to samples
attack = int(samplerate * .001)
decay = .5
period = float(samplerate) / pitch
# create a sine lookup table
tablelen = 1000
sinetable = arange(tablelen + 1, dtype = 'float32')
sinetable = 0.7 * sin(twopi * sinetable/tablelen)
sinetone = zeros((duration,), dtype = 'float32')
# compute sinetone at floating point period
for i in range(duration):
x = int((i % period) / float(period) * tablelen)
idx = int(x)
frac = x - idx
a = sinetable[idx]
b = sinetable[idx + 1]
sinetone[i] = a + frac * (b - a)
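# (Illustration, not part of the original demo: with tablelen = 1000, a
# fractional index x = 2.5 gives idx = 2 and frac = 0.5, so the sample is
# the midpoint of sinetable[2] and sinetable[3] -- plain linear
# interpolation between adjacent table entries.)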
# apply some envelope
float_ramp = arange(duration, dtype = 'float32')
sinetone *= exp( - e * float_ramp / duration / decay)
sinetone[:attack] *= exp( e * ( float_ramp[:attack] / attack - 1 ) )
if 1:
import matplotlib.pyplot as plt
plt.plot(sinetone)
plt.show()
my_sink = sink(sys.argv[1], samplerate)
total_frames = 0
while total_frames + blocksize < duration:
my_sink(sinetone[total_frames:total_frames+blocksize], blocksize)
total_frames += blocksize
my_sink(sinetone[total_frames:duration], duration - total_frames)
| gpl-3.0 |
tvtsoft/odoo8 | openerp/tools/appdirs.py | 79 | 19979 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright (c) 2005-2010 ActiveState Software Inc.
# Copyright (c) 2013 Eddy Petrișor
"""Utilities for determining application-specific dirs.
See <http://github.com/ActiveState/appdirs> for details and usage.
"""
# Dev Notes:
# - MSDN on where to store app data files:
# http://support.microsoft.com/default.aspx?scid=kb;en-us;310294#XSLTH3194121123120121120120
# - Mac OS X: http://developer.apple.com/documentation/MacOSX/Conceptual/BPFileSystem/index.html
# - XDG spec for Un*x: http://standards.freedesktop.org/basedir-spec/basedir-spec-latest.html
__version_info__ = (1, 3, 0)
__version__ = '.'.join(map(str, __version_info__))
import sys
import os
PY3 = sys.version_info[0] == 3
if PY3:
unicode = str
def user_data_dir(appname=None, appauthor=None, version=None, roaming=False):
r"""Return full path to the user-specific data dir for this application.
"appname" is the name of application.
If None, just the system directory is returned.
"appauthor" (only required and used on Windows) is the name of the
appauthor or distributing body for this application. Typically
it is the owning company name. This falls back to appname.
"version" is an optional version path element to append to the
path. You might want to use this if you want multiple versions
of your app to be able to run independently. If used, this
would typically be "<major>.<minor>".
Only applied when appname is present.
"roaming" (boolean, default False) can be set True to use the Windows
roaming appdata directory. That means that for users on a Windows
network setup for roaming profiles, this user data will be
sync'd on login. See
<http://technet.microsoft.com/en-us/library/cc766489(WS.10).aspx>
for a discussion of issues.
Typical user data directories are:
Mac OS X: ~/Library/Application Support/<AppName>
Unix: ~/.local/share/<AppName> # or in $XDG_DATA_HOME, if defined
Win XP (not roaming): C:\Documents and Settings\<username>\Application Data\<AppAuthor>\<AppName>
Win XP (roaming): C:\Documents and Settings\<username>\Local Settings\Application Data\<AppAuthor>\<AppName>
Win 7 (not roaming): C:\Users\<username>\AppData\Local\<AppAuthor>\<AppName>
Win 7 (roaming): C:\Users\<username>\AppData\Roaming\<AppAuthor>\<AppName>
For Unix, we follow the XDG spec and support $XDG_DATA_HOME.
That means, by default, "~/.local/share/<AppName>".
"""
if sys.platform == "win32":
if appauthor is None:
appauthor = appname
const = roaming and "CSIDL_APPDATA" or "CSIDL_LOCAL_APPDATA"
path = os.path.normpath(_get_win_folder(const))
if appname:
path = os.path.join(path, appauthor, appname)
elif sys.platform == 'darwin':
path = os.path.expanduser('~/Library/Application Support/')
if appname:
path = os.path.join(path, appname)
else:
path = os.getenv('XDG_DATA_HOME', os.path.expanduser("~/.local/share"))
if appname:
path = os.path.join(path, appname)
if appname and version:
path = os.path.join(path, version)
return path
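# Hedged illustration (not part of the original module): on Linux the XDG
# variable, when set, takes precedence. For example, with
# XDG_DATA_HOME=/tmp/xdg, user_data_dir('MyApp', version='1.0') returns
# '/tmp/xdg/MyApp/1.0'.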
def site_data_dir(appname=None, appauthor=None, version=None, multipath=False):
"""Return full path to the user-shared data dir for this application.
"appname" is the name of application.
If None, just the system directory is returned.
"appauthor" (only required and used on Windows) is the name of the
appauthor or distributing body for this application. Typically
it is the owning company name. This falls back to appname.
"version" is an optional version path element to append to the
path. You might want to use this if you want multiple versions
of your app to be able to run independently. If used, this
would typically be "<major>.<minor>".
Only applied when appname is present.
"multipath" is an optional parameter only applicable to *nix
which indicates that the entire list of data dirs should be
returned. By default, the first item from XDG_DATA_DIRS is
returned, or '/usr/local/share/<AppName>',
if XDG_DATA_DIRS is not set
Typical user data directories are:
Mac OS X: /Library/Application Support/<AppName>
Unix: /usr/local/share/<AppName> or /usr/share/<AppName>
Win XP: C:\Documents and Settings\All Users\Application Data\<AppAuthor>\<AppName>
Vista: (Fail! "C:\ProgramData" is a hidden *system* directory on Vista.)
Win 7: C:\ProgramData\<AppAuthor>\<AppName> # Hidden, but writeable on Win 7.
For Unix, this is using the $XDG_DATA_DIRS[0] default.
WARNING: Do not use this on Windows. See the Vista-Fail note above for why.
"""
if sys.platform == "win32":
if appauthor is None:
appauthor = appname
path = os.path.normpath(_get_win_folder("CSIDL_COMMON_APPDATA"))
if appname:
path = os.path.join(path, appauthor, appname)
elif sys.platform == 'darwin':
path = os.path.expanduser('/Library/Application Support')
if appname:
path = os.path.join(path, appname)
else:
# XDG default for $XDG_DATA_DIRS
# only first, if multipath is False
path = os.getenv('XDG_DATA_DIRS',
os.pathsep.join(['/usr/local/share', '/usr/share']))
pathlist = [ os.path.expanduser(x.rstrip(os.sep)) for x in path.split(os.pathsep) ]
if appname:
if version:
appname = os.path.join(appname, version)
pathlist = [ os.sep.join([x, appname]) for x in pathlist ]
if multipath:
path = os.pathsep.join(pathlist)
else:
path = pathlist[0]
return path
if appname and version:
path = os.path.join(path, version)
return path
def user_config_dir(appname=None, appauthor=None, version=None, roaming=False):
r"""Return full path to the user-specific config dir for this application.
"appname" is the name of application.
If None, just the system directory is returned.
"appauthor" (only required and used on Windows) is the name of the
appauthor or distributing body for this application. Typically
it is the owning company name. This falls back to appname.
"version" is an optional version path element to append to the
path. You might want to use this if you want multiple versions
of your app to be able to run independently. If used, this
would typically be "<major>.<minor>".
Only applied when appname is present.
"roaming" (boolean, default False) can be set True to use the Windows
roaming appdata directory. That means that for users on a Windows
network setup for roaming profiles, this user data will be
sync'd on login. See
<http://technet.microsoft.com/en-us/library/cc766489(WS.10).aspx>
for a discussion of issues.
Typical user config directories are:
Mac OS X: same as user_data_dir
Unix: ~/.config/<AppName> # or in $XDG_CONFIG_HOME, if defined
Win *: same as user_data_dir
For Unix, we follow the XDG spec and support $XDG_CONFIG_HOME.
That means, by default, "~/.config/<AppName>".
"""
if sys.platform in [ "win32", "darwin" ]:
path = user_data_dir(appname, appauthor, None, roaming)
else:
path = os.getenv('XDG_CONFIG_HOME', os.path.expanduser("~/.config"))
if appname:
path = os.path.join(path, appname)
if appname and version:
path = os.path.join(path, version)
return path
def site_config_dir(appname=None, appauthor=None, version=None, multipath=False):
"""Return full path to the user-shared data dir for this application.
"appname" is the name of application.
If None, just the system directory is returned.
"appauthor" (only required and used on Windows) is the name of the
appauthor or distributing body for this application. Typically
it is the owning company name. This falls back to appname.
"version" is an optional version path element to append to the
path. You might want to use this if you want multiple versions
of your app to be able to run independently. If used, this
would typically be "<major>.<minor>".
Only applied when appname is present.
"multipath" is an optional parameter only applicable to *nix
which indicates that the entire list of config dirs should be
returned. By default, the first item from XDG_CONFIG_DIRS is
returned, or '/etc/xdg/<AppName>', if XDG_CONFIG_DIRS is not set
Typical site config directories are:
Mac OS X: same as site_data_dir
Unix: /etc/xdg/<AppName> or $XDG_CONFIG_DIRS[i]/<AppName> for each value in
$XDG_CONFIG_DIRS
Win *: same as site_data_dir
Vista: (Fail! "C:\ProgramData" is a hidden *system* directory on Vista.)
For Unix, this is using the $XDG_CONFIG_DIRS[0] default, if multipath=False
WARNING: Do not use this on Windows. See the Vista-Fail note above for why.
"""
if sys.platform in [ "win32", "darwin" ]:
path = site_data_dir(appname, appauthor)
if appname and version:
path = os.path.join(path, version)
else:
# XDG default for $XDG_CONFIG_DIRS
# only first, if multipath is False
path = os.getenv('XDG_CONFIG_DIRS', '/etc/xdg')
pathlist = [ os.path.expanduser(x.rstrip(os.sep)) for x in path.split(os.pathsep) ]
if appname:
if version:
appname = os.path.join(appname, version)
pathlist = [ os.sep.join([x, appname]) for x in pathlist ]
if multipath:
path = os.pathsep.join(pathlist)
else:
path = pathlist[0]
return path
def user_cache_dir(appname=None, appauthor=None, version=None, opinion=True):
r"""Return full path to the user-specific cache dir for this application.
"appname" is the name of application.
If None, just the system directory is returned.
"appauthor" (only required and used on Windows) is the name of the
appauthor or distributing body for this application. Typically
it is the owning company name. This falls back to appname.
"version" is an optional version path element to append to the
path. You might want to use this if you want multiple versions
of your app to be able to run independently. If used, this
would typically be "<major>.<minor>".
Only applied when appname is present.
"opinion" (boolean) can be False to disable the appending of
"Cache" to the base app data dir for Windows. See
discussion below.
Typical user cache directories are:
Mac OS X: ~/Library/Caches/<AppName>
Unix: ~/.cache/<AppName> (XDG default)
Win XP: C:\Documents and Settings\<username>\Local Settings\Application Data\<AppAuthor>\<AppName>\Cache
Vista: C:\Users\<username>\AppData\Local\<AppAuthor>\<AppName>\Cache
On Windows the only suggestion in the MSDN docs is that local settings go in
the `CSIDL_LOCAL_APPDATA` directory. This is identical to the non-roaming
app data dir (the default returned by `user_data_dir` above). Apps typically
put cache data somewhere *under* the given dir here. Some examples:
...\Mozilla\Firefox\Profiles\<ProfileName>\Cache
...\Acme\SuperApp\Cache\1.0
OPINION: This function appends "Cache" to the `CSIDL_LOCAL_APPDATA` value.
This can be disabled with the `opinion=False` option.
"""
if sys.platform == "win32":
if appauthor is None:
appauthor = appname
path = os.path.normpath(_get_win_folder("CSIDL_LOCAL_APPDATA"))
if appname:
path = os.path.join(path, appauthor, appname)
if opinion:
path = os.path.join(path, "Cache")
elif sys.platform == 'darwin':
path = os.path.expanduser('~/Library/Caches')
if appname:
path = os.path.join(path, appname)
else:
path = os.getenv('XDG_CACHE_HOME', os.path.expanduser('~/.cache'))
if appname:
path = os.path.join(path, appname)
if appname and version:
path = os.path.join(path, version)
return path
def user_log_dir(appname=None, appauthor=None, version=None, opinion=True):
r"""Return full path to the user-specific log dir for this application.
"appname" is the name of application.
If None, just the system directory is returned.
"appauthor" (only required and used on Windows) is the name of the
appauthor or distributing body for this application. Typically
it is the owning company name. This falls back to appname.
"version" is an optional version path element to append to the
path. You might want to use this if you want multiple versions
of your app to be able to run independently. If used, this
would typically be "<major>.<minor>".
Only applied when appname is present.
"opinion" (boolean) can be False to disable the appending of
"Logs" to the base app data dir for Windows, and "log" to the
base cache dir for Unix. See discussion below.
Typical user log directories are:
Mac OS X: ~/Library/Logs/<AppName>
Unix: ~/.cache/<AppName>/log # or under $XDG_CACHE_HOME if defined
Win XP: C:\Documents and Settings\<username>\Local Settings\Application Data\<AppAuthor>\<AppName>\Logs
Vista: C:\Users\<username>\AppData\Local\<AppAuthor>\<AppName>\Logs
On Windows the only suggestion in the MSDN docs is that local settings
go in the `CSIDL_LOCAL_APPDATA` directory. (Note: I'm interested in
examples of what some windows apps use for a logs dir.)
OPINION: This function appends "Logs" to the `CSIDL_LOCAL_APPDATA`
value for Windows and appends "log" to the user cache dir for Unix.
This can be disabled with the `opinion=False` option.
"""
if sys.platform == "darwin":
path = os.path.expanduser('~/Library/Logs')
if appname:
path = os.path.join(path, appname)
elif sys.platform == "win32":
path = user_data_dir(appname, appauthor, version); version=False
if opinion:
path = os.path.join(path, "Logs")
else:
path = user_cache_dir(appname, appauthor, version); version=False
if opinion:
path = os.path.join(path, "log")
if appname and version:
path = os.path.join(path, version)
return path
class AppDirs(object):
"""Convenience wrapper for getting application dirs."""
def __init__(self, appname, appauthor=None, version=None,
roaming=False, multipath=False):
self.appname = appname
self.appauthor = appauthor
self.version = version
self.roaming = roaming
self.multipath = multipath
@property
def user_data_dir(self):
return user_data_dir(self.appname, self.appauthor,
version=self.version, roaming=self.roaming)
@property
def site_data_dir(self):
return site_data_dir(self.appname, self.appauthor,
version=self.version, multipath=self.multipath)
@property
def user_config_dir(self):
return user_config_dir(self.appname, self.appauthor,
version=self.version, roaming=self.roaming)
@property
def site_config_dir(self):
return site_data_dir(self.appname, self.appauthor,
version=self.version, multipath=self.multipath)
@property
def user_cache_dir(self):
return user_cache_dir(self.appname, self.appauthor,
version=self.version)
@property
def user_log_dir(self):
return user_log_dir(self.appname, self.appauthor,
version=self.version)
#---- internal support stuff
def _get_win_folder_from_registry(csidl_name):
"""This is a fallback technique at best. I'm not sure if using the
registry for this guarantees us the correct answer for all CSIDL_*
names.
"""
import _winreg
shell_folder_name = {
"CSIDL_APPDATA": "AppData",
"CSIDL_COMMON_APPDATA": "Common AppData",
"CSIDL_LOCAL_APPDATA": "Local AppData",
}[csidl_name]
key = _winreg.OpenKey(_winreg.HKEY_CURRENT_USER,
r"Software\Microsoft\Windows\CurrentVersion\Explorer\Shell Folders")
dir, type = _winreg.QueryValueEx(key, shell_folder_name)
return dir
def _get_win_folder_with_pywin32(csidl_name):
from win32com.shell import shellcon, shell
dir = shell.SHGetFolderPath(0, getattr(shellcon, csidl_name), 0, 0)
# Try to make this a unicode path because SHGetFolderPath does
# not return unicode strings when there is unicode data in the
# path.
try:
dir = unicode(dir)
# Downgrade to short path name if have highbit chars. See
# <http://bugs.activestate.com/show_bug.cgi?id=85099>.
has_high_char = False
for c in dir:
if ord(c) > 255:
has_high_char = True
break
if has_high_char:
try:
import win32api
dir = win32api.GetShortPathName(dir)
except ImportError:
pass
except UnicodeError:
pass
return dir
def _get_win_folder_with_ctypes(csidl_name):
import ctypes
csidl_const = {
"CSIDL_APPDATA": 26,
"CSIDL_COMMON_APPDATA": 35,
"CSIDL_LOCAL_APPDATA": 28,
}[csidl_name]
buf = ctypes.create_unicode_buffer(1024)
ctypes.windll.shell32.SHGetFolderPathW(None, csidl_const, None, 0, buf)
# Downgrade to short path name if have highbit chars. See
# <http://bugs.activestate.com/show_bug.cgi?id=85099>.
has_high_char = False
for c in buf:
if ord(c) > 255:
has_high_char = True
break
if has_high_char:
buf2 = ctypes.create_unicode_buffer(1024)
if ctypes.windll.kernel32.GetShortPathNameW(buf.value, buf2, 1024):
buf = buf2
return buf.value
if sys.platform == "win32":
try:
import win32com.shell
_get_win_folder = _get_win_folder_with_pywin32
except ImportError:
try:
import ctypes
_get_win_folder = _get_win_folder_with_ctypes
except ImportError:
_get_win_folder = _get_win_folder_from_registry
#---- self test code
if __name__ == "__main__":
appname = "MyApp"
appauthor = "MyCompany"
props = ("user_data_dir", "site_data_dir",
"user_config_dir", "site_config_dir",
"user_cache_dir", "user_log_dir")
print("-- app dirs (with optional 'version')")
dirs = AppDirs(appname, appauthor, version="1.0")
for prop in props:
print("%s: %s" % (prop, getattr(dirs, prop)))
print("\n-- app dirs (without optional 'version')")
dirs = AppDirs(appname, appauthor)
for prop in props:
print("%s: %s" % (prop, getattr(dirs, prop)))
print("\n-- app dirs (without optional 'appauthor')")
dirs = AppDirs(appname)
for prop in props:
print("%s: %s" % (prop, getattr(dirs, prop)))
| agpl-3.0 |
ezequielfreire007/proyectoIonic | mychat/node_modules/node-gyp/gyp/pylib/gyp/generator/make.py | 896 | 91092 | # Copyright (c) 2013 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
# Notes:
#
# This is all roughly based on the Makefile system used by the Linux
# kernel, but is a non-recursive make -- we put the entire dependency
# graph in front of make and let it figure it out.
#
# The code below generates a separate .mk file for each target, but
# all are sourced by the top-level Makefile. This means that all
# variables in .mk-files clobber one another. Be careful to use :=
# where appropriate for immediate evaluation, and similarly to watch
# that you're not relying on a variable value to last between different
# .mk files.
#
# TODOs:
#
# Global settings and utility functions are currently stuffed in the
# toplevel Makefile. It may make sense to generate some .mk files on
# the side to keep the files readable.
import os
import re
import sys
import subprocess
import gyp
import gyp.common
import gyp.xcode_emulation
from gyp.common import GetEnvironFallback
from gyp.common import GypError
generator_default_variables = {
'EXECUTABLE_PREFIX': '',
'EXECUTABLE_SUFFIX': '',
'STATIC_LIB_PREFIX': 'lib',
'SHARED_LIB_PREFIX': 'lib',
'STATIC_LIB_SUFFIX': '.a',
'INTERMEDIATE_DIR': '$(obj).$(TOOLSET)/$(TARGET)/geni',
'SHARED_INTERMEDIATE_DIR': '$(obj)/gen',
'PRODUCT_DIR': '$(builddir)',
'RULE_INPUT_ROOT': '%(INPUT_ROOT)s', # This gets expanded by Python.
'RULE_INPUT_DIRNAME': '%(INPUT_DIRNAME)s', # This gets expanded by Python.
'RULE_INPUT_PATH': '$(abspath $<)',
'RULE_INPUT_EXT': '$(suffix $<)',
'RULE_INPUT_NAME': '$(notdir $<)',
'CONFIGURATION_NAME': '$(BUILDTYPE)',
}
# Make supports multiple toolsets
generator_supports_multiple_toolsets = True
# Request sorted dependencies in the order from dependents to dependencies.
generator_wants_sorted_dependencies = False
# Placates pylint.
generator_additional_non_configuration_keys = []
generator_additional_path_sections = []
generator_extra_sources_for_rules = []
generator_filelist_paths = None
def CalculateVariables(default_variables, params):
"""Calculate additional variables for use in the build (called by gyp)."""
flavor = gyp.common.GetFlavor(params)
if flavor == 'mac':
default_variables.setdefault('OS', 'mac')
default_variables.setdefault('SHARED_LIB_SUFFIX', '.dylib')
default_variables.setdefault('SHARED_LIB_DIR',
generator_default_variables['PRODUCT_DIR'])
default_variables.setdefault('LIB_DIR',
generator_default_variables['PRODUCT_DIR'])
# Copy additional generator configuration data from Xcode, which is shared
# by the Mac Make generator.
import gyp.generator.xcode as xcode_generator
global generator_additional_non_configuration_keys
generator_additional_non_configuration_keys = getattr(xcode_generator,
'generator_additional_non_configuration_keys', [])
global generator_additional_path_sections
generator_additional_path_sections = getattr(xcode_generator,
'generator_additional_path_sections', [])
global generator_extra_sources_for_rules
generator_extra_sources_for_rules = getattr(xcode_generator,
'generator_extra_sources_for_rules', [])
COMPILABLE_EXTENSIONS.update({'.m': 'objc', '.mm' : 'objcxx'})
else:
operating_system = flavor
if flavor == 'android':
operating_system = 'linux' # Keep this legacy behavior for now.
default_variables.setdefault('OS', operating_system)
default_variables.setdefault('SHARED_LIB_SUFFIX', '.so')
default_variables.setdefault('SHARED_LIB_DIR','$(builddir)/lib.$(TOOLSET)')
default_variables.setdefault('LIB_DIR', '$(obj).$(TOOLSET)')
def CalculateGeneratorInputInfo(params):
"""Calculate the generator specific info that gets fed to input (called by
gyp)."""
generator_flags = params.get('generator_flags', {})
android_ndk_version = generator_flags.get('android_ndk_version', None)
# Android NDK requires a strict link order.
if android_ndk_version:
global generator_wants_sorted_dependencies
generator_wants_sorted_dependencies = True
output_dir = params['options'].generator_output or \
params['options'].toplevel_dir
builddir_name = generator_flags.get('output_dir', 'out')
qualified_out_dir = os.path.normpath(os.path.join(
output_dir, builddir_name, 'gypfiles'))
global generator_filelist_paths
generator_filelist_paths = {
'toplevel': params['options'].toplevel_dir,
'qualified_out_dir': qualified_out_dir,
}
# The .d checking code below uses these functions:
# wildcard, sort, foreach, shell, wordlist
# wildcard can handle spaces, the rest can't.
# Since I could find no way to make foreach work with spaces in filenames
# correctly, the .d files have spaces replaced with another character. The .d
# file for
# Chromium\ Framework.framework/foo
# is for example
# out/Release/.deps/out/Release/Chromium?Framework.framework/foo
# This is the replacement character.
SPACE_REPLACEMENT = '?'
LINK_COMMANDS_LINUX = """\
quiet_cmd_alink = AR($(TOOLSET)) $@
cmd_alink = rm -f $@ && $(AR.$(TOOLSET)) crs $@ $(filter %.o,$^)
quiet_cmd_alink_thin = AR($(TOOLSET)) $@
cmd_alink_thin = rm -f $@ && $(AR.$(TOOLSET)) crsT $@ $(filter %.o,$^)
# Due to circular dependencies between libraries :(, we wrap the
# special "figure out circular dependencies" flags around the entire
# input list during linking.
quiet_cmd_link = LINK($(TOOLSET)) $@
cmd_link = $(LINK.$(TOOLSET)) $(GYP_LDFLAGS) $(LDFLAGS.$(TOOLSET)) -o $@ -Wl,--start-group $(LD_INPUTS) -Wl,--end-group $(LIBS)
# We support two kinds of shared objects (.so):
# 1) shared_library, which is just bundling together many dependent libraries
# into a link line.
# 2) loadable_module, which is generating a module intended for dlopen().
#
# They differ only slightly:
# In the former case, we want to package all dependent code into the .so.
# In the latter case, we want to package just the API exposed by the
# outermost module.
# This means shared_library uses --whole-archive, while loadable_module doesn't.
# (Note that --whole-archive is incompatible with the --start-group used in
# normal linking.)
# Other shared-object link notes:
# - Set SONAME to the library filename so our binaries don't reference
# the local, absolute paths used on the link command-line.
quiet_cmd_solink = SOLINK($(TOOLSET)) $@
cmd_solink = $(LINK.$(TOOLSET)) -shared $(GYP_LDFLAGS) $(LDFLAGS.$(TOOLSET)) -Wl,-soname=$(@F) -o $@ -Wl,--whole-archive $(LD_INPUTS) -Wl,--no-whole-archive $(LIBS)
quiet_cmd_solink_module = SOLINK_MODULE($(TOOLSET)) $@
cmd_solink_module = $(LINK.$(TOOLSET)) -shared $(GYP_LDFLAGS) $(LDFLAGS.$(TOOLSET)) -Wl,-soname=$(@F) -o $@ -Wl,--start-group $(filter-out FORCE_DO_CMD, $^) -Wl,--end-group $(LIBS)
"""
LINK_COMMANDS_MAC = """\
quiet_cmd_alink = LIBTOOL-STATIC $@
cmd_alink = rm -f $@ && ./gyp-mac-tool filter-libtool libtool $(GYP_LIBTOOLFLAGS) -static -o $@ $(filter %.o,$^)
quiet_cmd_link = LINK($(TOOLSET)) $@
cmd_link = $(LINK.$(TOOLSET)) $(GYP_LDFLAGS) $(LDFLAGS.$(TOOLSET)) -o "$@" $(LD_INPUTS) $(LIBS)
quiet_cmd_solink = SOLINK($(TOOLSET)) $@
cmd_solink = $(LINK.$(TOOLSET)) -shared $(GYP_LDFLAGS) $(LDFLAGS.$(TOOLSET)) -o "$@" $(LD_INPUTS) $(LIBS)
quiet_cmd_solink_module = SOLINK_MODULE($(TOOLSET)) $@
cmd_solink_module = $(LINK.$(TOOLSET)) -bundle $(GYP_LDFLAGS) $(LDFLAGS.$(TOOLSET)) -o $@ $(filter-out FORCE_DO_CMD, $^) $(LIBS)
"""
LINK_COMMANDS_ANDROID = """\
quiet_cmd_alink = AR($(TOOLSET)) $@
cmd_alink = rm -f $@ && $(AR.$(TOOLSET)) crs $@ $(filter %.o,$^)
quiet_cmd_alink_thin = AR($(TOOLSET)) $@
cmd_alink_thin = rm -f $@ && $(AR.$(TOOLSET)) crsT $@ $(filter %.o,$^)
# Due to circular dependencies between libraries :(, we wrap the
# special "figure out circular dependencies" flags around the entire
# input list during linking.
quiet_cmd_link = LINK($(TOOLSET)) $@
quiet_cmd_link_host = LINK($(TOOLSET)) $@
cmd_link = $(LINK.$(TOOLSET)) $(GYP_LDFLAGS) $(LDFLAGS.$(TOOLSET)) -o $@ -Wl,--start-group $(LD_INPUTS) -Wl,--end-group $(LIBS)
cmd_link_host = $(LINK.$(TOOLSET)) $(GYP_LDFLAGS) $(LDFLAGS.$(TOOLSET)) -o $@ $(LD_INPUTS) $(LIBS)
# Other shared-object link notes:
# - Set SONAME to the library filename so our binaries don't reference
# the local, absolute paths used on the link command-line.
quiet_cmd_solink = SOLINK($(TOOLSET)) $@
cmd_solink = $(LINK.$(TOOLSET)) -shared $(GYP_LDFLAGS) $(LDFLAGS.$(TOOLSET)) -Wl,-soname=$(@F) -o $@ -Wl,--whole-archive $(LD_INPUTS) -Wl,--no-whole-archive $(LIBS)
quiet_cmd_solink_module = SOLINK_MODULE($(TOOLSET)) $@
cmd_solink_module = $(LINK.$(TOOLSET)) -shared $(GYP_LDFLAGS) $(LDFLAGS.$(TOOLSET)) -Wl,-soname=$(@F) -o $@ -Wl,--start-group $(filter-out FORCE_DO_CMD, $^) -Wl,--end-group $(LIBS)
quiet_cmd_solink_module_host = SOLINK_MODULE($(TOOLSET)) $@
cmd_solink_module_host = $(LINK.$(TOOLSET)) -shared $(GYP_LDFLAGS) $(LDFLAGS.$(TOOLSET)) -Wl,-soname=$(@F) -o $@ $(filter-out FORCE_DO_CMD, $^) $(LIBS)
"""
LINK_COMMANDS_AIX = """\
quiet_cmd_alink = AR($(TOOLSET)) $@
cmd_alink = rm -f $@ && $(AR.$(TOOLSET)) -X32_64 crs $@ $(filter %.o,$^)
quiet_cmd_alink_thin = AR($(TOOLSET)) $@
cmd_alink_thin = rm -f $@ && $(AR.$(TOOLSET)) -X32_64 crs $@ $(filter %.o,$^)
quiet_cmd_link = LINK($(TOOLSET)) $@
cmd_link = $(LINK.$(TOOLSET)) $(GYP_LDFLAGS) $(LDFLAGS.$(TOOLSET)) -o $@ $(LD_INPUTS) $(LIBS)
quiet_cmd_solink = SOLINK($(TOOLSET)) $@
cmd_solink = $(LINK.$(TOOLSET)) -shared $(GYP_LDFLAGS) $(LDFLAGS.$(TOOLSET)) -o $@ $(LD_INPUTS) $(LIBS)
quiet_cmd_solink_module = SOLINK_MODULE($(TOOLSET)) $@
cmd_solink_module = $(LINK.$(TOOLSET)) -shared $(GYP_LDFLAGS) $(LDFLAGS.$(TOOLSET)) -o $@ $(filter-out FORCE_DO_CMD, $^) $(LIBS)
"""
# Header of toplevel Makefile.
# This should go into the build tree, but it's easier to keep it here for now.
SHARED_HEADER = ("""\
# We borrow heavily from the kernel build setup, though we are simpler since
# we don't have Kconfig tweaking settings on us.
# The implicit make rules have it looking for RCS files, among other things.
# We instead explicitly write all the rules we care about.
# It's even quicker (saves ~200ms) to pass -r on the command line.
MAKEFLAGS=-r
# The source directory tree.
srcdir := %(srcdir)s
abs_srcdir := $(abspath $(srcdir))
# The name of the builddir.
builddir_name ?= %(builddir)s
# The V=1 flag on command line makes us verbosely print command lines.
ifdef V
quiet=
else
quiet=quiet_
endif
# Specify BUILDTYPE=Release on the command line for a release build.
BUILDTYPE ?= %(default_configuration)s
# Directory all our build output goes into.
# Note that this must be two directories beneath src/ for unit tests to pass,
# as they reach into the src/ directory for data with relative paths.
builddir ?= $(builddir_name)/$(BUILDTYPE)
abs_builddir := $(abspath $(builddir))
depsdir := $(builddir)/.deps
# Object output directory.
obj := $(builddir)/obj
abs_obj := $(abspath $(obj))
# We build up a list of every single one of the targets so we can slurp in the
# generated dependency rule Makefiles in one pass.
all_deps :=
%(make_global_settings)s
CC.target ?= %(CC.target)s
CFLAGS.target ?= $(CPPFLAGS) $(CFLAGS)
CXX.target ?= %(CXX.target)s
CXXFLAGS.target ?= $(CPPFLAGS) $(CXXFLAGS)
LINK.target ?= %(LINK.target)s
LDFLAGS.target ?= $(LDFLAGS)
AR.target ?= $(AR)
# C++ apps need to be linked with g++.
LINK ?= $(CXX.target)
# TODO(evan): move all cross-compilation logic to gyp-time so we don't need
# to replicate this environment fallback in make as well.
CC.host ?= %(CC.host)s
CFLAGS.host ?= $(CPPFLAGS_host) $(CFLAGS_host)
CXX.host ?= %(CXX.host)s
CXXFLAGS.host ?= $(CPPFLAGS_host) $(CXXFLAGS_host)
LINK.host ?= %(LINK.host)s
LDFLAGS.host ?=
AR.host ?= %(AR.host)s
# Define a dir function that can handle spaces.
# http://www.gnu.org/software/make/manual/make.html#Syntax-of-Functions
# "leading spaces cannot appear in the text of the first argument as written.
# These characters can be put into the argument value by variable substitution."
empty :=
space := $(empty) $(empty)
# http://stackoverflow.com/questions/1189781/using-make-dir-or-notdir-on-a-path-with-spaces
replace_spaces = $(subst $(space),""" + SPACE_REPLACEMENT + """,$1)
unreplace_spaces = $(subst """ + SPACE_REPLACEMENT + """,$(space),$1)
dirx = $(call unreplace_spaces,$(dir $(call replace_spaces,$1)))
# Flags to make gcc output dependency info. Note that you need to be
# careful here to use the flags that ccache and distcc can understand.
# We write to a dep file on the side first and then rename at the end
# so we can't end up with a broken dep file.
depfile = $(depsdir)/$(call replace_spaces,$@).d
DEPFLAGS = -MMD -MF $(depfile).raw
# We have to fixup the deps output in a few ways.
# (1) the file output should mention the proper .o file.
# ccache or distcc lose the path to the target, so we convert a rule of
# the form:
# foobar.o: DEP1 DEP2
# into
# path/to/foobar.o: DEP1 DEP2
# (2) we want missing files not to cause us to fail to build.
# We want to rewrite
# foobar.o: DEP1 DEP2 \\
# DEP3
# to
# DEP1:
# DEP2:
# DEP3:
# so if the files are missing, they're just considered phony rules.
# We have to do some pretty insane escaping to get those backslashes
# and dollar signs past make, the shell, and sed at the same time.
# Doesn't work with spaces, but that's fine: .d files have spaces in
# their names replaced with other characters."""
r"""
define fixup_dep
# The depfile may not exist if the input file didn't have any #includes.
touch $(depfile).raw
# Fixup path as in (1).
sed -e "s|^$(notdir $@)|$@|" $(depfile).raw >> $(depfile)
# Add extra rules as in (2).
# We remove slashes and replace spaces with new lines;
# remove blank lines;
# delete the first line and append a colon to the remaining lines.
sed -e 's|\\||' -e 'y| |\n|' $(depfile).raw |\
grep -v '^$$' |\
sed -e 1d -e 's|$$|:|' \
>> $(depfile)
rm $(depfile).raw
endef
"""
"""
# Command definitions:
# - cmd_foo is the actual command to run;
# - quiet_cmd_foo is the brief-output summary of the command.
quiet_cmd_cc = CC($(TOOLSET)) $@
cmd_cc = $(CC.$(TOOLSET)) $(GYP_CFLAGS) $(DEPFLAGS) $(CFLAGS.$(TOOLSET)) -c -o $@ $<
quiet_cmd_cxx = CXX($(TOOLSET)) $@
cmd_cxx = $(CXX.$(TOOLSET)) $(GYP_CXXFLAGS) $(DEPFLAGS) $(CXXFLAGS.$(TOOLSET)) -c -o $@ $<
%(extra_commands)s
quiet_cmd_touch = TOUCH $@
cmd_touch = touch $@
quiet_cmd_copy = COPY $@
# send stderr to /dev/null to ignore messages when linking directories.
cmd_copy = rm -rf "$@" && cp %(copy_archive_args)s "$<" "$@"
%(link_commands)s
"""
r"""
# Define an escape_quotes function to escape single quotes.
# This allows us to handle quotes properly as long as we always use
# use single quotes and escape_quotes.
escape_quotes = $(subst ','\'',$(1))
# This comment is here just to include a ' to unconfuse syntax highlighting.
# Define an escape_vars function to escape '$' variable syntax.
# This allows us to read/write command lines with shell variables (e.g.
# $LD_LIBRARY_PATH), without triggering make substitution.
escape_vars = $(subst $$,$$$$,$(1))
# Helper that expands to a shell command to echo a string exactly as it is in
# make. This uses printf instead of echo because printf's behaviour with respect
# to escape sequences is more portable than echo's across different shells
# (e.g., dash, bash).
exact_echo = printf '%%s\n' '$(call escape_quotes,$(1))'
"""
"""
# Helper to compare the command we're about to run against the command
# we logged the last time we ran the command. Produces an empty
# string (false) when the commands match.
# Tricky point: Make has no string-equality test function.
# The kernel uses the following, but it seems like it would have false
# positives, where one string reordered its arguments.
# arg_check = $(strip $(filter-out $(cmd_$(1)), $(cmd_$@)) \\
# $(filter-out $(cmd_$@), $(cmd_$(1))))
# We instead substitute each for the empty string into the other, and
# say they're equal if both substitutions produce the empty string.
# .d files contain """ + SPACE_REPLACEMENT + \
""" instead of spaces, take that into account.
command_changed = $(or $(subst $(cmd_$(1)),,$(cmd_$(call replace_spaces,$@))),\\
$(subst $(cmd_$(call replace_spaces,$@)),,$(cmd_$(1))))
# Helper that is non-empty when a prerequisite changes.
# Normally make does this implicitly, but we force rules to always run
# so we can check their command lines.
# $? -- new prerequisites
# $| -- order-only dependencies
prereq_changed = $(filter-out FORCE_DO_CMD,$(filter-out $|,$?))
# Helper that executes all postbuilds until one fails.
define do_postbuilds
@E=0;\\
for p in $(POSTBUILDS); do\\
eval $$p;\\
E=$$?;\\
if [ $$E -ne 0 ]; then\\
break;\\
fi;\\
done;\\
if [ $$E -ne 0 ]; then\\
rm -rf "$@";\\
exit $$E;\\
fi
endef
# do_cmd: run a command via the above cmd_foo names, if necessary.
# Should always run for a given target to handle command-line changes.
# Second argument, if non-zero, makes it do asm/C/C++ dependency munging.
# Third argument, if non-zero, makes it do POSTBUILDS processing.
# Note: We intentionally do NOT call dirx for depfile, since it contains """ + \
SPACE_REPLACEMENT + """ for
# spaces already and dirx strips the """ + SPACE_REPLACEMENT + \
""" characters.
define do_cmd
$(if $(or $(command_changed),$(prereq_changed)),
@$(call exact_echo, $($(quiet)cmd_$(1)))
@mkdir -p "$(call dirx,$@)" "$(dir $(depfile))"
$(if $(findstring flock,$(word %(flock_index)d,$(cmd_$1))),
@$(cmd_$(1))
@echo " $(quiet_cmd_$(1)): Finished",
@$(cmd_$(1))
)
@$(call exact_echo,$(call escape_vars,cmd_$(call replace_spaces,$@) := $(cmd_$(1)))) > $(depfile)
@$(if $(2),$(fixup_dep))
$(if $(and $(3), $(POSTBUILDS)),
$(call do_postbuilds)
)
)
endef
# Declare the "%(default_target)s" target first so it is the default,
# even though we don't have the deps yet.
.PHONY: %(default_target)s
%(default_target)s:
# make looks for ways to re-generate included makefiles, but in our case, we
# don't have a direct way. Explicitly telling make that it has nothing to do
# for them makes it go faster.
%%.d: ;
# Use FORCE_DO_CMD to force a target to run. Should be coupled with
# do_cmd.
.PHONY: FORCE_DO_CMD
FORCE_DO_CMD:
""")
SHARED_HEADER_MAC_COMMANDS = """
quiet_cmd_objc = CXX($(TOOLSET)) $@
cmd_objc = $(CC.$(TOOLSET)) $(GYP_OBJCFLAGS) $(DEPFLAGS) -c -o $@ $<
quiet_cmd_objcxx = CXX($(TOOLSET)) $@
cmd_objcxx = $(CXX.$(TOOLSET)) $(GYP_OBJCXXFLAGS) $(DEPFLAGS) -c -o $@ $<
# Commands for precompiled header files.
quiet_cmd_pch_c = CXX($(TOOLSET)) $@
cmd_pch_c = $(CC.$(TOOLSET)) $(GYP_PCH_CFLAGS) $(DEPFLAGS) $(CXXFLAGS.$(TOOLSET)) -c -o $@ $<
quiet_cmd_pch_cc = CXX($(TOOLSET)) $@
cmd_pch_cc = $(CC.$(TOOLSET)) $(GYP_PCH_CXXFLAGS) $(DEPFLAGS) $(CXXFLAGS.$(TOOLSET)) -c -o $@ $<
quiet_cmd_pch_m = CXX($(TOOLSET)) $@
cmd_pch_m = $(CC.$(TOOLSET)) $(GYP_PCH_OBJCFLAGS) $(DEPFLAGS) -c -o $@ $<
quiet_cmd_pch_mm = CXX($(TOOLSET)) $@
cmd_pch_mm = $(CC.$(TOOLSET)) $(GYP_PCH_OBJCXXFLAGS) $(DEPFLAGS) -c -o $@ $<
# gyp-mac-tool is written next to the root Makefile by gyp.
# Use $(4) for the command, since $(2) and $(3) are used as flag by do_cmd
# already.
quiet_cmd_mac_tool = MACTOOL $(4) $<
cmd_mac_tool = ./gyp-mac-tool $(4) $< "$@"
quiet_cmd_mac_package_framework = PACKAGE FRAMEWORK $@
cmd_mac_package_framework = ./gyp-mac-tool package-framework "$@" $(4)
quiet_cmd_infoplist = INFOPLIST $@
cmd_infoplist = $(CC.$(TOOLSET)) -E -P -Wno-trigraphs -x c $(INFOPLIST_DEFINES) "$<" -o "$@"
"""
def WriteRootHeaderSuffixRules(writer):
extensions = sorted(COMPILABLE_EXTENSIONS.keys(), key=str.lower)
writer.write('# Suffix rules, putting all outputs into $(obj).\n')
for ext in extensions:
writer.write('$(obj).$(TOOLSET)/%%.o: $(srcdir)/%%%s FORCE_DO_CMD\n' % ext)
writer.write('\t@$(call do_cmd,%s,1)\n' % COMPILABLE_EXTENSIONS[ext])
writer.write('\n# Try building from generated source, too.\n')
for ext in extensions:
writer.write(
'$(obj).$(TOOLSET)/%%.o: $(obj).$(TOOLSET)/%%%s FORCE_DO_CMD\n' % ext)
writer.write('\t@$(call do_cmd,%s,1)\n' % COMPILABLE_EXTENSIONS[ext])
writer.write('\n')
for ext in extensions:
writer.write('$(obj).$(TOOLSET)/%%.o: $(obj)/%%%s FORCE_DO_CMD\n' % ext)
writer.write('\t@$(call do_cmd,%s,1)\n' % COMPILABLE_EXTENSIONS[ext])
writer.write('\n')
SHARED_HEADER_SUFFIX_RULES_COMMENT1 = ("""\
# Suffix rules, putting all outputs into $(obj).
""")
SHARED_HEADER_SUFFIX_RULES_COMMENT2 = ("""\
# Try building from generated source, too.
""")
SHARED_FOOTER = """\
# "all" is a concatenation of the "all" targets from all the included
# sub-makefiles. This is just here to clarify.
all:
# Add in dependency-tracking rules. $(all_deps) is the list of every single
# target in our tree. Only consider the ones with .d (dependency) info:
d_files := $(wildcard $(foreach f,$(all_deps),$(depsdir)/$(f).d))
ifneq ($(d_files),)
include $(d_files)
endif
"""
header = """\
# This file is generated by gyp; do not edit.
"""
# Maps every compilable file extension to the do_cmd that compiles it.
COMPILABLE_EXTENSIONS = {
'.c': 'cc',
'.cc': 'cxx',
'.cpp': 'cxx',
'.cxx': 'cxx',
'.s': 'cc',
'.S': 'cc',
}
def Compilable(filename):
"""Return true if the file is compilable (should be in OBJS)."""
for res in (filename.endswith(e) for e in COMPILABLE_EXTENSIONS):
if res:
return True
return False
def Linkable(filename):
"""Return true if the file is linkable (should be on the link line)."""
return filename.endswith('.o')
def Target(filename):
"""Translate a compilable filename to its .o target."""
return os.path.splitext(filename)[0] + '.o'
def EscapeShellArgument(s):
"""Quotes an argument so that it will be interpreted literally by a POSIX
shell. Taken from
http://stackoverflow.com/questions/35817/whats-the-best-way-to-escape-ossystem-calls-in-python
"""
return "'" + s.replace("'", "'\\''") + "'"
def EscapeMakeVariableExpansion(s):
"""Make has its own variable expansion syntax using $. We must escape it for
string to be interpreted literally."""
return s.replace('$', '$$')
def EscapeCppDefine(s):
"""Escapes a CPP define so that it will reach the compiler unaltered."""
s = EscapeShellArgument(s)
s = EscapeMakeVariableExpansion(s)
# '#' characters must be escaped even embedded in a string, else Make will
# treat it as the start of a comment.
return s.replace('#', r'\#')
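# Illustrative sketch (not part of the original generator): shell quoting,
# then '$' doubling for Make, then '#' escaping.
def _DemoEscapeCppDefine():
  assert EscapeCppDefine('VERSION="1.0"') == "'VERSION=\"1.0\"'"
  assert EscapeCppDefine("PRICE_$") == "'PRICE_$$'"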
def QuoteIfNecessary(string):
"""TODO: Should this ideally be replaced with one or more of the above
functions?"""
if '"' in string:
string = '"' + string.replace('"', '\\"') + '"'
return string
def StringToMakefileVariable(string):
"""Convert a string to a value that is acceptable as a make variable name."""
return re.sub('[^a-zA-Z0-9_]', '_', string)
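# Illustrative sketch (not part of the original generator).
def _DemoStringToMakefileVariable():
  assert StringToMakefileVariable("my-target (host)") == "my_target__host_"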
srcdir_prefix = ''
def Sourceify(path):
"""Convert a path to its source directory form."""
if '$(' in path:
return path
if os.path.isabs(path):
return path
return srcdir_prefix + path
def QuoteSpaces(s, quote=r'\ '):
return s.replace(' ', quote)
# TODO: Avoid code duplication with _ValidateSourcesForMSVSProject in msvs.py.
def _ValidateSourcesForOSX(spec, all_sources):
"""Makes sure if duplicate basenames are not specified in the source list.
Arguments:
spec: The target dictionary containing the properties of the target.
"""
if spec.get('type', None) != 'static_library':
return
basenames = {}
for source in all_sources:
name, ext = os.path.splitext(source)
is_compiled_file = ext in [
'.c', '.cc', '.cpp', '.cxx', '.m', '.mm', '.s', '.S']
if not is_compiled_file:
continue
basename = os.path.basename(name) # Don't include extension.
basenames.setdefault(basename, []).append(source)
error = ''
for basename, files in basenames.iteritems():
if len(files) > 1:
error += ' %s: %s\n' % (basename, ' '.join(files))
if error:
print('static library %s has several files with the same basename:\n' %
spec['target_name'] + error + 'libtool on OS X will generate' +
' warnings for them.')
raise GypError('Duplicate basenames in sources section, see list above')
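# Illustrative sketch (not part of the original generator): duplicate
# basenames among compiled sources of a static_library are rejected.
def _DemoValidateSourcesForOSX():
  spec = {'type': 'static_library', 'target_name': 'demo'}
  raised = False
  try:
    _ValidateSourcesForOSX(spec, ['a/dup.cc', 'b/dup.cc'])
  except GypError:
    raised = True
  assert raised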
# Map from qualified target to path to output.
target_outputs = {}
# Map from qualified target to any linkable output. A subset
# of target_outputs. E.g. when mybinary depends on liba, we want to
# include liba in the linker line; when otherbinary depends on
# mybinary, we just want to build mybinary first.
target_link_deps = {}
class MakefileWriter(object):
"""MakefileWriter packages up the writing of one target-specific foobar.mk.
Its only real entry point is Write(); the class mostly serves as a namespace.
"""
def __init__(self, generator_flags, flavor):
self.generator_flags = generator_flags
self.flavor = flavor
self.suffix_rules_srcdir = {}
self.suffix_rules_objdir1 = {}
self.suffix_rules_objdir2 = {}
# Generate suffix rules for all compilable extensions.
for ext in COMPILABLE_EXTENSIONS.keys():
# Suffix rules for source folder.
self.suffix_rules_srcdir.update({ext: ("""\
$(obj).$(TOOLSET)/$(TARGET)/%%.o: $(srcdir)/%%%s FORCE_DO_CMD
@$(call do_cmd,%s,1)
""" % (ext, COMPILABLE_EXTENSIONS[ext]))})
# Suffix rules for generated source files.
self.suffix_rules_objdir1.update({ext: ("""\
$(obj).$(TOOLSET)/$(TARGET)/%%.o: $(obj).$(TOOLSET)/%%%s FORCE_DO_CMD
@$(call do_cmd,%s,1)
""" % (ext, COMPILABLE_EXTENSIONS[ext]))})
self.suffix_rules_objdir2.update({ext: ("""\
$(obj).$(TOOLSET)/$(TARGET)/%%.o: $(obj)/%%%s FORCE_DO_CMD
@$(call do_cmd,%s,1)
""" % (ext, COMPILABLE_EXTENSIONS[ext]))})
def Write(self, qualified_target, base_path, output_filename, spec, configs,
part_of_all):
"""The main entry point: writes a .mk file for a single target.
Arguments:
qualified_target: target we're generating
base_path: path relative to source root we're building in, used to resolve
target-relative paths
output_filename: output .mk file name to write
spec, configs: gyp info
part_of_all: flag indicating this target is part of 'all'
"""
gyp.common.EnsureDirExists(output_filename)
self.fp = open(output_filename, 'w')
self.fp.write(header)
self.qualified_target = qualified_target
self.path = base_path
self.target = spec['target_name']
self.type = spec['type']
self.toolset = spec['toolset']
self.is_mac_bundle = gyp.xcode_emulation.IsMacBundle(self.flavor, spec)
if self.flavor == 'mac':
self.xcode_settings = gyp.xcode_emulation.XcodeSettings(spec)
else:
self.xcode_settings = None
deps, link_deps = self.ComputeDeps(spec)
# Some of the generation below can add extra output, sources, or
# link dependencies. All of the out params of the functions that
# follow use names like extra_foo.
extra_outputs = []
extra_sources = []
extra_link_deps = []
extra_mac_bundle_resources = []
mac_bundle_deps = []
if self.is_mac_bundle:
self.output = self.ComputeMacBundleOutput(spec)
self.output_binary = self.ComputeMacBundleBinaryOutput(spec)
else:
self.output = self.output_binary = self.ComputeOutput(spec)
self.is_standalone_static_library = bool(
spec.get('standalone_static_library', 0))
self._INSTALLABLE_TARGETS = ('executable', 'loadable_module',
'shared_library')
if (self.is_standalone_static_library or
self.type in self._INSTALLABLE_TARGETS):
self.alias = os.path.basename(self.output)
install_path = self._InstallableTargetInstallPath()
else:
self.alias = self.output
install_path = self.output
self.WriteLn("TOOLSET := " + self.toolset)
self.WriteLn("TARGET := " + self.target)
# Actions must come first, since they can generate more OBJs for use below.
if 'actions' in spec:
self.WriteActions(spec['actions'], extra_sources, extra_outputs,
extra_mac_bundle_resources, part_of_all)
# Rules must be early like actions.
if 'rules' in spec:
self.WriteRules(spec['rules'], extra_sources, extra_outputs,
extra_mac_bundle_resources, part_of_all)
if 'copies' in spec:
self.WriteCopies(spec['copies'], extra_outputs, part_of_all)
# Bundle resources.
if self.is_mac_bundle:
all_mac_bundle_resources = (
spec.get('mac_bundle_resources', []) + extra_mac_bundle_resources)
self.WriteMacBundleResources(all_mac_bundle_resources, mac_bundle_deps)
self.WriteMacInfoPlist(mac_bundle_deps)
# Sources.
all_sources = spec.get('sources', []) + extra_sources
if all_sources:
if self.flavor == 'mac':
# libtool on OS X generates warnings for duplicate basenames in the same
# target.
_ValidateSourcesForOSX(spec, all_sources)
self.WriteSources(
configs, deps, all_sources, extra_outputs,
extra_link_deps, part_of_all,
gyp.xcode_emulation.MacPrefixHeader(
self.xcode_settings, lambda p: Sourceify(self.Absolutify(p)),
self.Pchify))
sources = filter(Compilable, all_sources)
if sources:
self.WriteLn(SHARED_HEADER_SUFFIX_RULES_COMMENT1)
extensions = set([os.path.splitext(s)[1] for s in sources])
for ext in extensions:
if ext in self.suffix_rules_srcdir:
self.WriteLn(self.suffix_rules_srcdir[ext])
self.WriteLn(SHARED_HEADER_SUFFIX_RULES_COMMENT2)
for ext in extensions:
if ext in self.suffix_rules_objdir1:
self.WriteLn(self.suffix_rules_objdir1[ext])
for ext in extensions:
if ext in self.suffix_rules_objdir2:
self.WriteLn(self.suffix_rules_objdir2[ext])
self.WriteLn('# End of this set of suffix rules')
# Add dependency from bundle to bundle binary.
if self.is_mac_bundle:
mac_bundle_deps.append(self.output_binary)
self.WriteTarget(spec, configs, deps, extra_link_deps + link_deps,
mac_bundle_deps, extra_outputs, part_of_all)
# Update global list of target outputs, used in dependency tracking.
target_outputs[qualified_target] = install_path
# Update global list of link dependencies.
if self.type in ('static_library', 'shared_library'):
target_link_deps[qualified_target] = self.output_binary
    # Currently all versions have the same effect, but in the future the
    # behavior could differ.
if self.generator_flags.get('android_ndk_version', None):
self.WriteAndroidNdkModuleRule(self.target, all_sources, link_deps)
self.fp.close()
def WriteSubMake(self, output_filename, makefile_path, targets, build_dir):
"""Write a "sub-project" Makefile.
This is a small, wrapper Makefile that calls the top-level Makefile to build
the targets from a single gyp file (i.e. a sub-project).
Arguments:
output_filename: sub-project Makefile name to write
makefile_path: path to the top-level Makefile
targets: list of "all" targets for this sub-project
build_dir: build output directory, relative to the sub-project
"""
gyp.common.EnsureDirExists(output_filename)
self.fp = open(output_filename, 'w')
self.fp.write(header)
# For consistency with other builders, put sub-project build output in the
# sub-project dir (see test/subdirectory/gyptest-subdir-all.py).
self.WriteLn('export builddir_name ?= %s' %
os.path.join(os.path.dirname(output_filename), build_dir))
self.WriteLn('.PHONY: all')
self.WriteLn('all:')
if makefile_path:
makefile_path = ' -C ' + makefile_path
self.WriteLn('\t$(MAKE)%s %s' % (makefile_path, ' '.join(targets)))
self.fp.close()
def WriteActions(self, actions, extra_sources, extra_outputs,
extra_mac_bundle_resources, part_of_all):
"""Write Makefile code for any 'actions' from the gyp input.
extra_sources: a list that will be filled in with newly generated source
files, if any
extra_outputs: a list that will be filled in with any outputs of these
actions (used to make other pieces dependent on these
actions)
part_of_all: flag indicating this target is part of 'all'
"""
env = self.GetSortedXcodeEnv()
for action in actions:
name = StringToMakefileVariable('%s_%s' % (self.qualified_target,
action['action_name']))
self.WriteLn('### Rules for action "%s":' % action['action_name'])
inputs = action['inputs']
outputs = action['outputs']
# Build up a list of outputs.
# Collect the output dirs we'll need.
dirs = set()
for out in outputs:
dir = os.path.split(out)[0]
if dir:
dirs.add(dir)
if int(action.get('process_outputs_as_sources', False)):
extra_sources += outputs
if int(action.get('process_outputs_as_mac_bundle_resources', False)):
extra_mac_bundle_resources += outputs
# Write the actual command.
action_commands = action['action']
if self.flavor == 'mac':
action_commands = [gyp.xcode_emulation.ExpandEnvVars(command, env)
for command in action_commands]
command = gyp.common.EncodePOSIXShellList(action_commands)
if 'message' in action:
self.WriteLn('quiet_cmd_%s = ACTION %s $@' % (name, action['message']))
else:
self.WriteLn('quiet_cmd_%s = ACTION %s $@' % (name, name))
if len(dirs) > 0:
command = 'mkdir -p %s' % ' '.join(dirs) + '; ' + command
cd_action = 'cd %s; ' % Sourceify(self.path or '.')
# command and cd_action get written to a toplevel variable called
# cmd_foo. Toplevel variables can't handle things that change per
# makefile like $(TARGET), so hardcode the target.
command = command.replace('$(TARGET)', self.target)
cd_action = cd_action.replace('$(TARGET)', self.target)
# Set LD_LIBRARY_PATH in case the action runs an executable from this
# build which links to shared libs from this build.
      # Actions run on the host, so they should in theory only use host
# libraries, but until everything is made cross-compile safe, also use
# target libraries.
# TODO(piman): when everything is cross-compile safe, remove lib.target
self.WriteLn('cmd_%s = LD_LIBRARY_PATH=$(builddir)/lib.host:'
'$(builddir)/lib.target:$$LD_LIBRARY_PATH; '
'export LD_LIBRARY_PATH; '
'%s%s'
% (name, cd_action, command))
self.WriteLn()
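      # Illustrative sketch with hypothetical names: an action 'protoc' on a
      # target living in src/foo would come out roughly as
      #
      #   quiet_cmd_foo_protoc = ACTION foo_protoc $@
      #   cmd_foo_protoc = LD_LIBRARY_PATH=$(builddir)/lib.host:\
      #       $(builddir)/lib.target:$$LD_LIBRARY_PATH; export LD_LIBRARY_PATH; \
      #       cd src/foo; mkdir -p gen; protoc ...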
outputs = map(self.Absolutify, outputs)
# The makefile rules are all relative to the top dir, but the gyp actions
# are defined relative to their containing dir. This replaces the obj
# variable for the action rule with an absolute version so that the output
# goes in the right place.
# Only write the 'obj' and 'builddir' rules for the "primary" output (:1);
# it's superfluous for the "extra outputs", and this avoids accidentally
# writing duplicate dummy rules for those outputs.
# Same for environment.
self.WriteLn("%s: obj := $(abs_obj)" % QuoteSpaces(outputs[0]))
self.WriteLn("%s: builddir := $(abs_builddir)" % QuoteSpaces(outputs[0]))
self.WriteSortedXcodeEnv(outputs[0], self.GetSortedXcodeEnv())
for input in inputs:
assert ' ' not in input, (
"Spaces in action input filenames not supported (%s)" % input)
for output in outputs:
assert ' ' not in output, (
"Spaces in action output filenames not supported (%s)" % output)
# See the comment in WriteCopies about expanding env vars.
outputs = [gyp.xcode_emulation.ExpandEnvVars(o, env) for o in outputs]
inputs = [gyp.xcode_emulation.ExpandEnvVars(i, env) for i in inputs]
self.WriteDoCmd(outputs, map(Sourceify, map(self.Absolutify, inputs)),
part_of_all=part_of_all, command=name)
# Stuff the outputs in a variable so we can refer to them later.
outputs_variable = 'action_%s_outputs' % name
self.WriteLn('%s := %s' % (outputs_variable, ' '.join(outputs)))
extra_outputs.append('$(%s)' % outputs_variable)
self.WriteLn()
self.WriteLn()
def WriteRules(self, rules, extra_sources, extra_outputs,
extra_mac_bundle_resources, part_of_all):
"""Write Makefile code for any 'rules' from the gyp input.
extra_sources: a list that will be filled in with newly generated source
files, if any
extra_outputs: a list that will be filled in with any outputs of these
rules (used to make other pieces dependent on these rules)
part_of_all: flag indicating this target is part of 'all'
"""
env = self.GetSortedXcodeEnv()
for rule in rules:
name = StringToMakefileVariable('%s_%s' % (self.qualified_target,
rule['rule_name']))
count = 0
self.WriteLn('### Generated for rule %s:' % name)
all_outputs = []
for rule_source in rule.get('rule_sources', []):
dirs = set()
(rule_source_dirname, rule_source_basename) = os.path.split(rule_source)
(rule_source_root, rule_source_ext) = \
os.path.splitext(rule_source_basename)
outputs = [self.ExpandInputRoot(out, rule_source_root,
rule_source_dirname)
for out in rule['outputs']]
for out in outputs:
dir = os.path.dirname(out)
if dir:
dirs.add(dir)
if int(rule.get('process_outputs_as_sources', False)):
extra_sources += outputs
if int(rule.get('process_outputs_as_mac_bundle_resources', False)):
extra_mac_bundle_resources += outputs
inputs = map(Sourceify, map(self.Absolutify, [rule_source] +
rule.get('inputs', [])))
actions = ['$(call do_cmd,%s_%d)' % (name, count)]
if name == 'resources_grit':
# HACK: This is ugly. Grit intentionally doesn't touch the
# timestamp of its output file when the file doesn't change,
# which is fine in hash-based dependency systems like scons
# and forge, but not kosher in the make world. After some
# discussion, hacking around it here seems like the least
# amount of pain.
actions += ['@touch --no-create $@']
# See the comment in WriteCopies about expanding env vars.
outputs = [gyp.xcode_emulation.ExpandEnvVars(o, env) for o in outputs]
inputs = [gyp.xcode_emulation.ExpandEnvVars(i, env) for i in inputs]
outputs = map(self.Absolutify, outputs)
all_outputs += outputs
# Only write the 'obj' and 'builddir' rules for the "primary" output
# (:1); it's superfluous for the "extra outputs", and this avoids
# accidentally writing duplicate dummy rules for those outputs.
self.WriteLn('%s: obj := $(abs_obj)' % outputs[0])
self.WriteLn('%s: builddir := $(abs_builddir)' % outputs[0])
self.WriteMakeRule(outputs, inputs, actions,
command="%s_%d" % (name, count))
# Spaces in rule filenames are not supported, but rule variables have
# spaces in them (e.g. RULE_INPUT_PATH expands to '$(abspath $<)').
# The spaces within the variables are valid, so remove the variables
# before checking.
variables_with_spaces = re.compile(r'\$\([^ ]* \$<\)')
for output in outputs:
output = re.sub(variables_with_spaces, '', output)
assert ' ' not in output, (
"Spaces in rule filenames not yet supported (%s)" % output)
self.WriteLn('all_deps += %s' % ' '.join(outputs))
action = [self.ExpandInputRoot(ac, rule_source_root,
rule_source_dirname)
for ac in rule['action']]
mkdirs = ''
if len(dirs) > 0:
mkdirs = 'mkdir -p %s; ' % ' '.join(dirs)
cd_action = 'cd %s; ' % Sourceify(self.path or '.')
# action, cd_action, and mkdirs get written to a toplevel variable
# called cmd_foo. Toplevel variables can't handle things that change
# per makefile like $(TARGET), so hardcode the target.
if self.flavor == 'mac':
action = [gyp.xcode_emulation.ExpandEnvVars(command, env)
for command in action]
action = gyp.common.EncodePOSIXShellList(action)
action = action.replace('$(TARGET)', self.target)
cd_action = cd_action.replace('$(TARGET)', self.target)
mkdirs = mkdirs.replace('$(TARGET)', self.target)
# Set LD_LIBRARY_PATH in case the rule runs an executable from this
# build which links to shared libs from this build.
        # Rules run on the host, so they should in theory only use host
# libraries, but until everything is made cross-compile safe, also use
# target libraries.
# TODO(piman): when everything is cross-compile safe, remove lib.target
self.WriteLn(
"cmd_%(name)s_%(count)d = LD_LIBRARY_PATH="
"$(builddir)/lib.host:$(builddir)/lib.target:$$LD_LIBRARY_PATH; "
"export LD_LIBRARY_PATH; "
"%(cd_action)s%(mkdirs)s%(action)s" % {
'action': action,
'cd_action': cd_action,
'count': count,
'mkdirs': mkdirs,
'name': name,
})
self.WriteLn(
'quiet_cmd_%(name)s_%(count)d = RULE %(name)s_%(count)d $@' % {
'count': count,
'name': name,
})
self.WriteLn()
count += 1
outputs_variable = 'rule_%s_outputs' % name
self.WriteList(all_outputs, outputs_variable)
extra_outputs.append('$(%s)' % outputs_variable)
self.WriteLn('### Finished generating for rule: %s' % name)
self.WriteLn()
self.WriteLn('### Finished generating for all rules')
self.WriteLn('')
def WriteCopies(self, copies, extra_outputs, part_of_all):
"""Write Makefile code for any 'copies' from the gyp input.
extra_outputs: a list that will be filled in with any outputs of this action
(used to make other pieces dependent on this action)
part_of_all: flag indicating this target is part of 'all'
"""
self.WriteLn('### Generated for copy rule.')
variable = StringToMakefileVariable(self.qualified_target + '_copies')
outputs = []
for copy in copies:
for path in copy['files']:
# Absolutify() may call normpath, and will strip trailing slashes.
path = Sourceify(self.Absolutify(path))
filename = os.path.split(path)[1]
output = Sourceify(self.Absolutify(os.path.join(copy['destination'],
filename)))
# If the output path has variables in it, which happens in practice for
# 'copies', writing the environment as target-local doesn't work,
# because the variables are already needed for the target name.
# Copying the environment variables into global make variables doesn't
# work either, because then the .d files will potentially contain spaces
# after variable expansion, and .d file handling cannot handle spaces.
# As a workaround, manually expand variables at gyp time. Since 'copies'
# can't run scripts, there's no need to write the env then.
# WriteDoCmd() will escape spaces for .d files.
env = self.GetSortedXcodeEnv()
output = gyp.xcode_emulation.ExpandEnvVars(output, env)
path = gyp.xcode_emulation.ExpandEnvVars(path, env)
self.WriteDoCmd([output], [path], 'copy', part_of_all)
outputs.append(output)
self.WriteLn('%s = %s' % (variable, ' '.join(map(QuoteSpaces, outputs))))
extra_outputs.append('$(%s)' % variable)
self.WriteLn()
def WriteMacBundleResources(self, resources, bundle_deps):
"""Writes Makefile code for 'mac_bundle_resources'."""
self.WriteLn('### Generated for mac_bundle_resources')
for output, res in gyp.xcode_emulation.GetMacBundleResources(
generator_default_variables['PRODUCT_DIR'], self.xcode_settings,
map(Sourceify, map(self.Absolutify, resources))):
_, ext = os.path.splitext(output)
if ext != '.xcassets':
        # Make does not support '.xcassets' emulation.
self.WriteDoCmd([output], [res], 'mac_tool,,,copy-bundle-resource',
part_of_all=True)
bundle_deps.append(output)
def WriteMacInfoPlist(self, bundle_deps):
"""Write Makefile code for bundle Info.plist files."""
info_plist, out, defines, extra_env = gyp.xcode_emulation.GetMacInfoPlist(
generator_default_variables['PRODUCT_DIR'], self.xcode_settings,
lambda p: Sourceify(self.Absolutify(p)))
if not info_plist:
return
if defines:
# Create an intermediate file to store preprocessed results.
intermediate_plist = ('$(obj).$(TOOLSET)/$(TARGET)/' +
os.path.basename(info_plist))
self.WriteList(defines, intermediate_plist + ': INFOPLIST_DEFINES', '-D',
quoter=EscapeCppDefine)
self.WriteMakeRule([intermediate_plist], [info_plist],
['$(call do_cmd,infoplist)',
# "Convert" the plist so that any weird whitespace changes from the
# preprocessor do not affect the XML parser in mac_tool.
'@plutil -convert xml1 $@ $@'])
info_plist = intermediate_plist
    # plists can reference environment variables; write the env out so those
    # variables get substituted into the file.
self.WriteSortedXcodeEnv(
out, self.GetSortedXcodeEnv(additional_settings=extra_env))
self.WriteDoCmd([out], [info_plist], 'mac_tool,,,copy-info-plist',
part_of_all=True)
bundle_deps.append(out)
def WriteSources(self, configs, deps, sources,
extra_outputs, extra_link_deps,
part_of_all, precompiled_header):
"""Write Makefile code for any 'sources' from the gyp input.
These are source files necessary to build the current target.
configs, deps, sources: input from gyp.
extra_outputs: a list of extra outputs this action should be dependent on;
used to serialize action/rules before compilation
extra_link_deps: a list that will be filled in with any outputs of
compilation (to be used in link lines)
part_of_all: flag indicating this target is part of 'all'
"""
# Write configuration-specific variables for CFLAGS, etc.
for configname in sorted(configs.keys()):
config = configs[configname]
self.WriteList(config.get('defines'), 'DEFS_%s' % configname, prefix='-D',
quoter=EscapeCppDefine)
if self.flavor == 'mac':
cflags = self.xcode_settings.GetCflags(configname)
cflags_c = self.xcode_settings.GetCflagsC(configname)
cflags_cc = self.xcode_settings.GetCflagsCC(configname)
cflags_objc = self.xcode_settings.GetCflagsObjC(configname)
cflags_objcc = self.xcode_settings.GetCflagsObjCC(configname)
else:
cflags = config.get('cflags')
cflags_c = config.get('cflags_c')
cflags_cc = config.get('cflags_cc')
self.WriteLn("# Flags passed to all source files.");
self.WriteList(cflags, 'CFLAGS_%s' % configname)
self.WriteLn("# Flags passed to only C files.");
self.WriteList(cflags_c, 'CFLAGS_C_%s' % configname)
self.WriteLn("# Flags passed to only C++ files.");
self.WriteList(cflags_cc, 'CFLAGS_CC_%s' % configname)
if self.flavor == 'mac':
self.WriteLn("# Flags passed to only ObjC files.");
self.WriteList(cflags_objc, 'CFLAGS_OBJC_%s' % configname)
self.WriteLn("# Flags passed to only ObjC++ files.");
self.WriteList(cflags_objcc, 'CFLAGS_OBJCC_%s' % configname)
includes = config.get('include_dirs')
if includes:
includes = map(Sourceify, map(self.Absolutify, includes))
self.WriteList(includes, 'INCS_%s' % configname, prefix='-I')
compilable = filter(Compilable, sources)
objs = map(self.Objectify, map(self.Absolutify, map(Target, compilable)))
self.WriteList(objs, 'OBJS')
for obj in objs:
assert ' ' not in obj, (
"Spaces in object filenames not supported (%s)" % obj)
self.WriteLn('# Add to the list of files we specially track '
'dependencies for.')
self.WriteLn('all_deps += $(OBJS)')
self.WriteLn()
# Make sure our dependencies are built first.
if deps:
self.WriteMakeRule(['$(OBJS)'], deps,
comment = 'Make sure our dependencies are built '
'before any of us.',
order_only = True)
# Make sure the actions and rules run first.
# If they generate any extra headers etc., the per-.o file dep tracking
# will catch the proper rebuilds, so order only is still ok here.
if extra_outputs:
self.WriteMakeRule(['$(OBJS)'], extra_outputs,
comment = 'Make sure our actions/rules run '
'before any of us.',
order_only = True)
    pchdeps = precompiled_header.GetObjDependencies(compilable, objs)
if pchdeps:
self.WriteLn('# Dependencies from obj files to their precompiled headers')
for source, obj, gch in pchdeps:
self.WriteLn('%s: %s' % (obj, gch))
self.WriteLn('# End precompiled header dependencies')
if objs:
extra_link_deps.append('$(OBJS)')
self.WriteLn("""\
# CFLAGS et al overrides must be target-local.
# See "Target-specific Variable Values" in the GNU Make manual.""")
self.WriteLn("$(OBJS): TOOLSET := $(TOOLSET)")
self.WriteLn("$(OBJS): GYP_CFLAGS := "
"$(DEFS_$(BUILDTYPE)) "
"$(INCS_$(BUILDTYPE)) "
"%s " % precompiled_header.GetInclude('c') +
"$(CFLAGS_$(BUILDTYPE)) "
"$(CFLAGS_C_$(BUILDTYPE))")
self.WriteLn("$(OBJS): GYP_CXXFLAGS := "
"$(DEFS_$(BUILDTYPE)) "
"$(INCS_$(BUILDTYPE)) "
"%s " % precompiled_header.GetInclude('cc') +
"$(CFLAGS_$(BUILDTYPE)) "
"$(CFLAGS_CC_$(BUILDTYPE))")
if self.flavor == 'mac':
self.WriteLn("$(OBJS): GYP_OBJCFLAGS := "
"$(DEFS_$(BUILDTYPE)) "
"$(INCS_$(BUILDTYPE)) "
"%s " % precompiled_header.GetInclude('m') +
"$(CFLAGS_$(BUILDTYPE)) "
"$(CFLAGS_C_$(BUILDTYPE)) "
"$(CFLAGS_OBJC_$(BUILDTYPE))")
self.WriteLn("$(OBJS): GYP_OBJCXXFLAGS := "
"$(DEFS_$(BUILDTYPE)) "
"$(INCS_$(BUILDTYPE)) "
"%s " % precompiled_header.GetInclude('mm') +
"$(CFLAGS_$(BUILDTYPE)) "
"$(CFLAGS_CC_$(BUILDTYPE)) "
"$(CFLAGS_OBJCC_$(BUILDTYPE))")
self.WritePchTargets(precompiled_header.GetPchBuildCommands())
# If there are any object files in our input file list, link them into our
# output.
extra_link_deps += filter(Linkable, sources)
self.WriteLn()
def WritePchTargets(self, pch_commands):
"""Writes make rules to compile prefix headers."""
if not pch_commands:
return
for gch, lang_flag, lang, input in pch_commands:
extra_flags = {
'c': '$(CFLAGS_C_$(BUILDTYPE))',
'cc': '$(CFLAGS_CC_$(BUILDTYPE))',
'm': '$(CFLAGS_C_$(BUILDTYPE)) $(CFLAGS_OBJC_$(BUILDTYPE))',
'mm': '$(CFLAGS_CC_$(BUILDTYPE)) $(CFLAGS_OBJCC_$(BUILDTYPE))',
}[lang]
var_name = {
'c': 'GYP_PCH_CFLAGS',
'cc': 'GYP_PCH_CXXFLAGS',
'm': 'GYP_PCH_OBJCFLAGS',
'mm': 'GYP_PCH_OBJCXXFLAGS',
}[lang]
self.WriteLn("%s: %s := %s " % (gch, var_name, lang_flag) +
"$(DEFS_$(BUILDTYPE)) "
"$(INCS_$(BUILDTYPE)) "
"$(CFLAGS_$(BUILDTYPE)) " +
extra_flags)
self.WriteLn('%s: %s FORCE_DO_CMD' % (gch, input))
self.WriteLn('\t@$(call do_cmd,pch_%s,1)' % lang)
self.WriteLn('')
assert ' ' not in gch, (
"Spaces in gch filenames not supported (%s)" % gch)
self.WriteLn('all_deps += %s' % gch)
self.WriteLn('')
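    # Illustrative sketch (hypothetical paths; assuming the xcode emulation
    # layer produces a lang_flag like '-x c-header' for lang 'c'): one loop
    # iteration would emit roughly
    #
    #   $(obj).target/$(TARGET)/pch-c/prefix.h.gch: GYP_PCH_CFLAGS := \
    #       -x c-header $(DEFS_$(BUILDTYPE)) $(INCS_$(BUILDTYPE)) \
    #       $(CFLAGS_$(BUILDTYPE)) $(CFLAGS_C_$(BUILDTYPE))
    #   $(obj).target/$(TARGET)/pch-c/prefix.h.gch: src/prefix.h FORCE_DO_CMD
    #       @$(call do_cmd,pch_c,1)
    #   all_deps += $(obj).target/$(TARGET)/pch-c/prefix.h.gch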
def ComputeOutputBasename(self, spec):
"""Return the 'output basename' of a gyp spec.
E.g., the loadable module 'foobar' in directory 'baz' will produce
'libfoobar.so'
"""
assert not self.is_mac_bundle
if self.flavor == 'mac' and self.type in (
'static_library', 'executable', 'shared_library', 'loadable_module'):
return self.xcode_settings.GetExecutablePath()
target = spec['target_name']
target_prefix = ''
target_ext = ''
if self.type == 'static_library':
if target[:3] == 'lib':
target = target[3:]
target_prefix = 'lib'
target_ext = '.a'
elif self.type in ('loadable_module', 'shared_library'):
if target[:3] == 'lib':
target = target[3:]
target_prefix = 'lib'
target_ext = '.so'
elif self.type == 'none':
target = '%s.stamp' % target
elif self.type != 'executable':
print ("ERROR: What output file should be generated?",
"type", self.type, "target", target)
target_prefix = spec.get('product_prefix', target_prefix)
target = spec.get('product_name', target)
product_ext = spec.get('product_extension')
if product_ext:
target_ext = '.' + product_ext
return target_prefix + target + target_ext
def _InstallImmediately(self):
return self.toolset == 'target' and self.flavor == 'mac' and self.type in (
'static_library', 'executable', 'shared_library', 'loadable_module')
def ComputeOutput(self, spec):
"""Return the 'output' (full output path) of a gyp spec.
E.g., the loadable module 'foobar' in directory 'baz' will produce
'$(obj)/baz/libfoobar.so'
"""
assert not self.is_mac_bundle
path = os.path.join('$(obj).' + self.toolset, self.path)
if self.type == 'executable' or self._InstallImmediately():
path = '$(builddir)'
path = spec.get('product_dir', path)
return os.path.join(path, self.ComputeOutputBasename(spec))
def ComputeMacBundleOutput(self, spec):
"""Return the 'output' (full output path) to a bundle output directory."""
assert self.is_mac_bundle
path = generator_default_variables['PRODUCT_DIR']
return os.path.join(path, self.xcode_settings.GetWrapperName())
def ComputeMacBundleBinaryOutput(self, spec):
"""Return the 'output' (full output path) to the binary in a bundle."""
path = generator_default_variables['PRODUCT_DIR']
return os.path.join(path, self.xcode_settings.GetExecutablePath())
def ComputeDeps(self, spec):
"""Compute the dependencies of a gyp spec.
Returns a tuple (deps, link_deps), where each is a list of
filenames that will need to be put in front of make for either
building (deps) or linking (link_deps).
"""
deps = []
link_deps = []
if 'dependencies' in spec:
deps.extend([target_outputs[dep] for dep in spec['dependencies']
if target_outputs[dep]])
for dep in spec['dependencies']:
if dep in target_link_deps:
link_deps.append(target_link_deps[dep])
deps.extend(link_deps)
# TODO: It seems we need to transitively link in libraries (e.g. -lfoo)?
# This hack makes it work:
# link_deps.extend(spec.get('libraries', []))
return (gyp.common.uniquer(deps), gyp.common.uniquer(link_deps))
def WriteDependencyOnExtraOutputs(self, target, extra_outputs):
self.WriteMakeRule([self.output_binary], extra_outputs,
comment = 'Build our special outputs first.',
order_only = True)
def WriteTarget(self, spec, configs, deps, link_deps, bundle_deps,
extra_outputs, part_of_all):
"""Write Makefile code to produce the final target of the gyp spec.
spec, configs: input from gyp.
deps, link_deps: dependency lists; see ComputeDeps()
extra_outputs: any extra outputs that our target should depend on
part_of_all: flag indicating this target is part of 'all'
"""
self.WriteLn('### Rules for final target.')
if extra_outputs:
self.WriteDependencyOnExtraOutputs(self.output_binary, extra_outputs)
self.WriteMakeRule(extra_outputs, deps,
comment=('Preserve order dependency of '
'special output on deps.'),
order_only = True)
target_postbuilds = {}
if self.type != 'none':
for configname in sorted(configs.keys()):
config = configs[configname]
if self.flavor == 'mac':
ldflags = self.xcode_settings.GetLdflags(configname,
generator_default_variables['PRODUCT_DIR'],
lambda p: Sourceify(self.Absolutify(p)))
# TARGET_POSTBUILDS_$(BUILDTYPE) is added to postbuilds later on.
gyp_to_build = gyp.common.InvertRelativePath(self.path)
target_postbuild = self.xcode_settings.AddImplicitPostbuilds(
configname,
QuoteSpaces(os.path.normpath(os.path.join(gyp_to_build,
self.output))),
QuoteSpaces(os.path.normpath(os.path.join(gyp_to_build,
self.output_binary))))
if target_postbuild:
target_postbuilds[configname] = target_postbuild
else:
ldflags = config.get('ldflags', [])
# Compute an rpath for this output if needed.
if any(dep.endswith('.so') or '.so.' in dep for dep in deps):
# We want to get the literal string "$ORIGIN" into the link command,
# so we need lots of escaping.
ldflags.append(r'-Wl,-rpath=\$$ORIGIN/lib.%s/' % self.toolset)
ldflags.append(r'-Wl,-rpath-link=\$(builddir)/lib.%s/' %
self.toolset)
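            # Escaping walkthrough (sketch): make collapses '$$' into '$', so
            # the recipe reaches the shell containing '\$ORIGIN'; the shell
            # strips the backslash without expanding the variable, and the
            # linker records the literal string '$ORIGIN' in the rpath, e.g.
            #   -Wl,-rpath=$ORIGIN/lib.target/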
library_dirs = config.get('library_dirs', [])
ldflags += [('-L%s' % library_dir) for library_dir in library_dirs]
self.WriteList(ldflags, 'LDFLAGS_%s' % configname)
if self.flavor == 'mac':
self.WriteList(self.xcode_settings.GetLibtoolflags(configname),
'LIBTOOLFLAGS_%s' % configname)
libraries = spec.get('libraries')
if libraries:
# Remove duplicate entries
libraries = gyp.common.uniquer(libraries)
if self.flavor == 'mac':
libraries = self.xcode_settings.AdjustLibraries(libraries)
self.WriteList(libraries, 'LIBS')
self.WriteLn('%s: GYP_LDFLAGS := $(LDFLAGS_$(BUILDTYPE))' %
QuoteSpaces(self.output_binary))
self.WriteLn('%s: LIBS := $(LIBS)' % QuoteSpaces(self.output_binary))
if self.flavor == 'mac':
self.WriteLn('%s: GYP_LIBTOOLFLAGS := $(LIBTOOLFLAGS_$(BUILDTYPE))' %
QuoteSpaces(self.output_binary))
# Postbuild actions. Like actions, but implicitly depend on the target's
# output.
postbuilds = []
if self.flavor == 'mac':
if target_postbuilds:
postbuilds.append('$(TARGET_POSTBUILDS_$(BUILDTYPE))')
postbuilds.extend(
gyp.xcode_emulation.GetSpecPostbuildCommands(spec))
if postbuilds:
# Envvars may be referenced by TARGET_POSTBUILDS_$(BUILDTYPE),
# so we must output its definition first, since we declare variables
# using ":=".
self.WriteSortedXcodeEnv(self.output, self.GetSortedXcodePostbuildEnv())
for configname in target_postbuilds:
self.WriteLn('%s: TARGET_POSTBUILDS_%s := %s' %
(QuoteSpaces(self.output),
configname,
gyp.common.EncodePOSIXShellList(target_postbuilds[configname])))
# Postbuilds expect to be run in the gyp file's directory, so insert an
# implicit postbuild to cd to there.
postbuilds.insert(0, gyp.common.EncodePOSIXShellList(['cd', self.path]))
for i in xrange(len(postbuilds)):
if not postbuilds[i].startswith('$'):
postbuilds[i] = EscapeShellArgument(postbuilds[i])
self.WriteLn('%s: builddir := $(abs_builddir)' % QuoteSpaces(self.output))
self.WriteLn('%s: POSTBUILDS := %s' % (
QuoteSpaces(self.output), ' '.join(postbuilds)))
# A bundle directory depends on its dependencies such as bundle resources
# and bundle binary. When all dependencies have been built, the bundle
# needs to be packaged.
if self.is_mac_bundle:
# If the framework doesn't contain a binary, then nothing depends
# on the actions -- make the framework depend on them directly too.
self.WriteDependencyOnExtraOutputs(self.output, extra_outputs)
# Bundle dependencies. Note that the code below adds actions to this
# target, so if you move these two lines, move the lines below as well.
self.WriteList(map(QuoteSpaces, bundle_deps), 'BUNDLE_DEPS')
self.WriteLn('%s: $(BUNDLE_DEPS)' % QuoteSpaces(self.output))
# After the framework is built, package it. Needs to happen before
# postbuilds, since postbuilds depend on this.
if self.type in ('shared_library', 'loadable_module'):
self.WriteLn('\t@$(call do_cmd,mac_package_framework,,,%s)' %
self.xcode_settings.GetFrameworkVersion())
      # Bundle postbuilds can depend on the whole bundle, so run them after
      # the bundle is packaged, not merely after the bundle binary is done.
if postbuilds:
self.WriteLn('\t@$(call do_postbuilds)')
postbuilds = [] # Don't write postbuilds for target's output.
# Needed by test/mac/gyptest-rebuild.py.
self.WriteLn('\t@true # No-op, used by tests')
      # Since this target depends on binary and resources which are in
      # nested subfolders, the framework directory will usually be older
      # than its dependencies. To prevent this rule from executing on every
      # build (expensive, especially with postbuilds), explicitly update
      # the time on the framework directory.
self.WriteLn('\t@touch -c %s' % QuoteSpaces(self.output))
if postbuilds:
assert not self.is_mac_bundle, ('Postbuilds for bundles should be done '
'on the bundle, not the binary (target \'%s\')' % self.target)
assert 'product_dir' not in spec, ('Postbuilds do not work with '
'custom product_dir')
if self.type == 'executable':
self.WriteLn('%s: LD_INPUTS := %s' % (
QuoteSpaces(self.output_binary),
' '.join(map(QuoteSpaces, link_deps))))
if self.toolset == 'host' and self.flavor == 'android':
self.WriteDoCmd([self.output_binary], link_deps, 'link_host',
part_of_all, postbuilds=postbuilds)
else:
self.WriteDoCmd([self.output_binary], link_deps, 'link', part_of_all,
postbuilds=postbuilds)
elif self.type == 'static_library':
for link_dep in link_deps:
assert ' ' not in link_dep, (
"Spaces in alink input filenames not supported (%s)" % link_dep)
if (self.flavor not in ('mac', 'openbsd', 'netbsd', 'win') and not
self.is_standalone_static_library):
self.WriteDoCmd([self.output_binary], link_deps, 'alink_thin',
part_of_all, postbuilds=postbuilds)
else:
self.WriteDoCmd([self.output_binary], link_deps, 'alink', part_of_all,
postbuilds=postbuilds)
elif self.type == 'shared_library':
self.WriteLn('%s: LD_INPUTS := %s' % (
QuoteSpaces(self.output_binary),
' '.join(map(QuoteSpaces, link_deps))))
self.WriteDoCmd([self.output_binary], link_deps, 'solink', part_of_all,
postbuilds=postbuilds)
elif self.type == 'loadable_module':
for link_dep in link_deps:
assert ' ' not in link_dep, (
"Spaces in module input filenames not supported (%s)" % link_dep)
if self.toolset == 'host' and self.flavor == 'android':
self.WriteDoCmd([self.output_binary], link_deps, 'solink_module_host',
part_of_all, postbuilds=postbuilds)
else:
self.WriteDoCmd(
[self.output_binary], link_deps, 'solink_module', part_of_all,
postbuilds=postbuilds)
elif self.type == 'none':
# Write a stamp line.
self.WriteDoCmd([self.output_binary], deps, 'touch', part_of_all,
postbuilds=postbuilds)
else:
print "WARNING: no output for", self.type, target
# Add an alias for each target (if there are any outputs).
# Installable target aliases are created below.
if ((self.output and self.output != self.target) and
(self.type not in self._INSTALLABLE_TARGETS)):
self.WriteMakeRule([self.target], [self.output],
comment='Add target alias', phony = True)
if part_of_all:
self.WriteMakeRule(['all'], [self.target],
comment = 'Add target alias to "all" target.',
phony = True)
# Add special-case rules for our installable targets.
# 1) They need to install to the build dir or "product" dir.
# 2) They get shortcuts for building (e.g. "make chrome").
# 3) They are part of "make all".
if (self.type in self._INSTALLABLE_TARGETS or
self.is_standalone_static_library):
if self.type == 'shared_library':
file_desc = 'shared library'
elif self.type == 'static_library':
file_desc = 'static library'
else:
file_desc = 'executable'
install_path = self._InstallableTargetInstallPath()
installable_deps = [self.output]
if (self.flavor == 'mac' and not 'product_dir' in spec and
self.toolset == 'target'):
# On mac, products are created in install_path immediately.
assert install_path == self.output, '%s != %s' % (
install_path, self.output)
# Point the target alias to the final binary output.
self.WriteMakeRule([self.target], [install_path],
comment='Add target alias', phony = True)
if install_path != self.output:
assert not self.is_mac_bundle # See comment a few lines above.
self.WriteDoCmd([install_path], [self.output], 'copy',
comment = 'Copy this to the %s output path.' %
file_desc, part_of_all=part_of_all)
installable_deps.append(install_path)
if self.output != self.alias and self.alias != self.target:
self.WriteMakeRule([self.alias], installable_deps,
comment = 'Short alias for building this %s.' %
file_desc, phony = True)
if part_of_all:
self.WriteMakeRule(['all'], [install_path],
comment = 'Add %s to "all" target.' % file_desc,
phony = True)
def WriteList(self, value_list, variable=None, prefix='',
quoter=QuoteIfNecessary):
"""Write a variable definition that is a list of values.
E.g. WriteList(['a','b'], 'foo', prefix='blah') writes out
foo = blaha blahb
but in a pretty-printed style.
"""
values = ''
if value_list:
value_list = [quoter(prefix + l) for l in value_list]
values = ' \\\n\t' + ' \\\n\t'.join(value_list)
self.fp.write('%s :=%s\n\n' % (variable, values))
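    # Illustrative example: WriteList(['a', 'b'], 'foo', prefix='blah')
    # produces
    #
    #   foo := \
    #       blaha \
    #       blahb
    #
    # i.e. each value on its own backslash-continued, tab-indented line.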
def WriteDoCmd(self, outputs, inputs, command, part_of_all, comment=None,
postbuilds=False):
"""Write a Makefile rule that uses do_cmd.
This makes the outputs dependent on the command line that was run,
    as well as supporting the V= make command line flag.
"""
suffix = ''
if postbuilds:
assert ',' not in command
suffix = ',,1' # Tell do_cmd to honor $POSTBUILDS
self.WriteMakeRule(outputs, inputs,
actions = ['$(call do_cmd,%s%s)' % (command, suffix)],
comment = comment,
command = command,
force = True)
# Add our outputs to the list of targets we read depfiles from.
# all_deps is only used for deps file reading, and for deps files we replace
# spaces with ? because escaping doesn't work with make's $(sort) and
# other functions.
outputs = [QuoteSpaces(o, SPACE_REPLACEMENT) for o in outputs]
self.WriteLn('all_deps += %s' % ' '.join(outputs))
def WriteMakeRule(self, outputs, inputs, actions=None, comment=None,
order_only=False, force=False, phony=False, command=None):
"""Write a Makefile rule, with some extra tricks.
outputs: a list of outputs for the rule (note: this is not directly
supported by make; see comments below)
inputs: a list of inputs for the rule
actions: a list of shell commands to run for the rule
comment: a comment to put in the Makefile above the rule (also useful
for making this Python script's code self-documenting)
order_only: if true, makes the dependency order-only
force: if true, include FORCE_DO_CMD as an order-only dep
phony: if true, the rule does not actually generate the named output, the
output is just a name to run the rule
command: (optional) command name to generate unambiguous labels
"""
outputs = map(QuoteSpaces, outputs)
inputs = map(QuoteSpaces, inputs)
if comment:
self.WriteLn('# ' + comment)
if phony:
self.WriteLn('.PHONY: ' + ' '.join(outputs))
if actions:
self.WriteLn("%s: TOOLSET := $(TOOLSET)" % outputs[0])
force_append = ' FORCE_DO_CMD' if force else ''
if order_only:
# Order only rule: Just write a simple rule.
# TODO(evanm): just make order_only a list of deps instead of this hack.
self.WriteLn('%s: | %s%s' %
(' '.join(outputs), ' '.join(inputs), force_append))
elif len(outputs) == 1:
# Regular rule, one output: Just write a simple rule.
self.WriteLn('%s: %s%s' % (outputs[0], ' '.join(inputs), force_append))
else:
# Regular rule, more than one output: Multiple outputs are tricky in
# make. We will write three rules:
# - All outputs depend on an intermediate file.
# - Make .INTERMEDIATE depend on the intermediate.
# - The intermediate file depends on the inputs and executes the
# actual command.
# - The intermediate recipe will 'touch' the intermediate file.
      # - The multi-output rule will have a do-nothing recipe.
intermediate = "%s.intermediate" % (command if command else self.target)
self.WriteLn('%s: %s' % (' '.join(outputs), intermediate))
      self.WriteLn('\t@:')
self.WriteLn('%s: %s' % ('.INTERMEDIATE', intermediate))
self.WriteLn('%s: %s%s' %
(intermediate, ' '.join(inputs), force_append))
actions.insert(0, '$(call do_cmd,touch)')
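      # Illustrative sketch: with outputs 'a.h a.cc', input 'a.proto', and
      # command 'protoc', the rules written above and below come out roughly
      # as
      #
      #   a.h a.cc: protoc.intermediate
      #       @:
      #   .INTERMEDIATE: protoc.intermediate
      #   protoc.intermediate: a.proto FORCE_DO_CMD
      #       $(call do_cmd,touch)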
if actions:
for action in actions:
self.WriteLn('\t%s' % action)
self.WriteLn()
def WriteAndroidNdkModuleRule(self, module_name, all_sources, link_deps):
"""Write a set of LOCAL_XXX definitions for Android NDK.
These variable definitions will be used by Android NDK but do nothing for
non-Android applications.
Arguments:
module_name: Android NDK module name, which must be unique among all
module names.
all_sources: A list of source files (will be filtered by Compilable).
link_deps: A list of link dependencies, which must be sorted in
the order from dependencies to dependents.
"""
if self.type not in ('executable', 'shared_library', 'static_library'):
return
self.WriteLn('# Variable definitions for Android applications')
self.WriteLn('include $(CLEAR_VARS)')
self.WriteLn('LOCAL_MODULE := ' + module_name)
self.WriteLn('LOCAL_CFLAGS := $(CFLAGS_$(BUILDTYPE)) '
'$(DEFS_$(BUILDTYPE)) '
                 # LOCAL_CFLAGS is applied to both C and C++. There is
# no way to specify $(CFLAGS_C_$(BUILDTYPE)) only for C
# sources.
'$(CFLAGS_C_$(BUILDTYPE)) '
# $(INCS_$(BUILDTYPE)) includes the prefix '-I' while
# LOCAL_C_INCLUDES does not expect it. So put it in
# LOCAL_CFLAGS.
'$(INCS_$(BUILDTYPE))')
# LOCAL_CXXFLAGS is obsolete and LOCAL_CPPFLAGS is preferred.
self.WriteLn('LOCAL_CPPFLAGS := $(CFLAGS_CC_$(BUILDTYPE))')
self.WriteLn('LOCAL_C_INCLUDES :=')
self.WriteLn('LOCAL_LDLIBS := $(LDFLAGS_$(BUILDTYPE)) $(LIBS)')
# Detect the C++ extension.
cpp_ext = {'.cc': 0, '.cpp': 0, '.cxx': 0}
default_cpp_ext = '.cpp'
for filename in all_sources:
ext = os.path.splitext(filename)[1]
if ext in cpp_ext:
cpp_ext[ext] += 1
if cpp_ext[ext] > cpp_ext[default_cpp_ext]:
default_cpp_ext = ext
self.WriteLn('LOCAL_CPP_EXTENSION := ' + default_cpp_ext)
self.WriteList(map(self.Absolutify, filter(Compilable, all_sources)),
'LOCAL_SRC_FILES')
# Filter out those which do not match prefix and suffix and produce
# the resulting list without prefix and suffix.
def DepsToModules(deps, prefix, suffix):
modules = []
for filepath in deps:
filename = os.path.basename(filepath)
if filename.startswith(prefix) and filename.endswith(suffix):
modules.append(filename[len(prefix):-len(suffix)])
return modules
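    # Illustrative example: DepsToModules(['out/libfoo.so', 'out/bar.a'],
    # 'lib', '.so') returns ['foo']; entries that do not match both the
    # prefix and the suffix are dropped.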
# Retrieve the default value of 'SHARED_LIB_SUFFIX'
params = {'flavor': 'linux'}
default_variables = {}
CalculateVariables(default_variables, params)
self.WriteList(
DepsToModules(link_deps,
generator_default_variables['SHARED_LIB_PREFIX'],
default_variables['SHARED_LIB_SUFFIX']),
'LOCAL_SHARED_LIBRARIES')
self.WriteList(
DepsToModules(link_deps,
generator_default_variables['STATIC_LIB_PREFIX'],
generator_default_variables['STATIC_LIB_SUFFIX']),
'LOCAL_STATIC_LIBRARIES')
if self.type == 'executable':
self.WriteLn('include $(BUILD_EXECUTABLE)')
elif self.type == 'shared_library':
self.WriteLn('include $(BUILD_SHARED_LIBRARY)')
elif self.type == 'static_library':
self.WriteLn('include $(BUILD_STATIC_LIBRARY)')
self.WriteLn()
def WriteLn(self, text=''):
self.fp.write(text + '\n')
def GetSortedXcodeEnv(self, additional_settings=None):
return gyp.xcode_emulation.GetSortedXcodeEnv(
self.xcode_settings, "$(abs_builddir)",
os.path.join("$(abs_srcdir)", self.path), "$(BUILDTYPE)",
additional_settings)
def GetSortedXcodePostbuildEnv(self):
# CHROMIUM_STRIP_SAVE_FILE is a chromium-specific hack.
# TODO(thakis): It would be nice to have some general mechanism instead.
strip_save_file = self.xcode_settings.GetPerTargetSetting(
'CHROMIUM_STRIP_SAVE_FILE', '')
# Even if strip_save_file is empty, explicitly write it. Else a postbuild
# might pick up an export from an earlier target.
return self.GetSortedXcodeEnv(
additional_settings={'CHROMIUM_STRIP_SAVE_FILE': strip_save_file})
def WriteSortedXcodeEnv(self, target, env):
for k, v in env:
# For
# foo := a\ b
# the escaped space does the right thing. For
# export foo := a\ b
      # it does not -- the backslash reaches the env as a literal character.
# So don't escape spaces in |env[k]|.
self.WriteLn('%s: export %s := %s' % (QuoteSpaces(target), k, v))
def Objectify(self, path):
"""Convert a path to its output directory form."""
if '$(' in path:
path = path.replace('$(obj)/', '$(obj).%s/$(TARGET)/' % self.toolset)
if not '$(obj)' in path:
path = '$(obj).%s/$(TARGET)/%s' % (self.toolset, path)
return path
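    # Illustrative examples (toolset 'target'): 'src/foo.o' becomes
    # '$(obj).target/$(TARGET)/src/foo.o', and '$(obj)/foo.o' becomes
    # '$(obj).target/$(TARGET)/foo.o'.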
def Pchify(self, path, lang):
"""Convert a prefix header path to its output directory form."""
path = self.Absolutify(path)
if '$(' in path:
path = path.replace('$(obj)/', '$(obj).%s/$(TARGET)/pch-%s' %
(self.toolset, lang))
return path
return '$(obj).%s/$(TARGET)/pch-%s/%s' % (self.toolset, lang, path)
def Absolutify(self, path):
"""Convert a subdirectory-relative path into a base-relative path.
Skips over paths that contain variables."""
if '$(' in path:
# Don't call normpath in this case, as it might collapse the
# path too aggressively if it features '..'. However it's still
# important to strip trailing slashes.
return path.rstrip('/')
return os.path.normpath(os.path.join(self.path, path))
def ExpandInputRoot(self, template, expansion, dirname):
if '%(INPUT_ROOT)s' not in template and '%(INPUT_DIRNAME)s' not in template:
return template
path = template % {
'INPUT_ROOT': expansion,
'INPUT_DIRNAME': dirname,
}
return path
def _InstallableTargetInstallPath(self):
"""Returns the location of the final output for an installable target."""
# Xcode puts shared_library results into PRODUCT_DIR, and some gyp files
# rely on this. Emulate this behavior for mac.
# XXX(TooTallNate): disabling this code since we don't want this behavior...
#if (self.type == 'shared_library' and
# (self.flavor != 'mac' or self.toolset != 'target')):
# # Install all shared libs into a common directory (per toolset) for
# # convenient access with LD_LIBRARY_PATH.
# return '$(builddir)/lib.%s/%s' % (self.toolset, self.alias)
return '$(builddir)/' + self.alias
def WriteAutoRegenerationRule(params, root_makefile, makefile_name,
build_files):
"""Write the target to regenerate the Makefile."""
options = params['options']
build_files_args = [gyp.common.RelativePath(filename, options.toplevel_dir)
for filename in params['build_files_arg']]
gyp_binary = gyp.common.FixIfRelativePath(params['gyp_binary'],
options.toplevel_dir)
if not gyp_binary.startswith(os.sep):
gyp_binary = os.path.join('.', gyp_binary)
root_makefile.write(
"quiet_cmd_regen_makefile = ACTION Regenerating $@\n"
"cmd_regen_makefile = cd $(srcdir); %(cmd)s\n"
"%(makefile_name)s: %(deps)s\n"
"\t$(call do_cmd,regen_makefile)\n\n" % {
'makefile_name': makefile_name,
'deps': ' '.join(map(Sourceify, build_files)),
'cmd': gyp.common.EncodePOSIXShellList(
[gyp_binary, '-fmake'] +
gyp.RegenerateFlags(options) +
build_files_args)})
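  # Illustrative sketch (hypothetical paths): with gyp invoked as './gyp' on
  # a single build file 'all.gyp', the write above produces roughly (plus any
  # regeneration flags)
  #
  #   quiet_cmd_regen_makefile = ACTION Regenerating $@
  #   cmd_regen_makefile = cd $(srcdir); ./gyp -fmake all.gyp
  #   Makefile: all.gyp
  #       $(call do_cmd,regen_makefile)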
def PerformBuild(data, configurations, params):
options = params['options']
for config in configurations:
arguments = ['make']
if options.toplevel_dir and options.toplevel_dir != '.':
arguments += '-C', options.toplevel_dir
arguments.append('BUILDTYPE=' + config)
print 'Building [%s]: %s' % (config, arguments)
subprocess.check_call(arguments)
def GenerateOutput(target_list, target_dicts, data, params):
options = params['options']
flavor = gyp.common.GetFlavor(params)
generator_flags = params.get('generator_flags', {})
builddir_name = generator_flags.get('output_dir', 'out')
android_ndk_version = generator_flags.get('android_ndk_version', None)
default_target = generator_flags.get('default_target', 'all')
def CalculateMakefilePath(build_file, base_name):
"""Determine where to write a Makefile for a given gyp file."""
# Paths in gyp files are relative to the .gyp file, but we want
# paths relative to the source root for the master makefile. Grab
# the path of the .gyp file as the base to relativize against.
# E.g. "foo/bar" when we're constructing targets for "foo/bar/baz.gyp".
base_path = gyp.common.RelativePath(os.path.dirname(build_file),
options.depth)
# We write the file in the base_path directory.
output_file = os.path.join(options.depth, base_path, base_name)
if options.generator_output:
output_file = os.path.join(
options.depth, options.generator_output, base_path, base_name)
base_path = gyp.common.RelativePath(os.path.dirname(build_file),
options.toplevel_dir)
return base_path, output_file
# TODO: search for the first non-'Default' target. This can go
# away when we add verification that all targets have the
# necessary configurations.
default_configuration = None
toolsets = set([target_dicts[target]['toolset'] for target in target_list])
for target in target_list:
spec = target_dicts[target]
if spec['default_configuration'] != 'Default':
default_configuration = spec['default_configuration']
break
if not default_configuration:
default_configuration = 'Default'
srcdir = '.'
makefile_name = 'Makefile' + options.suffix
makefile_path = os.path.join(options.toplevel_dir, makefile_name)
if options.generator_output:
global srcdir_prefix
makefile_path = os.path.join(
options.toplevel_dir, options.generator_output, makefile_name)
srcdir = gyp.common.RelativePath(srcdir, options.generator_output)
srcdir_prefix = '$(srcdir)/'
  flock_command = 'flock'
copy_archive_arguments = '-af'
header_params = {
'default_target': default_target,
'builddir': builddir_name,
'default_configuration': default_configuration,
'flock': flock_command,
'flock_index': 1,
'link_commands': LINK_COMMANDS_LINUX,
'extra_commands': '',
'srcdir': srcdir,
'copy_archive_args': copy_archive_arguments,
}
if flavor == 'mac':
flock_command = './gyp-mac-tool flock'
header_params.update({
'flock': flock_command,
'flock_index': 2,
'link_commands': LINK_COMMANDS_MAC,
'extra_commands': SHARED_HEADER_MAC_COMMANDS,
})
elif flavor == 'android':
header_params.update({
'link_commands': LINK_COMMANDS_ANDROID,
})
elif flavor == 'solaris':
header_params.update({
'flock': './gyp-flock-tool flock',
'flock_index': 2,
})
elif flavor == 'freebsd':
# Note: OpenBSD has sysutils/flock. lockf seems to be FreeBSD specific.
header_params.update({
'flock': 'lockf',
})
elif flavor == 'openbsd':
copy_archive_arguments = '-pPRf'
header_params.update({
'copy_archive_args': copy_archive_arguments,
})
elif flavor == 'aix':
copy_archive_arguments = '-pPRf'
header_params.update({
'copy_archive_args': copy_archive_arguments,
'link_commands': LINK_COMMANDS_AIX,
'flock': './gyp-flock-tool flock',
'flock_index': 2,
})
header_params.update({
'CC.target': GetEnvironFallback(('CC_target', 'CC'), '$(CC)'),
'AR.target': GetEnvironFallback(('AR_target', 'AR'), '$(AR)'),
'CXX.target': GetEnvironFallback(('CXX_target', 'CXX'), '$(CXX)'),
'LINK.target': GetEnvironFallback(('LINK_target', 'LINK'), '$(LINK)'),
'CC.host': GetEnvironFallback(('CC_host', 'CC'), 'gcc'),
'AR.host': GetEnvironFallback(('AR_host', 'AR'), 'ar'),
'CXX.host': GetEnvironFallback(('CXX_host', 'CXX'), 'g++'),
'LINK.host': GetEnvironFallback(('LINK_host', 'LINK'), '$(CXX.host)'),
})
build_file, _, _ = gyp.common.ParseQualifiedTarget(target_list[0])
make_global_settings_array = data[build_file].get('make_global_settings', [])
wrappers = {}
for key, value in make_global_settings_array:
if key.endswith('_wrapper'):
wrappers[key[:-len('_wrapper')]] = '$(abspath %s)' % value
make_global_settings = ''
for key, value in make_global_settings_array:
if re.match('.*_wrapper', key):
continue
if value[0] != '$':
value = '$(abspath %s)' % value
wrapper = wrappers.get(key)
if wrapper:
value = '%s %s' % (wrapper, value)
del wrappers[key]
if key in ('CC', 'CC.host', 'CXX', 'CXX.host'):
make_global_settings += (
'ifneq (,$(filter $(origin %s), undefined default))\n' % key)
# Let gyp-time envvars win over global settings.
env_key = key.replace('.', '_') # CC.host -> CC_host
if env_key in os.environ:
value = os.environ[env_key]
make_global_settings += ' %s = %s\n' % (key, value)
make_global_settings += 'endif\n'
else:
make_global_settings += '%s ?= %s\n' % (key, value)
# TODO(ukai): define cmd when only wrapper is specified in
# make_global_settings.
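  # Illustrative sketch: a make_global_settings entry ('CC', 'clang'), with
  # no CC in the gyp-time environment, becomes
  #
  #   ifneq (,$(filter $(origin CC), undefined default))
  #     CC = $(abspath clang)
  #   endif
  #
  # while any other key, e.g. ('LD', 'gold'), is written with '?=' as
  # 'LD ?= $(abspath gold)'.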
header_params['make_global_settings'] = make_global_settings
gyp.common.EnsureDirExists(makefile_path)
root_makefile = open(makefile_path, 'w')
root_makefile.write(SHARED_HEADER % header_params)
  # Currently all versions have the same effect, but in the future the
  # behavior could differ.
if android_ndk_version:
root_makefile.write(
'# Define LOCAL_PATH for build of Android applications.\n'
'LOCAL_PATH := $(call my-dir)\n'
'\n')
for toolset in toolsets:
root_makefile.write('TOOLSET := %s\n' % toolset)
WriteRootHeaderSuffixRules(root_makefile)
# Put build-time support tools next to the root Makefile.
dest_path = os.path.dirname(makefile_path)
gyp.common.CopyTool(flavor, dest_path)
# Find the list of targets that derive from the gyp file(s) being built.
needed_targets = set()
for build_file in params['build_files']:
for target in gyp.common.AllTargets(target_list, target_dicts, build_file):
needed_targets.add(target)
build_files = set()
include_list = set()
for qualified_target in target_list:
build_file, target, toolset = gyp.common.ParseQualifiedTarget(
qualified_target)
this_make_global_settings = data[build_file].get('make_global_settings', [])
assert make_global_settings_array == this_make_global_settings, (
"make_global_settings needs to be the same for all targets. %s vs. %s" %
(this_make_global_settings, make_global_settings))
build_files.add(gyp.common.RelativePath(build_file, options.toplevel_dir))
included_files = data[build_file]['included_files']
for included_file in included_files:
# The included_files entries are relative to the dir of the build file
# that included them, so we have to undo that and then make them relative
# to the root dir.
relative_include_file = gyp.common.RelativePath(
gyp.common.UnrelativePath(included_file, build_file),
options.toplevel_dir)
abs_include_file = os.path.abspath(relative_include_file)
# If the include file is from the ~/.gyp dir, we should use absolute path
# so that relocating the src dir doesn't break the path.
if (params['home_dot_gyp'] and
abs_include_file.startswith(params['home_dot_gyp'])):
build_files.add(abs_include_file)
else:
build_files.add(relative_include_file)
base_path, output_file = CalculateMakefilePath(build_file,
target + '.' + toolset + options.suffix + '.mk')
spec = target_dicts[qualified_target]
configs = spec['configurations']
if flavor == 'mac':
gyp.xcode_emulation.MergeGlobalXcodeSettingsToSpec(data[build_file], spec)
writer = MakefileWriter(generator_flags, flavor)
writer.Write(qualified_target, base_path, output_file, spec, configs,
part_of_all=qualified_target in needed_targets)
# Our root_makefile lives at the source root. Compute the relative path
# from there to the output_file for including.
mkfile_rel_path = gyp.common.RelativePath(output_file,
os.path.dirname(makefile_path))
include_list.add(mkfile_rel_path)
# Write out per-gyp (sub-project) Makefiles.
depth_rel_path = gyp.common.RelativePath(options.depth, os.getcwd())
for build_file in build_files:
# The paths in build_files were relativized above, so undo that before
# testing against the non-relativized items in target_list and before
# calculating the Makefile path.
build_file = os.path.join(depth_rel_path, build_file)
gyp_targets = [target_dicts[target]['target_name'] for target in target_list
if target.startswith(build_file) and
target in needed_targets]
# Only generate Makefiles for gyp files with targets.
if not gyp_targets:
continue
base_path, output_file = CalculateMakefilePath(build_file,
os.path.splitext(os.path.basename(build_file))[0] + '.Makefile')
makefile_rel_path = gyp.common.RelativePath(os.path.dirname(makefile_path),
os.path.dirname(output_file))
writer.WriteSubMake(output_file, makefile_rel_path, gyp_targets,
builddir_name)
# Write out the sorted list of includes.
root_makefile.write('\n')
for include_file in sorted(include_list):
# We wrap each .mk include in an if statement so users can tell make to
    # not load a file by setting NO_LOAD. The make code below says: only
# load the .mk file if the .mk filename doesn't start with a token in
# NO_LOAD.
root_makefile.write(
"ifeq ($(strip $(foreach prefix,$(NO_LOAD),\\\n"
" $(findstring $(join ^,$(prefix)),\\\n"
" $(join ^," + include_file + ")))),)\n")
root_makefile.write(" include " + include_file + "\n")
root_makefile.write("endif\n")
root_makefile.write('\n')
if (not generator_flags.get('standalone')
and generator_flags.get('auto_regeneration', True)):
WriteAutoRegenerationRule(params, root_makefile, makefile_name, build_files)
root_makefile.write(SHARED_FOOTER)
root_makefile.close()
| mit |
ryfeus/lambda-packs | Tensorflow_Pandas_Numpy/source3.6/tensorflow/contrib/session_bundle/manifest_pb2.py | 2 | 18712 | # Generated by the protocol buffer compiler. DO NOT EDIT!
# source: tensorflow/contrib/session_bundle/manifest.proto
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
from google.protobuf import descriptor_pb2
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
DESCRIPTOR = _descriptor.FileDescriptor(
name='tensorflow/contrib/session_bundle/manifest.proto',
package='tensorflow.serving',
syntax='proto3',
serialized_pb=_b('\n0tensorflow/contrib/session_bundle/manifest.proto\x12\x12tensorflow.serving\"\xec\x01\n\nSignatures\x12\x38\n\x11\x64\x65\x66\x61ult_signature\x18\x01 \x01(\x0b\x32\x1d.tensorflow.serving.Signature\x12M\n\x10named_signatures\x18\x02 \x03(\x0b\x32\x33.tensorflow.serving.Signatures.NamedSignaturesEntry\x1aU\n\x14NamedSignaturesEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12,\n\x05value\x18\x02 \x01(\x0b\x32\x1d.tensorflow.serving.Signature:\x02\x38\x01\"$\n\rTensorBinding\x12\x13\n\x0btensor_name\x18\x01 \x01(\t\"X\n\tAssetFile\x12\x39\n\x0etensor_binding\x18\x01 \x01(\x0b\x32!.tensorflow.serving.TensorBinding\x12\x10\n\x08\x66ilename\x18\x02 \x01(\t\"\xf0\x01\n\tSignature\x12G\n\x14regression_signature\x18\x01 \x01(\x0b\x32\'.tensorflow.serving.RegressionSignatureH\x00\x12O\n\x18\x63lassification_signature\x18\x02 \x01(\x0b\x32+.tensorflow.serving.ClassificationSignatureH\x00\x12\x41\n\x11generic_signature\x18\x03 \x01(\x0b\x32$.tensorflow.serving.GenericSignatureH\x00\x42\x06\n\x04type\"z\n\x13RegressionSignature\x12\x30\n\x05input\x18\x01 \x01(\x0b\x32!.tensorflow.serving.TensorBinding\x12\x31\n\x06output\x18\x02 \x01(\x0b\x32!.tensorflow.serving.TensorBinding\"\xb2\x01\n\x17\x43lassificationSignature\x12\x30\n\x05input\x18\x01 \x01(\x0b\x32!.tensorflow.serving.TensorBinding\x12\x32\n\x07\x63lasses\x18\x02 \x01(\x0b\x32!.tensorflow.serving.TensorBinding\x12\x31\n\x06scores\x18\x03 \x01(\x0b\x32!.tensorflow.serving.TensorBinding\"\x9d\x01\n\x10GenericSignature\x12:\n\x03map\x18\x01 \x03(\x0b\x32-.tensorflow.serving.GenericSignature.MapEntry\x1aM\n\x08MapEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\x30\n\x05value\x18\x02 \x01(\x0b\x32!.tensorflow.serving.TensorBinding:\x02\x38\x01\x62\x06proto3')
)
_SIGNATURES_NAMEDSIGNATURESENTRY = _descriptor.Descriptor(
name='NamedSignaturesEntry',
full_name='tensorflow.serving.Signatures.NamedSignaturesEntry',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='key', full_name='tensorflow.serving.Signatures.NamedSignaturesEntry.key', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='value', full_name='tensorflow.serving.Signatures.NamedSignaturesEntry.value', index=1,
number=2, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=_descriptor._ParseOptions(descriptor_pb2.MessageOptions(), _b('8\001')),
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=224,
serialized_end=309,
)
_SIGNATURES = _descriptor.Descriptor(
name='Signatures',
full_name='tensorflow.serving.Signatures',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='default_signature', full_name='tensorflow.serving.Signatures.default_signature', index=0,
number=1, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='named_signatures', full_name='tensorflow.serving.Signatures.named_signatures', index=1,
number=2, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[_SIGNATURES_NAMEDSIGNATURESENTRY, ],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=73,
serialized_end=309,
)
_TENSORBINDING = _descriptor.Descriptor(
name='TensorBinding',
full_name='tensorflow.serving.TensorBinding',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='tensor_name', full_name='tensorflow.serving.TensorBinding.tensor_name', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=311,
serialized_end=347,
)
_ASSETFILE = _descriptor.Descriptor(
name='AssetFile',
full_name='tensorflow.serving.AssetFile',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='tensor_binding', full_name='tensorflow.serving.AssetFile.tensor_binding', index=0,
number=1, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='filename', full_name='tensorflow.serving.AssetFile.filename', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=349,
serialized_end=437,
)
_SIGNATURE = _descriptor.Descriptor(
name='Signature',
full_name='tensorflow.serving.Signature',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='regression_signature', full_name='tensorflow.serving.Signature.regression_signature', index=0,
number=1, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='classification_signature', full_name='tensorflow.serving.Signature.classification_signature', index=1,
number=2, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='generic_signature', full_name='tensorflow.serving.Signature.generic_signature', index=2,
number=3, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
_descriptor.OneofDescriptor(
name='type', full_name='tensorflow.serving.Signature.type',
index=0, containing_type=None, fields=[]),
],
serialized_start=440,
serialized_end=680,
)
_REGRESSIONSIGNATURE = _descriptor.Descriptor(
name='RegressionSignature',
full_name='tensorflow.serving.RegressionSignature',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='input', full_name='tensorflow.serving.RegressionSignature.input', index=0,
number=1, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='output', full_name='tensorflow.serving.RegressionSignature.output', index=1,
number=2, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=682,
serialized_end=804,
)
_CLASSIFICATIONSIGNATURE = _descriptor.Descriptor(
name='ClassificationSignature',
full_name='tensorflow.serving.ClassificationSignature',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='input', full_name='tensorflow.serving.ClassificationSignature.input', index=0,
number=1, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='classes', full_name='tensorflow.serving.ClassificationSignature.classes', index=1,
number=2, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='scores', full_name='tensorflow.serving.ClassificationSignature.scores', index=2,
number=3, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=807,
serialized_end=985,
)
_GENERICSIGNATURE_MAPENTRY = _descriptor.Descriptor(
name='MapEntry',
full_name='tensorflow.serving.GenericSignature.MapEntry',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='key', full_name='tensorflow.serving.GenericSignature.MapEntry.key', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='value', full_name='tensorflow.serving.GenericSignature.MapEntry.value', index=1,
number=2, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=_descriptor._ParseOptions(descriptor_pb2.MessageOptions(), _b('8\001')),
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=1068,
serialized_end=1145,
)
_GENERICSIGNATURE = _descriptor.Descriptor(
name='GenericSignature',
full_name='tensorflow.serving.GenericSignature',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='map', full_name='tensorflow.serving.GenericSignature.map', index=0,
number=1, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[_GENERICSIGNATURE_MAPENTRY, ],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=988,
serialized_end=1145,
)
_SIGNATURES_NAMEDSIGNATURESENTRY.fields_by_name['value'].message_type = _SIGNATURE
_SIGNATURES_NAMEDSIGNATURESENTRY.containing_type = _SIGNATURES
_SIGNATURES.fields_by_name['default_signature'].message_type = _SIGNATURE
_SIGNATURES.fields_by_name['named_signatures'].message_type = _SIGNATURES_NAMEDSIGNATURESENTRY
_ASSETFILE.fields_by_name['tensor_binding'].message_type = _TENSORBINDING
_SIGNATURE.fields_by_name['regression_signature'].message_type = _REGRESSIONSIGNATURE
_SIGNATURE.fields_by_name['classification_signature'].message_type = _CLASSIFICATIONSIGNATURE
_SIGNATURE.fields_by_name['generic_signature'].message_type = _GENERICSIGNATURE
_SIGNATURE.oneofs_by_name['type'].fields.append(
_SIGNATURE.fields_by_name['regression_signature'])
_SIGNATURE.fields_by_name['regression_signature'].containing_oneof = _SIGNATURE.oneofs_by_name['type']
_SIGNATURE.oneofs_by_name['type'].fields.append(
_SIGNATURE.fields_by_name['classification_signature'])
_SIGNATURE.fields_by_name['classification_signature'].containing_oneof = _SIGNATURE.oneofs_by_name['type']
_SIGNATURE.oneofs_by_name['type'].fields.append(
_SIGNATURE.fields_by_name['generic_signature'])
_SIGNATURE.fields_by_name['generic_signature'].containing_oneof = _SIGNATURE.oneofs_by_name['type']
_REGRESSIONSIGNATURE.fields_by_name['input'].message_type = _TENSORBINDING
_REGRESSIONSIGNATURE.fields_by_name['output'].message_type = _TENSORBINDING
_CLASSIFICATIONSIGNATURE.fields_by_name['input'].message_type = _TENSORBINDING
_CLASSIFICATIONSIGNATURE.fields_by_name['classes'].message_type = _TENSORBINDING
_CLASSIFICATIONSIGNATURE.fields_by_name['scores'].message_type = _TENSORBINDING
_GENERICSIGNATURE_MAPENTRY.fields_by_name['value'].message_type = _TENSORBINDING
_GENERICSIGNATURE_MAPENTRY.containing_type = _GENERICSIGNATURE
_GENERICSIGNATURE.fields_by_name['map'].message_type = _GENERICSIGNATURE_MAPENTRY
DESCRIPTOR.message_types_by_name['Signatures'] = _SIGNATURES
DESCRIPTOR.message_types_by_name['TensorBinding'] = _TENSORBINDING
DESCRIPTOR.message_types_by_name['AssetFile'] = _ASSETFILE
DESCRIPTOR.message_types_by_name['Signature'] = _SIGNATURE
DESCRIPTOR.message_types_by_name['RegressionSignature'] = _REGRESSIONSIGNATURE
DESCRIPTOR.message_types_by_name['ClassificationSignature'] = _CLASSIFICATIONSIGNATURE
DESCRIPTOR.message_types_by_name['GenericSignature'] = _GENERICSIGNATURE
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
Signatures = _reflection.GeneratedProtocolMessageType('Signatures', (_message.Message,), dict(
NamedSignaturesEntry = _reflection.GeneratedProtocolMessageType('NamedSignaturesEntry', (_message.Message,), dict(
DESCRIPTOR = _SIGNATURES_NAMEDSIGNATURESENTRY,
__module__ = 'tensorflow.contrib.session_bundle.manifest_pb2'
# @@protoc_insertion_point(class_scope:tensorflow.serving.Signatures.NamedSignaturesEntry)
))
,
DESCRIPTOR = _SIGNATURES,
__module__ = 'tensorflow.contrib.session_bundle.manifest_pb2'
# @@protoc_insertion_point(class_scope:tensorflow.serving.Signatures)
))
_sym_db.RegisterMessage(Signatures)
_sym_db.RegisterMessage(Signatures.NamedSignaturesEntry)
TensorBinding = _reflection.GeneratedProtocolMessageType('TensorBinding', (_message.Message,), dict(
DESCRIPTOR = _TENSORBINDING,
__module__ = 'tensorflow.contrib.session_bundle.manifest_pb2'
# @@protoc_insertion_point(class_scope:tensorflow.serving.TensorBinding)
))
_sym_db.RegisterMessage(TensorBinding)
AssetFile = _reflection.GeneratedProtocolMessageType('AssetFile', (_message.Message,), dict(
DESCRIPTOR = _ASSETFILE,
__module__ = 'tensorflow.contrib.session_bundle.manifest_pb2'
# @@protoc_insertion_point(class_scope:tensorflow.serving.AssetFile)
))
_sym_db.RegisterMessage(AssetFile)
Signature = _reflection.GeneratedProtocolMessageType('Signature', (_message.Message,), dict(
DESCRIPTOR = _SIGNATURE,
__module__ = 'tensorflow.contrib.session_bundle.manifest_pb2'
# @@protoc_insertion_point(class_scope:tensorflow.serving.Signature)
))
_sym_db.RegisterMessage(Signature)
RegressionSignature = _reflection.GeneratedProtocolMessageType('RegressionSignature', (_message.Message,), dict(
DESCRIPTOR = _REGRESSIONSIGNATURE,
__module__ = 'tensorflow.contrib.session_bundle.manifest_pb2'
# @@protoc_insertion_point(class_scope:tensorflow.serving.RegressionSignature)
))
_sym_db.RegisterMessage(RegressionSignature)
ClassificationSignature = _reflection.GeneratedProtocolMessageType('ClassificationSignature', (_message.Message,), dict(
DESCRIPTOR = _CLASSIFICATIONSIGNATURE,
__module__ = 'tensorflow.contrib.session_bundle.manifest_pb2'
# @@protoc_insertion_point(class_scope:tensorflow.serving.ClassificationSignature)
))
_sym_db.RegisterMessage(ClassificationSignature)
GenericSignature = _reflection.GeneratedProtocolMessageType('GenericSignature', (_message.Message,), dict(
MapEntry = _reflection.GeneratedProtocolMessageType('MapEntry', (_message.Message,), dict(
DESCRIPTOR = _GENERICSIGNATURE_MAPENTRY,
__module__ = 'tensorflow.contrib.session_bundle.manifest_pb2'
# @@protoc_insertion_point(class_scope:tensorflow.serving.GenericSignature.MapEntry)
))
,
DESCRIPTOR = _GENERICSIGNATURE,
__module__ = 'tensorflow.contrib.session_bundle.manifest_pb2'
# @@protoc_insertion_point(class_scope:tensorflow.serving.GenericSignature)
))
_sym_db.RegisterMessage(GenericSignature)
_sym_db.RegisterMessage(GenericSignature.MapEntry)
_SIGNATURES_NAMEDSIGNATURESENTRY.has_options = True
_SIGNATURES_NAMEDSIGNATURESENTRY._options = _descriptor._ParseOptions(descriptor_pb2.MessageOptions(), _b('8\001'))
_GENERICSIGNATURE_MAPENTRY.has_options = True
_GENERICSIGNATURE_MAPENTRY._options = _descriptor._ParseOptions(descriptor_pb2.MessageOptions(), _b('8\001'))
# @@protoc_insertion_point(module_scope)
| mit |
ClearCorp/odoo-clearcorp | TODO-7.0/sneldev_magento/wizard/sneldev_magento_categories_import.py | 4 | 1660 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import pooler
import os
from export_tools import *
from osv import osv, fields
class wiz_sneldev_categories_import(osv.osv_memory):
_name = 'sneldev.categories.import'
_description = 'Import categories'
_columns = {
}
_defaults = {
}
def do_categories_import(self, cr, uid, ids, context=None):
if (self.pool.get('sneldev.magento').import_categories(cr, uid) < 0):
raise osv.except_osv(('Warning'), ('Import failed, please refer to log file for failure details.'))
return {'type': 'ir.actions.act_window_close'}
wiz_sneldev_categories_import()
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
takeshineshiro/neutron | neutron/db/migration/alembic_migrations/versions/liberty/expand/1c844d1677f7_dns_nameservers_order.py | 36 | 1056 | # Copyright 2015 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
"""add order to dnsnameservers
Revision ID: 1c844d1677f7
Revises: 26c371498592
Create Date: 2015-07-21 22:59:03.383850
"""
# revision identifiers, used by Alembic.
revision = '1c844d1677f7'
down_revision = '26c371498592'
from alembic import op
import sqlalchemy as sa
def upgrade():
op.add_column('dnsnameservers',
sa.Column('order', sa.Integer(),
server_default='0', nullable=False))
| apache-2.0 |
mgautierfr/devparrot | devparrot/commands/split.py | 1 | 1213 | # This file is part of DevParrot.
#
# Author: Matthieu Gautier <matthieu.gautier@devparrot.org>
#
# DevParrot is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# DevParrot is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with DevParrot. If not, see <http://www.gnu.org/licenses/>.
#
#
# Copyright 2011-2013 Matthieu Gautier
from devparrot.core.command import Alias
from devparrot.core.constraints import Boolean
@Alias(
vertical = Boolean(default= lambda : False, help="Do we split vertically")
)
def split(vertical):
"""Split workspace at the current view position and create a new pane"""
return "core.split %s"%vertical
@Alias()
def unsplit():
"""unsplit (merge) two separate panes"""
return "core.unsplit"
| gpl-3.0 |
Endika/django | django/utils/text.py | 11 | 14624 | from __future__ import unicode_literals
import re
import unicodedata
from gzip import GzipFile
from io import BytesIO
from django.utils import six
from django.utils.encoding import force_text
from django.utils.functional import SimpleLazyObject, keep_lazy, keep_lazy_text
from django.utils.safestring import SafeText, mark_safe
from django.utils.six.moves import html_entities
from django.utils.translation import pgettext, ugettext as _, ugettext_lazy
if six.PY2:
# Import force_unicode even though this module doesn't use it, because some
# people rely on it being here.
from django.utils.encoding import force_unicode # NOQA
# Capitalizes the first letter of a string.
def capfirst(x):
return x and force_text(x)[0].upper() + force_text(x)[1:]
capfirst = keep_lazy_text(capfirst)
# Set up regular expressions
re_words = re.compile(r'<.*?>|((?:\w[-\w]*|&.*?;)+)', re.U | re.S)
re_chars = re.compile(r'<.*?>|(.)', re.U | re.S)
re_tag = re.compile(r'<(/)?([^ ]+?)(?:(\s*/)| .*?)?>', re.S)
re_newlines = re.compile(r'\r\n|\r') # Used in normalize_newlines
re_camel_case = re.compile(r'(((?<=[a-z])[A-Z])|([A-Z](?![A-Z]|$)))')
@keep_lazy_text
def wrap(text, width):
"""
A word-wrap function that preserves existing line breaks. Expects that
existing line breaks are posix newlines.
All white space is preserved except added line breaks consume the space on
which they break the line.
Long words are not wrapped, so the output text may have lines longer than
``width``.
"""
text = force_text(text)
def _generator():
for line in text.splitlines(True): # True keeps trailing linebreaks
max_width = min((line.endswith('\n') and width + 1 or width), width)
while len(line) > max_width:
space = line[:max_width + 1].rfind(' ') + 1
if space == 0:
space = line.find(' ') + 1
if space == 0:
yield line
line = ''
break
yield '%s\n' % line[:space - 1]
line = line[space:]
max_width = min((line.endswith('\n') and width + 1 or width), width)
if line:
yield line
return ''.join(_generator())
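# Illustrative example (editor's addition, not part of Django): breaks fall
# on spaces, and the space consumed by a break is dropped.
# >>> wrap('a b c d', 3)
# 'a b\nc d'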
class Truncator(SimpleLazyObject):
"""
An object used to truncate text, either by characters or words.
"""
def __init__(self, text):
super(Truncator, self).__init__(lambda: force_text(text))
def add_truncation_text(self, text, truncate=None):
if truncate is None:
truncate = pgettext(
'String to return when truncating text',
'%(truncated_text)s...')
truncate = force_text(truncate)
if '%(truncated_text)s' in truncate:
return truncate % {'truncated_text': text}
# The truncation text didn't contain the %(truncated_text)s string
# replacement argument so just append it to the text.
if text.endswith(truncate):
# But don't append the truncation text if the current text already
# ends in this.
return text
return '%s%s' % (text, truncate)
def chars(self, num, truncate=None, html=False):
"""
Returns the text truncated to be no longer than the specified number
of characters.
Takes an optional argument of what should be used to notify that the
string has been truncated, defaulting to a translatable string of an
ellipsis (...).
"""
self._setup()
length = int(num)
text = unicodedata.normalize('NFC', self._wrapped)
# Calculate the length to truncate to (max length - end_text length)
truncate_len = length
for char in self.add_truncation_text('', truncate):
if not unicodedata.combining(char):
truncate_len -= 1
if truncate_len == 0:
break
if html:
return self._truncate_html(length, truncate, text, truncate_len, False)
return self._text_chars(length, truncate, text, truncate_len)
def _text_chars(self, length, truncate, text, truncate_len):
"""
Truncates a string after a certain number of chars.
"""
s_len = 0
end_index = None
for i, char in enumerate(text):
if unicodedata.combining(char):
# Don't consider combining characters
# as adding to the string length
continue
s_len += 1
if end_index is None and s_len > truncate_len:
end_index = i
if s_len > length:
# Return the truncated string
return self.add_truncation_text(text[:end_index or 0],
truncate)
# Return the original string since no truncation was necessary
return text
def words(self, num, truncate=None, html=False):
"""
Truncates a string after a certain number of words. Takes an optional
argument of what should be used to notify that the string has been
truncated, defaulting to ellipsis (...).
"""
self._setup()
length = int(num)
if html:
return self._truncate_html(length, truncate, self._wrapped, length, True)
return self._text_words(length, truncate)
def _text_words(self, length, truncate):
"""
Truncates a string after a certain number of words.
Newlines in the string will be stripped.
"""
words = self._wrapped.split()
if len(words) > length:
words = words[:length]
return self.add_truncation_text(' '.join(words), truncate)
return ' '.join(words)
def _truncate_html(self, length, truncate, text, truncate_len, words):
"""
Truncates HTML to a certain number of chars (not counting tags and
comments), or, if words is True, then to a certain number of words.
Closes opened tags if they were correctly closed in the given HTML.
Newlines in the HTML are preserved.
"""
if words and length <= 0:
return ''
html4_singlets = (
'br', 'col', 'link', 'base', 'img',
'param', 'area', 'hr', 'input'
)
# Count non-HTML chars/words and keep note of open tags
pos = 0
end_text_pos = 0
current_len = 0
open_tags = []
regex = re_words if words else re_chars
while current_len <= length:
m = regex.search(text, pos)
if not m:
# Checked through whole string
break
pos = m.end(0)
if m.group(1):
# It's an actual non-HTML word or char
current_len += 1
if current_len == truncate_len:
end_text_pos = pos
continue
# Check for tag
tag = re_tag.match(m.group(0))
if not tag or current_len >= truncate_len:
# Don't worry about non tags or tags after our truncate point
continue
closing_tag, tagname, self_closing = tag.groups()
# Element names are always case-insensitive
tagname = tagname.lower()
if self_closing or tagname in html4_singlets:
pass
elif closing_tag:
# Check for match in open tags list
try:
i = open_tags.index(tagname)
except ValueError:
pass
else:
# SGML: An end tag closes, back to the matching start tag,
# all unclosed intervening start tags with omitted end tags
open_tags = open_tags[i + 1:]
else:
# Add it to the start of the open tags list
open_tags.insert(0, tagname)
if current_len <= length:
return text
out = text[:end_text_pos]
truncate_text = self.add_truncation_text('', truncate)
if truncate_text:
out += truncate_text
# Close any tags still open
for tag in open_tags:
out += '</%s>' % tag
# Return string
return out
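# Illustrative examples (editor's addition, not part of Django); the default
# truncation text is a translatable ellipsis appended to the kept portion.
# >>> Truncator('The quick brown fox').words(2)
# 'The quick...'
# >>> Truncator('The quick brown fox').chars(10)
# 'The qui...'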
@keep_lazy_text
def get_valid_filename(s):
"""
Returns the given string converted to a string that can be used for a clean
filename. Specifically, leading and trailing spaces are removed; other
spaces are converted to underscores; and anything that is not a unicode
alphanumeric, dash, underscore, or dot, is removed.
>>> get_valid_filename("john's portrait in 2004.jpg")
'johns_portrait_in_2004.jpg'
"""
s = force_text(s).strip().replace(' ', '_')
return re.sub(r'(?u)[^-\w.]', '', s)
@keep_lazy_text
def get_text_list(list_, last_word=ugettext_lazy('or')):
"""
>>> get_text_list(['a', 'b', 'c', 'd'])
'a, b, c or d'
>>> get_text_list(['a', 'b', 'c'], 'and')
'a, b and c'
>>> get_text_list(['a', 'b'], 'and')
'a and b'
>>> get_text_list(['a'])
'a'
>>> get_text_list([])
''
"""
if len(list_) == 0:
return ''
if len(list_) == 1:
return force_text(list_[0])
return '%s %s %s' % (
# Translators: This string is used as a separator between list elements
_(', ').join(force_text(i) for i in list_[:-1]),
force_text(last_word), force_text(list_[-1]))
@keep_lazy_text
def normalize_newlines(text):
"""Normalizes CRLF and CR newlines to just LF."""
text = force_text(text)
return re_newlines.sub('\n', text)
@keep_lazy_text
def phone2numeric(phone):
"""Converts a phone number with letters into its numeric equivalent."""
char2number = {'a': '2', 'b': '2', 'c': '2', 'd': '3', 'e': '3', 'f': '3',
'g': '4', 'h': '4', 'i': '4', 'j': '5', 'k': '5', 'l': '5', 'm': '6',
'n': '6', 'o': '6', 'p': '7', 'q': '7', 'r': '7', 's': '7', 't': '8',
'u': '8', 'v': '8', 'w': '9', 'x': '9', 'y': '9', 'z': '9'}
return ''.join(char2number.get(c, c) for c in phone.lower())
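# Illustrative example (editor's addition, not part of Django); non-letter
# characters pass through unchanged.
# >>> phone2numeric('0800-flowers')
# '0800-3569377'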
# From http://www.xhaus.com/alan/python/httpcomp.html#gzip
# Used with permission.
def compress_string(s):
zbuf = BytesIO()
zfile = GzipFile(mode='wb', compresslevel=6, fileobj=zbuf)
zfile.write(s)
zfile.close()
return zbuf.getvalue()
class StreamingBuffer(object):
def __init__(self):
self.vals = []
def write(self, val):
self.vals.append(val)
def read(self):
if not self.vals:
return b''
ret = b''.join(self.vals)
self.vals = []
return ret
def flush(self):
return
def close(self):
return
# Like compress_string, but for iterators of strings.
def compress_sequence(sequence):
buf = StreamingBuffer()
zfile = GzipFile(mode='wb', compresslevel=6, fileobj=buf)
# Output headers...
yield buf.read()
for item in sequence:
zfile.write(item)
data = buf.read()
if data:
yield data
zfile.close()
yield buf.read()
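# Illustrative example (editor's addition, not part of Django): on Python 3,
# joining every emitted chunk yields one complete gzip stream.
# >>> import gzip
# >>> chunks = list(compress_sequence([b'hello ', b'world']))
# >>> gzip.decompress(b''.join(chunks))
# b'hello world'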
# Expression to match some_token and some_token="with spaces" (and similarly
# for single-quoted strings).
smart_split_re = re.compile(r"""
((?:
[^\s'"]*
(?:
(?:"(?:[^"\\]|\\.)*" | '(?:[^'\\]|\\.)*')
[^\s'"]*
)+
) | \S+)
""", re.VERBOSE)
def smart_split(text):
r"""
Generator that splits a string by spaces, leaving quoted phrases together.
Supports both single and double quotes, and supports escaping quotes with
backslashes. In the output, strings will keep their initial and trailing
quote marks and escaped quotes will remain escaped (the results can then
be further processed with unescape_string_literal()).
>>> list(smart_split(r'This is "a person\'s" test.'))
['This', 'is', '"a person\\\'s"', 'test.']
>>> list(smart_split(r"Another 'person\'s' test."))
['Another', "'person\\'s'", 'test.']
>>> list(smart_split(r'A "\"funky\" style" test.'))
['A', '"\\"funky\\" style"', 'test.']
"""
text = force_text(text)
for bit in smart_split_re.finditer(text):
yield bit.group(0)
def _replace_entity(match):
text = match.group(1)
if text[0] == '#':
text = text[1:]
try:
if text[0] in 'xX':
c = int(text[1:], 16)
else:
c = int(text)
return six.unichr(c)
except ValueError:
return match.group(0)
else:
try:
return six.unichr(html_entities.name2codepoint[text])
except (ValueError, KeyError):
return match.group(0)
_entity_re = re.compile(r"&(#?[xX]?(?:[0-9a-fA-F]+|\w{1,8}));")
@keep_lazy_text
def unescape_entities(text):
return _entity_re.sub(_replace_entity, force_text(text))
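# Illustrative example (editor's addition, not part of Django): named,
# decimal and hexadecimal entities all resolve to the same character.
# >>> unescape_entities('&amp; &#38; &#x26;')
# '& & &'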
@keep_lazy_text
def unescape_string_literal(s):
r"""
Convert quoted string literals to unquoted strings with escaped quotes and
backslashes unquoted::
>>> unescape_string_literal('"abc"')
'abc'
>>> unescape_string_literal("'abc'")
'abc'
>>> unescape_string_literal('"a \"bc\""')
'a "bc"'
>>> unescape_string_literal("'\'ab\' c'")
"'ab' c"
"""
if s[0] not in "\"'" or s[-1] != s[0]:
raise ValueError("Not a string literal: %r" % s)
quote = s[0]
return s[1:-1].replace(r'\%s' % quote, quote).replace(r'\\', '\\')
@keep_lazy(six.text_type, SafeText)
def slugify(value, allow_unicode=False):
"""
Convert to ASCII if 'allow_unicode' is False. Convert spaces to hyphens.
Remove characters that aren't alphanumerics, underscores, or hyphens.
Convert to lowercase. Also strip leading and trailing whitespace.
"""
value = force_text(value)
if allow_unicode:
value = unicodedata.normalize('NFKC', value)
value = re.sub('[^\w\s-]', '', value, flags=re.U).strip().lower()
return mark_safe(re.sub('[-\s]+', '-', value, flags=re.U))
value = unicodedata.normalize('NFKD', value).encode('ascii', 'ignore').decode('ascii')
value = re.sub('[^\w\s-]', '', value).strip().lower()
return mark_safe(re.sub('[-\s]+', '-', value))
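# Illustrative examples (editor's addition, not part of Django):
# >>> slugify(' Joel is a slug ')
# 'joel-is-a-slug'
# >>> slugify('Hëllo Wörld', allow_unicode=True)
# 'hëllo-wörld'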
def camel_case_to_spaces(value):
"""
Splits CamelCase and converts to lower case. Also strips leading and
trailing whitespace.
"""
return re_camel_case.sub(r' \1', value).strip().lower()
| bsd-3-clause |
cyberark-bizdev/ansible | test/units/modules/network/f5/test_bigip_pool.py | 23 | 14477 | # -*- coding: utf-8 -*-
#
# Copyright (c) 2017 F5 Networks Inc.
# GNU General Public License v3.0 (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import os
import json
import pytest
import sys
from nose.plugins.skip import SkipTest
if sys.version_info < (2, 7):
raise SkipTest("F5 Ansible modules require Python >= 2.7")
from ansible.compat.tests import unittest
from ansible.compat.tests.mock import Mock
from ansible.compat.tests.mock import patch
from ansible.module_utils.basic import AnsibleModule
try:
from library.bigip_pool import ApiParameters
from library.bigip_pool import ModuleParameters
from library.bigip_pool import ModuleManager
from library.bigip_pool import ArgumentSpec
from library.module_utils.network.f5.common import F5ModuleError
from library.module_utils.network.f5.common import iControlUnexpectedHTTPError
from test.unit.modules.utils import set_module_args
except ImportError:
try:
from ansible.modules.network.f5.bigip_pool import ApiParameters
from ansible.modules.network.f5.bigip_pool import ModuleParameters
from ansible.modules.network.f5.bigip_pool import ModuleManager
from ansible.modules.network.f5.bigip_pool import ArgumentSpec
from ansible.module_utils.network.f5.common import F5ModuleError
from ansible.module_utils.network.f5.common import iControlUnexpectedHTTPError
from units.modules.utils import set_module_args
except ImportError:
raise SkipTest("F5 Ansible modules require the f5-sdk Python library")
fixture_path = os.path.join(os.path.dirname(__file__), 'fixtures')
fixture_data = {}
def load_fixture(name):
path = os.path.join(fixture_path, name)
if path in fixture_data:
return fixture_data[path]
with open(path) as f:
data = f.read()
try:
data = json.loads(data)
except Exception:
pass
fixture_data[path] = data
return data
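# Illustrative usage (editor's addition, not part of the original tests):
# fixtures are cached per path, so repeated calls parse the file only once.
# pool_json = load_fixture('load_ltm_pool.json')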
class TestParameters(unittest.TestCase):
def test_module_parameters(self):
args = dict(
monitor_type='m_of_n',
monitors=['/Common/Fake', '/Common/Fake2'],
quorum=1,
slow_ramp_time=200,
reselect_tries=5,
service_down_action='drop'
)
p = ModuleParameters(params=args)
assert p.monitor_type == 'm_of_n'
assert p.quorum == 1
assert p.monitors == 'min 1 of { /Common/Fake /Common/Fake2 }'
assert p.slow_ramp_time == 200
assert p.reselect_tries == 5
assert p.service_down_action == 'drop'
def test_api_parameters(self):
args = dict(
monitor="/Common/Fake and /Common/Fake2 ",
slowRampTime=200,
reselectTries=5,
serviceDownAction='drop'
)
p = ApiParameters(params=args)
assert p.monitors == '/Common/Fake and /Common/Fake2'
assert p.slow_ramp_time == 200
assert p.reselect_tries == 5
assert p.service_down_action == 'drop'
def test_unknown_module_lb_method(self):
args = dict(
lb_method='obscure_hyphenated_fake_method',
)
with pytest.raises(F5ModuleError):
p = ModuleParameters(params=args)
assert p.lb_method == 'foo'
def test_unknown_api_lb_method(self):
args = dict(
            loadBalancingMode='obscure_hyphenated_fake_method'
)
with pytest.raises(F5ModuleError):
p = ApiParameters(params=args)
assert p.lb_method == 'foo'
class TestManager(unittest.TestCase):
def setUp(self):
self.spec = ArgumentSpec()
def test_create_pool(self, *args):
set_module_args(dict(
pool='fake_pool',
description='fakepool',
service_down_action='drop',
lb_method='round-robin',
partition='Common',
slow_ramp_time=10,
reselect_tries=1,
server='localhost',
password='password',
user='admin'
))
module = AnsibleModule(
argument_spec=self.spec.argument_spec,
supports_check_mode=self.spec.supports_check_mode
)
mm = ModuleManager(module=module)
mm.create_on_device = Mock(return_value=True)
mm.exists = Mock(return_value=False)
results = mm.exec_module()
assert results['changed'] is True
assert results['name'] == 'fake_pool'
assert results['description'] == 'fakepool'
assert results['service_down_action'] == 'drop'
assert results['lb_method'] == 'round-robin'
assert results['slow_ramp_time'] == 10
assert results['reselect_tries'] == 1
def test_create_pool_monitor_type_missing(self, *args):
set_module_args(dict(
pool='fake_pool',
lb_method='round-robin',
partition='Common',
monitors=['/Common/tcp', '/Common/http'],
server='localhost',
password='password',
user='admin'
))
module = AnsibleModule(
argument_spec=self.spec.argument_spec,
supports_check_mode=self.spec.supports_check_mode
)
mm = ModuleManager(module=module)
mm.create_on_device = Mock(return_value=True)
mm.exists = Mock(return_value=False)
results = mm.exec_module()
assert results['changed'] is True
assert results['name'] == 'fake_pool'
assert results['monitors'] == ['/Common/http', '/Common/tcp']
assert results['monitor_type'] == 'and_list'
def test_create_pool_monitors_missing(self, *args):
set_module_args(dict(
pool='fake_pool',
lb_method='round-robin',
partition='Common',
monitor_type='and_list',
server='localhost',
password='password',
user='admin'
))
module = AnsibleModule(
argument_spec=self.spec.argument_spec,
supports_check_mode=self.spec.supports_check_mode
)
mm = ModuleManager(module=module)
mm.create_on_device = Mock(return_value=True)
mm.exists = Mock(return_value=False)
msg = "The 'monitors' parameter cannot be empty when " \
"'monitor_type' parameter is specified"
with pytest.raises(F5ModuleError) as err:
mm.exec_module()
assert str(err.value) == msg
def test_create_pool_quorum_missing(self, *args):
set_module_args(dict(
pool='fake_pool',
lb_method='round-robin',
partition='Common',
monitor_type='m_of_n',
monitors=['/Common/tcp', '/Common/http'],
server='localhost',
password='password',
user='admin'
))
module = AnsibleModule(
argument_spec=self.spec.argument_spec,
supports_check_mode=self.spec.supports_check_mode
)
mm = ModuleManager(module=module)
mm.create_on_device = Mock(return_value=True)
mm.exists = Mock(return_value=False)
msg = "Quorum value must be specified with monitor_type 'm_of_n'."
with pytest.raises(F5ModuleError) as err:
mm.exec_module()
assert str(err.value) == msg
def test_create_pool_monitor_and_list(self, *args):
set_module_args(dict(
pool='fake_pool',
partition='Common',
monitor_type='and_list',
monitors=['/Common/tcp', '/Common/http'],
server='localhost',
password='password',
user='admin'
))
module = AnsibleModule(
argument_spec=self.spec.argument_spec,
supports_check_mode=self.spec.supports_check_mode
)
mm = ModuleManager(module=module)
mm.create_on_device = Mock(return_value=True)
mm.exists = Mock(return_value=False)
results = mm.exec_module()
assert results['changed'] is True
assert results['name'] == 'fake_pool'
assert results['monitors'] == ['/Common/http', '/Common/tcp']
assert results['monitor_type'] == 'and_list'
def test_create_pool_monitor_m_of_n(self, *args):
set_module_args(dict(
pool='fake_pool',
partition='Common',
monitor_type='m_of_n',
quorum=1,
monitors=['/Common/tcp', '/Common/http'],
server='localhost',
password='password',
user='admin'
))
module = AnsibleModule(
argument_spec=self.spec.argument_spec,
supports_check_mode=self.spec.supports_check_mode
)
mm = ModuleManager(module=module)
mm.create_on_device = Mock(return_value=True)
mm.exists = Mock(return_value=False)
results = mm.exec_module()
assert results['changed'] is True
assert results['name'] == 'fake_pool'
assert results['monitors'] == ['/Common/http', '/Common/tcp']
assert results['monitor_type'] == 'm_of_n'
def test_update_monitors(self, *args):
set_module_args(dict(
name='test_pool',
partition='Common',
monitor_type='and_list',
monitors=['/Common/http', '/Common/tcp'],
server='localhost',
password='password',
user='admin'
))
module = AnsibleModule(
argument_spec=self.spec.argument_spec,
supports_check_mode=self.spec.supports_check_mode
)
mm = ModuleManager(module=module)
current = ApiParameters(params=load_fixture('load_ltm_pool.json'))
mm.update_on_device = Mock(return_value=True)
mm.exists = Mock(return_value=True)
mm.read_current_from_device = Mock(return_value=current)
results = mm.exec_module()
assert results['changed'] is True
assert results['monitor_type'] == 'and_list'
def test_create_pool_monitor_and_list_no_partition(self, *args):
set_module_args(dict(
pool='fake_pool',
monitor_type='and_list',
monitors=['tcp', 'http'],
server='localhost',
password='password',
user='admin'
))
module = AnsibleModule(
argument_spec=self.spec.argument_spec,
supports_check_mode=self.spec.supports_check_mode
)
mm = ModuleManager(module=module)
mm.create_on_device = Mock(return_value=True)
mm.exists = Mock(return_value=False)
results = mm.exec_module()
assert results['changed'] is True
assert results['name'] == 'fake_pool'
assert results['monitors'] == ['/Common/http', '/Common/tcp']
assert results['monitor_type'] == 'and_list'
def test_create_pool_monitor_m_of_n_no_partition(self, *args):
set_module_args(dict(
pool='fake_pool',
monitor_type='m_of_n',
quorum=1,
monitors=['tcp', 'http'],
server='localhost',
password='password',
user='admin'
))
module = AnsibleModule(
argument_spec=self.spec.argument_spec,
supports_check_mode=self.spec.supports_check_mode
)
mm = ModuleManager(module=module)
mm.create_on_device = Mock(return_value=True)
mm.exists = Mock(return_value=False)
results = mm.exec_module()
assert results['changed'] is True
assert results['name'] == 'fake_pool'
assert results['monitors'] == ['/Common/http', '/Common/tcp']
assert results['monitor_type'] == 'm_of_n'
def test_create_pool_monitor_and_list_custom_partition(self, *args):
set_module_args(dict(
pool='fake_pool',
partition='Testing',
monitor_type='and_list',
monitors=['tcp', 'http'],
server='localhost',
password='password',
user='admin'
))
module = AnsibleModule(
argument_spec=self.spec.argument_spec,
supports_check_mode=self.spec.supports_check_mode
)
mm = ModuleManager(module=module)
mm.create_on_device = Mock(return_value=True)
mm.exists = Mock(return_value=False)
results = mm.exec_module()
assert results['changed'] is True
assert results['name'] == 'fake_pool'
assert results['monitors'] == ['/Testing/http', '/Testing/tcp']
assert results['monitor_type'] == 'and_list'
def test_create_pool_monitor_m_of_n_custom_partition(self, *args):
set_module_args(dict(
pool='fake_pool',
partition='Testing',
monitor_type='m_of_n',
quorum=1,
monitors=['tcp', 'http'],
server='localhost',
password='password',
user='admin'
))
module = AnsibleModule(
argument_spec=self.spec.argument_spec,
supports_check_mode=self.spec.supports_check_mode
)
mm = ModuleManager(module=module)
mm.create_on_device = Mock(return_value=True)
mm.exists = Mock(return_value=False)
results = mm.exec_module()
assert results['changed'] is True
assert results['name'] == 'fake_pool'
assert results['monitors'] == ['/Testing/http', '/Testing/tcp']
assert results['monitor_type'] == 'm_of_n'
def test_create_pool_with_metadata(self, *args):
set_module_args(dict(
pool='fake_pool',
metadata=dict(ansible='2.4'),
server='localhost',
password='password',
user='admin'
))
module = AnsibleModule(
argument_spec=self.spec.argument_spec,
supports_check_mode=self.spec.supports_check_mode
)
mm = ModuleManager(module=module)
mm.create_on_device = Mock(return_value=True)
mm.exists = Mock(return_value=False)
results = mm.exec_module()
assert results['changed'] is True
assert results['name'] == 'fake_pool'
assert 'metadata' in results
assert 'ansible' in results['metadata']
assert results['metadata']['ansible'] == '2.4'
| gpl-3.0 |
zonemercy/Kaggle | quora/solution/utils/keras_utils.py | 2 | 3685 | # -*- coding: utf-8 -*-
"""
@author: Chenglong Chen <c.chenglong@gmail.com>
@brief: utils for Keras models
"""
from sklearn.preprocessing import StandardScaler
from keras.models import Sequential
from keras.layers.core import Dense, Layer, Dropout, Activation
from keras.layers.normalization import BatchNormalization
from keras.layers.advanced_activations import ELU, PReLU
from keras.optimizers import SGD
from keras.utils import np_utils, generic_utils
class KerasDNNRegressor:
def __init__(self, input_dropout=0.2, hidden_layers=2, hidden_units=64,
hidden_activation="relu", hidden_dropout=0.5, batch_norm=None,
optimizer="adadelta", nb_epoch=10, batch_size=64):
self.input_dropout = input_dropout
self.hidden_layers = hidden_layers
self.hidden_units = hidden_units
self.hidden_activation = hidden_activation
self.hidden_dropout = hidden_dropout
self.batch_norm = batch_norm
self.optimizer = optimizer
self.nb_epoch = nb_epoch
self.batch_size = batch_size
self.scaler = None
self.model = None
def __str__(self):
return self.__repr__()
def __repr__(self):
return ("%s(input_dropout=%f, hidden_layers=%d, hidden_units=%d, \n"
"hidden_activation=\'%s\', hidden_dropout=%f, batch_norm=\'%s\', \n"
"optimizer=\'%s\', nb_epoch=%d, batch_size=%d)" % (
self.__class__.__name__,
self.input_dropout,
self.hidden_layers,
self.hidden_units,
self.hidden_activation,
self.hidden_dropout,
str(self.batch_norm),
self.optimizer,
self.nb_epoch,
self.batch_size,
))
def fit(self, X, y):
## scaler
self.scaler = StandardScaler()
X = self.scaler.fit_transform(X)
#### build model
self.model = Sequential()
## input layer
self.model.add(Dropout(self.input_dropout, input_shape=(X.shape[1],)))
## hidden layers
first = True
hidden_layers = self.hidden_layers
while hidden_layers > 0:
self.model.add(Dense(self.hidden_units))
if self.batch_norm == "before_act":
self.model.add(BatchNormalization())
if self.hidden_activation == "prelu":
self.model.add(PReLU())
elif self.hidden_activation == "elu":
self.model.add(ELU())
else:
self.model.add(Activation(self.hidden_activation))
if self.batch_norm == "after_act":
self.model.add(BatchNormalization())
self.model.add(Dropout(self.hidden_dropout))
hidden_layers -= 1
## output layer
output_dim = 1
output_act = "linear"
self.model.add(Dense(output_dim))
self.model.add(Activation(output_act))
## loss
if self.optimizer == "sgd":
sgd = SGD(lr=0.1, decay=1e-6, momentum=0.9, nesterov=True)
self.model.compile(loss="mse", optimizer=sgd)
else:
self.model.compile(loss="mse", optimizer=self.optimizer)
## fit
self.model.fit(X, y,
nb_epoch=self.nb_epoch,
batch_size=self.batch_size,
validation_split=0, verbose=0)
return self
def predict(self, X):
X = self.scaler.transform(X)
y_pred = self.model.predict(X)
y_pred = y_pred.flatten()
return y_pred
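# Illustrative usage (editor's addition, not part of the original module);
# X_train, y_train and X_test are assumed to be pre-built NumPy arrays.
# reg = KerasDNNRegressor(hidden_layers=3, hidden_units=128,
#                         hidden_activation="prelu", batch_norm="before_act",
#                         nb_epoch=20, batch_size=128)
# reg.fit(X_train, y_train)
# y_pred = reg.predict(X_test)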
| mit |
40223134/w16b_test | static/Brython3.1.3-20150514-095342/Lib/socket.py | 730 | 14913 | # Wrapper module for _socket, providing some additional facilities
# implemented in Python.
"""\
This module provides socket operations and some related functions.
On Unix, it supports IP (Internet Protocol) and Unix domain sockets.
On other systems, it only supports IP. Functions specific for a
socket are available as methods of the socket object.
Functions:
socket() -- create a new socket object
socketpair() -- create a pair of new socket objects [*]
fromfd() -- create a socket object from an open file descriptor [*]
fromshare() -- create a socket object from data received from socket.share() [*]
gethostname() -- return the current hostname
gethostbyname() -- map a hostname to its IP number
gethostbyaddr() -- map an IP number or hostname to DNS info
getservbyname() -- map a service name and a protocol name to a port number
getprotobyname() -- map a protocol name (e.g. 'tcp') to a number
ntohs(), ntohl() -- convert 16, 32 bit int from network to host byte order
htons(), htonl() -- convert 16, 32 bit int from host to network byte order
inet_aton() -- convert IP addr string (123.45.67.89) to 32-bit packed format
inet_ntoa() -- convert 32-bit packed format IP to string (123.45.67.89)
socket.getdefaulttimeout() -- get the default timeout value
socket.setdefaulttimeout() -- set the default timeout value
create_connection() -- connects to an address, with an optional timeout and
optional source address.
[*] not available on all platforms!
Special objects:
SocketType -- type object for socket objects
error -- exception raised for I/O errors
has_ipv6 -- boolean value indicating if IPv6 is supported
Integer constants:
AF_INET, AF_UNIX -- socket domains (first argument to socket() call)
SOCK_STREAM, SOCK_DGRAM, SOCK_RAW -- socket types (second argument)
Many other constants may be defined; these may be used in calls to
the setsockopt() and getsockopt() methods.
"""
import _socket
from _socket import *
import os, sys, io
try:
import errno
except ImportError:
errno = None
EBADF = getattr(errno, 'EBADF', 9)
EAGAIN = getattr(errno, 'EAGAIN', 11)
EWOULDBLOCK = getattr(errno, 'EWOULDBLOCK', 11)
__all__ = ["getfqdn", "create_connection"]
__all__.extend(os._get_exports_list(_socket))
_realsocket = socket
# WSA error codes
if sys.platform.lower().startswith("win"):
errorTab = {}
errorTab[10004] = "The operation was interrupted."
errorTab[10009] = "A bad file handle was passed."
errorTab[10013] = "Permission denied."
    errorTab[10014] = "A fault occurred on the network."  # WSAEFAULT
errorTab[10022] = "An invalid operation was attempted."
errorTab[10035] = "The socket operation would block"
errorTab[10036] = "A blocking operation is already in progress."
errorTab[10048] = "The network address is in use."
errorTab[10054] = "The connection has been reset."
errorTab[10058] = "The network has been shut down."
errorTab[10060] = "The operation timed out."
errorTab[10061] = "Connection refused."
errorTab[10063] = "The name is too long."
errorTab[10064] = "The host is down."
errorTab[10065] = "The host is unreachable."
__all__.append("errorTab")
class socket(_socket.socket):
"""A subclass of _socket.socket adding the makefile() method."""
__slots__ = ["__weakref__", "_io_refs", "_closed"]
def __init__(self, family=AF_INET, type=SOCK_STREAM, proto=0, fileno=None):
_socket.socket.__init__(self, family, type, proto, fileno)
self._io_refs = 0
self._closed = False
def __enter__(self):
return self
def __exit__(self, *args):
if not self._closed:
self.close()
def __repr__(self):
"""Wrap __repr__() to reveal the real class name."""
s = _socket.socket.__repr__(self)
if s.startswith("<socket object"):
s = "<%s.%s%s%s" % (self.__class__.__module__,
self.__class__.__name__,
getattr(self, '_closed', False) and " [closed] " or "",
s[7:])
return s
def __getstate__(self):
raise TypeError("Cannot serialize socket object")
def dup(self):
"""dup() -> socket object
Return a new socket object connected to the same system resource.
"""
fd = dup(self.fileno())
sock = self.__class__(self.family, self.type, self.proto, fileno=fd)
sock.settimeout(self.gettimeout())
return sock
def accept(self):
"""accept() -> (socket object, address info)
Wait for an incoming connection. Return a new socket
representing the connection, and the address of the client.
For IP sockets, the address info is a pair (hostaddr, port).
"""
fd, addr = self._accept()
sock = socket(self.family, self.type, self.proto, fileno=fd)
# Issue #7995: if no default timeout is set and the listening
# socket had a (non-zero) timeout, force the new socket in blocking
# mode to override platform-specific socket flags inheritance.
if getdefaulttimeout() is None and self.gettimeout():
sock.setblocking(True)
return sock, addr
def makefile(self, mode="r", buffering=None, *,
encoding=None, errors=None, newline=None):
"""makefile(...) -> an I/O stream connected to the socket
The arguments are as for io.open() after the filename,
except the only mode characters supported are 'r', 'w' and 'b'.
The semantics are similar too. (XXX refactor to share code?)
"""
for c in mode:
if c not in {"r", "w", "b"}:
raise ValueError("invalid mode %r (only r, w, b allowed)")
writing = "w" in mode
reading = "r" in mode or not writing
assert reading or writing
binary = "b" in mode
rawmode = ""
if reading:
rawmode += "r"
if writing:
rawmode += "w"
raw = SocketIO(self, rawmode)
self._io_refs += 1
if buffering is None:
buffering = -1
if buffering < 0:
buffering = io.DEFAULT_BUFFER_SIZE
if buffering == 0:
if not binary:
raise ValueError("unbuffered streams must be binary")
return raw
if reading and writing:
buffer = io.BufferedRWPair(raw, raw, buffering)
elif reading:
buffer = io.BufferedReader(raw, buffering)
else:
assert writing
buffer = io.BufferedWriter(raw, buffering)
if binary:
return buffer
text = io.TextIOWrapper(buffer, encoding, errors, newline)
text.mode = mode
return text
def _decref_socketios(self):
if self._io_refs > 0:
self._io_refs -= 1
if self._closed:
self.close()
def _real_close(self, _ss=_socket.socket):
# This function should not reference any globals. See issue #808164.
_ss.close(self)
def close(self):
# This function should not reference any globals. See issue #808164.
self._closed = True
if self._io_refs <= 0:
self._real_close()
def detach(self):
"""detach() -> file descriptor
Close the socket object without closing the underlying file descriptor.
The object cannot be used after this call, but the file descriptor
can be reused for other purposes. The file descriptor is returned.
"""
self._closed = True
return super().detach()
def fromfd(fd, family, type, proto=0):
""" fromfd(fd, family, type[, proto]) -> socket object
Create a socket object from a duplicate of the given file
descriptor. The remaining arguments are the same as for socket().
"""
nfd = dup(fd)
return socket(family, type, proto, nfd)
if hasattr(_socket.socket, "share"):
def fromshare(info):
""" fromshare(info) -> socket object
Create a socket object from a the bytes object returned by
socket.share(pid).
"""
return socket(0, 0, 0, info)
if hasattr(_socket, "socketpair"):
def socketpair(family=None, type=SOCK_STREAM, proto=0):
"""socketpair([family[, type[, proto]]]) -> (socket object, socket object)
Create a pair of socket objects from the sockets returned by the platform
socketpair() function.
The arguments are the same as for socket() except the default family is
AF_UNIX if defined on the platform; otherwise, the default is AF_INET.
"""
if family is None:
try:
family = AF_UNIX
except NameError:
family = AF_INET
a, b = _socket.socketpair(family, type, proto)
a = socket(family, type, proto, a.detach())
b = socket(family, type, proto, b.detach())
return a, b
_blocking_errnos = { EAGAIN, EWOULDBLOCK }
class SocketIO(io.RawIOBase):
"""Raw I/O implementation for stream sockets.
This class supports the makefile() method on sockets. It provides
the raw I/O interface on top of a socket object.
"""
# One might wonder why not let FileIO do the job instead. There are two
# main reasons why FileIO is not adapted:
# - it wouldn't work under Windows (where you can't used read() and
# write() on a socket handle)
# - it wouldn't work with socket timeouts (FileIO would ignore the
# timeout and consider the socket non-blocking)
# XXX More docs
def __init__(self, sock, mode):
if mode not in ("r", "w", "rw", "rb", "wb", "rwb"):
raise ValueError("invalid mode: %r" % mode)
io.RawIOBase.__init__(self)
self._sock = sock
if "b" not in mode:
mode += "b"
self._mode = mode
self._reading = "r" in mode
self._writing = "w" in mode
self._timeout_occurred = False
def readinto(self, b):
"""Read up to len(b) bytes into the writable buffer *b* and return
the number of bytes read. If the socket is non-blocking and no bytes
are available, None is returned.
If *b* is non-empty, a 0 return value indicates that the connection
was shutdown at the other end.
"""
self._checkClosed()
self._checkReadable()
if self._timeout_occurred:
raise IOError("cannot read from timed out object")
while True:
try:
return self._sock.recv_into(b)
except timeout:
self._timeout_occurred = True
raise
except InterruptedError:
continue
except error as e:
if e.args[0] in _blocking_errnos:
return None
raise
def write(self, b):
"""Write the given bytes or bytearray object *b* to the socket
and return the number of bytes written. This can be less than
len(b) if not all data could be written. If the socket is
non-blocking and no bytes could be written None is returned.
"""
self._checkClosed()
self._checkWritable()
try:
return self._sock.send(b)
except error as e:
# XXX what about EINTR?
if e.args[0] in _blocking_errnos:
return None
raise
def readable(self):
"""True if the SocketIO is open for reading.
"""
if self.closed:
raise ValueError("I/O operation on closed socket.")
return self._reading
def writable(self):
"""True if the SocketIO is open for writing.
"""
if self.closed:
raise ValueError("I/O operation on closed socket.")
return self._writing
def seekable(self):
"""True if the SocketIO is open for seeking.
"""
if self.closed:
raise ValueError("I/O operation on closed socket.")
return super().seekable()
def fileno(self):
"""Return the file descriptor of the underlying socket.
"""
self._checkClosed()
return self._sock.fileno()
@property
def name(self):
if not self.closed:
return self.fileno()
else:
return -1
@property
def mode(self):
return self._mode
def close(self):
"""Close the SocketIO object. This doesn't close the underlying
socket, except if all references to it have disappeared.
"""
if self.closed:
return
io.RawIOBase.close(self)
self._sock._decref_socketios()
self._sock = None
def getfqdn(name=''):
"""Get fully qualified domain name from name.
An empty argument is interpreted as meaning the local host.
First the hostname returned by gethostbyaddr() is checked, then
possibly existing aliases. In case no FQDN is available, hostname
from gethostname() is returned.
"""
name = name.strip()
if not name or name == '0.0.0.0':
name = gethostname()
try:
hostname, aliases, ipaddrs = gethostbyaddr(name)
except error:
pass
else:
aliases.insert(0, hostname)
for name in aliases:
if '.' in name:
break
else:
name = hostname
return name
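# Illustrative example (editor's addition, not part of the module): with no
# argument getfqdn() resolves the local host, so it may return
# 'myhost.example.com' where gethostname() returns just 'myhost'; the exact
# result depends on the resolver configuration.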
_GLOBAL_DEFAULT_TIMEOUT = object()
def create_connection(address, timeout=_GLOBAL_DEFAULT_TIMEOUT,
source_address=None):
"""Connect to *address* and return the socket object.
Convenience function. Connect to *address* (a 2-tuple ``(host,
port)``) and return the socket object. Passing the optional
*timeout* parameter will set the timeout on the socket instance
before attempting to connect. If no *timeout* is supplied, the
global default timeout setting returned by :func:`getdefaulttimeout`
is used. If *source_address* is set it must be a tuple of (host, port)
for the socket to bind as a source address before making the connection.
An host of '' or port 0 tells the OS to use the default.
"""
host, port = address
err = None
for res in getaddrinfo(host, port, 0, SOCK_STREAM):
af, socktype, proto, canonname, sa = res
sock = None
try:
sock = socket(af, socktype, proto)
if timeout is not _GLOBAL_DEFAULT_TIMEOUT:
sock.settimeout(timeout)
if source_address:
sock.bind(source_address)
sock.connect(sa)
return sock
except error as _:
err = _
if sock is not None:
sock.close()
if err is not None:
raise err
else:
raise error("getaddrinfo returns an empty list")
| agpl-3.0 |
mantidproject/mantid | Framework/PythonInterface/plugins/functions/Examples/ExamplePeakFunction.py | 3 | 7448 | # Mantid Repository : https://github.com/mantidproject/mantid
#
# Copyright © 2018 ISIS Rutherford Appleton Laboratory UKRI,
# NScD Oak Ridge National Laboratory, European Spallation Source,
# Institut Laue - Langevin & CSNS, Institute of High Energy Physics, CAS
# SPDX - License - Identifier: GPL - 3.0 +
# pylint: disable=no-init,invalid-name
"""
This example reimplements a Gaussian fitting function. It is not meant to
be used in production for fitting, it is simply provided as a relatively complete
guide to creating a Fit function.
It uses the so-called IPeakFunction that should be used when there is a sensible way to
calculate the centre, height & fwhm of the function. If it does not make sense, for example a in linear background,
where does not give a peak shape, then see the more general Example1DFunction that does not require these concepts.
"""
import math
import numpy as np
from mantid.api import IPeakFunction, FunctionFactory
class ExamplePeakFunction(IPeakFunction):
_nterms = None
def category(self):
"""
Optional method to return the category that this
function should be listed under. Multiple categories
should be separated with a semi-colon(;). Sub-categories
can be specified using a \\ separator, e.g. Category\\Sub-category
"""
return "Examples"
def init(self):
"""
Declare parameters that participate in the fitting (declareParameter)
and attributes that are constants to be passed (declareAttribute) in
and do not participate in the fit. Attributes must have type=int,float,string,bool
"""
# Active fitting parameters
self.declareParameter("Height")
self.declareParameter("PeakCentre")
self.declareParameter("Sigma")
# Simple attributes required for the function but
# not as part of the fit itself e.g. number of terms to evaluate in some expression
# They must have a default value.
# It is advisable to look at the setAttributeValue function below and take local copies
# of attributes so that they do not have to be retrieved repeatedly throughout the fitting.
self.declareAttribute("NTerms", 1)
def functionLocal(self, xvals):
"""
Computes the function on the set of values given and returns
the answer as a numpy array of floats
"""
# As Fit progresses the declared parameter values will change
# Can also be retrieved by index: self.getParameterValue(0)
height = self.getParameterValue("Height")
peak_centre = self.getParameterValue("PeakCentre")
sigma = self.getParameterValue("Sigma")
weight = math.pow(1./sigma, 2)
# Here you can use the NTerms attr if required by
# using self._nterms: see setAttributeValue below or
# accessing the attribute each time directly via nterms = self.getAttributeValue("NTerms"), but this is much slower
offset_sq = np.square(xvals-peak_centre)
out = height*np.exp(-0.5*offset_sq*weight)
return out
def functionDerivLocal(self, xvals, jacobian):
"""
Computes the partial derivatives of the function on the set of values given
and the sets these values in the given jacobian. The Jacobian is essentially
a matrix where jacobian.set(iy,ip,value) takes 3 parameters:
iy = The index of the data value whose partial derivative this corresponds to
ip = The index of the parameter value whose partial derivative this corresponds to
value = The value of the derivative
"""
height = self.getParameterValue("Height")
peak_centre = self.getParameterValue("PeakCentre")
sigma = self.getParameterValue("Sigma")
weight = math.pow(1./sigma, 2)
# X index
i = 0
for x in xvals:
diff = x-peak_centre
exp_term = math.exp(-0.5*diff*diff*weight)
jacobian.set(i, 0, exp_term)
jacobian.set(i, 1, diff*height*exp_term*weight)
# derivative with respect to weight not sigma
jacobian.set(i, 2, -0.5*diff*diff*height*exp_term)
i += 1
def setAttributeValue(self, name, value):
"""
This is called by the framework when an attribute is passed to Fit and its value set.
Its main use is to store the attribute value on the object once to avoid
repeated calls during the fitting process
"""
if name == "NTerms":
self._nterms = value
def activeParameter(self, index):
"""
Returns the value of the parameter that
is taking part in the fitting for the given index.
Only required if the fitting is to be done over
a different parameter than declared for some reason, i.e
stability
"""
param_value = self.getParameterValue(index)
if index == 2: # Sigma. Actually fit to 1/(sigma^2) for stability
return 1./math.pow(param_value, 2)
else:
return param_value
def setActiveParameter(self, index, value):
"""
Called by the fitting framework when a parameter value is updated.
Only required if the fitting is done over a different parameter
set than that declared
"""
param_value = value
if index == 2:
param_value = math.sqrt(math.fabs(1.0/value))
# Final explicit argument is required to be false here by the framework
self.setParameter(index, param_value, False)
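# Hypothetical round-trip check of the sigma <-> 1/sigma^2 reparametrisation
# used by activeParameter/setActiveParameter (pure Python, no Mantid needed):
#
#   sigma = 2.0
#   active = 1.0 / sigma ** 2                        # what the minimizer varies
#   assert abs(math.sqrt(math.fabs(1.0 / active)) - sigma) < 1e-12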
def centre(self):
"""
Return what should be considered the centre of this function. In this
simple case it is just the centre value but it can be any combination
of parameters
"""
return self.getParameterValue("PeakCentre")
def height(self):
"""
Return what should be considered the 'height' of this function. In this
simple case it is just the Height parameter value but it can be any combination
of parameters.
"""
return self.getParameterValue("Height")
def fwhm(self):
"""
Return what should be considered the 'fwhm' of this function.
"""
return 2.0*math.sqrt(2.0*math.log(2.0))*self.getParameterValue("Sigma")
def setCentre(self, new_centre):
"""
Called by an external entity, probably a GUI, in response to a mouse click
that gives a guess at the centre.
"""
self.setParameter("PeakCentre", new_centre)
def setHeight(self, new_height):
"""
Called by an external entity, probably a GUI, in response to a user guessing
the height.
"""
self.setParameter("Height", new_height)
def setFwhm(self, new_fwhm):
"""
Called by an external entity, probably a GUI, in response to a user guessing
the fwhm.
"""
sigma = new_fwhm/(2.0*math.sqrt(2.0*math.log(2.0)))
self.setParameter("Sigma", sigma)
# Required to have Mantid recognise the new function
FunctionFactory.subscribe(ExamplePeakFunction)
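# Hypothetical quick check of the FWHM <-> sigma relation assumed by
# fwhm()/setFwhm() above (pure Python; Mantid itself is not required):
#
#   factor = 2.0 * math.sqrt(2.0 * math.log(2.0))   # ~2.3548
#   fwhm = factor * 1.5                             # ~3.5322 for sigma = 1.5
#   sigma_back = fwhm / factor                      # recovers 1.5, as setFwhm does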
| gpl-3.0 |
meteorcloudy/tensorflow | tensorflow/contrib/distributions/python/kernel_tests/bijectors/permute_test.py | 14 | 3241 | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for Permute bijector."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.contrib.distributions.python.ops.bijectors.permute import Permute
from tensorflow.python.framework import dtypes
from tensorflow.python.ops import array_ops
from tensorflow.python.ops.distributions.bijector_test_util import assert_bijective_and_finite
from tensorflow.python.platform import test
class PermuteBijectorTest(test.TestCase):
"""Tests correctness of the Permute bijector."""
def setUp(self):
self._rng = np.random.RandomState(42)
def testBijector(self):
expected_permutation = np.int32([2, 0, 1])
expected_x = np.random.randn(4, 2, 3)
expected_y = expected_x[..., expected_permutation]
with self.test_session() as sess:
permutation_ph = array_ops.placeholder(dtype=dtypes.int32)
bijector = Permute(
permutation=permutation_ph,
validate_args=True)
[
permutation_,
x_,
y_,
fldj,
ildj,
] = sess.run([
bijector.permutation,
bijector.inverse(expected_y),
bijector.forward(expected_x),
bijector.forward_log_det_jacobian(expected_x, event_ndims=1),
bijector.inverse_log_det_jacobian(expected_y, event_ndims=1),
], feed_dict={permutation_ph: expected_permutation})
self.assertEqual("permute", bijector.name)
self.assertAllEqual(expected_permutation, permutation_)
self.assertAllClose(expected_y, y_, rtol=1e-6, atol=0)
self.assertAllClose(expected_x, x_, rtol=1e-6, atol=0)
self.assertAllClose(0., fldj, rtol=1e-6, atol=0)
self.assertAllClose(0., ildj, rtol=1e-6, atol=0)
def testRaisesOpError(self):
with self.test_session() as sess:
with self.assertRaisesOpError("Permutation over `d` must contain"):
permutation_ph = array_ops.placeholder(dtype=dtypes.int32)
bijector = Permute(
permutation=permutation_ph,
validate_args=True)
sess.run(bijector.inverse([1.]),
feed_dict={permutation_ph: [1, 2]})
def testBijectiveAndFinite(self):
permutation = np.int32([2, 0, 1])
x = np.random.randn(4, 2, 3)
y = x[..., permutation]
with self.test_session():
bijector = Permute(permutation=permutation, validate_args=True)
assert_bijective_and_finite(
bijector, x, y, event_ndims=1, rtol=1e-6, atol=0)
if __name__ == "__main__":
test.main()
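# A small NumPy-only sketch of the property these tests exercise: applying a
# permutation and then its argsort-inverse recovers the input (hypothetical
# data; TensorFlow is not required for this check):
#
#   perm = np.int32([2, 0, 1])
#   x = np.random.randn(4, 2, 3)
#   inv_perm = np.argsort(perm)                      # inverse permutation
#   assert np.array_equal(x[..., perm][..., inv_perm], x)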
| apache-2.0 |
jlspyaozhongkai/Uter | third_party_backup/Python-2.7.9/Lib/test/win_console_handler.py | 133 | 1419 | """Script used to test os.kill on Windows, for issue #1220212
This script is started as a subprocess in test_os and is used to test the
CTRL_C_EVENT and CTRL_BREAK_EVENT signals, which requires a custom handler
to be written into the kill target.
See http://msdn.microsoft.com/en-us/library/ms685049%28v=VS.85%29.aspx for a
similar example in C.
"""
from ctypes import wintypes, WINFUNCTYPE
import signal
import ctypes
import mmap
import sys
# Function prototype for the handler function. Returns BOOL, takes a DWORD.
HandlerRoutine = WINFUNCTYPE(wintypes.BOOL, wintypes.DWORD)
def _ctrl_handler(sig):
"""Handle a sig event and return 0 to terminate the process"""
if sig == signal.CTRL_C_EVENT:
pass
elif sig == signal.CTRL_BREAK_EVENT:
pass
else:
print("UNKNOWN EVENT")
return 0
ctrl_handler = HandlerRoutine(_ctrl_handler)
SetConsoleCtrlHandler = ctypes.windll.kernel32.SetConsoleCtrlHandler
SetConsoleCtrlHandler.argtypes = (HandlerRoutine, wintypes.BOOL)
SetConsoleCtrlHandler.restype = wintypes.BOOL
if __name__ == "__main__":
# Add our console control handling function with value 1
if not SetConsoleCtrlHandler(ctrl_handler, 1):
print("Unable to add SetConsoleCtrlHandler")
exit(-1)
# Awaken the main process
m = mmap.mmap(-1, 1, sys.argv[1])
m[0] = '1'
# Do nothing but wait for the signal
while True:
pass
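# How the parent test typically drives this script (a hedged sketch;
# Windows-only, and the mmap tag name passed in sys.argv[1] is chosen by
# the parent):
#
#   import os, signal, subprocess
#   p = subprocess.Popen([sys.executable, 'win_console_handler.py', 'tag'],
#                        creationflags=subprocess.CREATE_NEW_PROCESS_GROUP)
#   # ...wait for the shared mmap byte to flip to '1'...
#   os.kill(p.pid, signal.CTRL_BREAK_EVENT)   # handler returns 0 -> exit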
| gpl-3.0 |
deandunbar/html2bwml | venv/lib/python2.7/site-packages/django/views/decorators/cache.py | 74 | 2280 | from functools import wraps
from django.utils.decorators import decorator_from_middleware_with_args, available_attrs
from django.utils.cache import patch_cache_control, add_never_cache_headers
from django.middleware.cache import CacheMiddleware
def cache_page(*args, **kwargs):
"""
Decorator for views that tries getting the page from the cache and
populates the cache if the page isn't in the cache yet.
The cache is keyed by the URL and some data from the headers.
Additionally there is the key prefix that is used to distinguish different
cache areas in a multi-site setup. You could use the
get_current_site().domain, for example, as that is unique across a Django
project.
Additionally, all headers from the response's Vary header will be taken
into account on caching -- just like the middleware does.
"""
# We also add some asserts to give better error messages in case people are
# using other ways to call cache_page that no longer work.
if len(args) != 1 or callable(args[0]):
raise TypeError("cache_page has a single mandatory positional argument: timeout")
cache_timeout = args[0]
cache_alias = kwargs.pop('cache', None)
key_prefix = kwargs.pop('key_prefix', None)
if kwargs:
raise TypeError("cache_page has two optional keyword arguments: cache and key_prefix")
return decorator_from_middleware_with_args(CacheMiddleware)(cache_timeout=cache_timeout, cache_alias=cache_alias, key_prefix=key_prefix)
def cache_control(**kwargs):
def _cache_controller(viewfunc):
@wraps(viewfunc, assigned=available_attrs(viewfunc))
def _cache_controlled(request, *args, **kw):
response = viewfunc(request, *args, **kw)
patch_cache_control(response, **kwargs)
return response
return _cache_controlled
return _cache_controller
def never_cache(view_func):
"""
Decorator that adds headers to a response so that it will
never be cached.
"""
@wraps(view_func, assigned=available_attrs(view_func))
def _wrapped_view_func(request, *args, **kwargs):
response = view_func(request, *args, **kwargs)
add_never_cache_headers(response)
return response
return _wrapped_view_func
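# Typical usage in a views module (hypothetical views; the decorator
# signatures follow the definitions above):
#
#   @cache_page(60 * 15, key_prefix='site1')
#   def my_view(request):
#       ...
#
#   @cache_control(max_age=3600)
#   def other_view(request):
#       ...
#
#   @never_cache
#   def volatile_view(request):
#       ...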
| mit |
ruffsl/ros_buildfarm | ros_buildfarm/config/doc_build_file.py | 1 | 6923 | # Copyright 2013, 2015-2016 Open Source Robotics Foundation, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from .build_file import BuildFile
DOC_TYPE_ROSDOC = 'rosdoc_lite'
DOC_TYPE_MANIFEST = 'released_manifest'
DOC_TYPE_MAKE = 'make_target'
DOC_TYPES = [DOC_TYPE_ROSDOC, DOC_TYPE_MANIFEST, DOC_TYPE_MAKE]
class DocBuildFile(BuildFile):
_type = 'doc-build'
def __init__(self, name, data):
assert 'type' in data, \
"Expected file type is '%s'" % DocBuildFile._type
assert data['type'] == DocBuildFile._type, \
"Expected file type is '%s', not '%s'" % \
(DocBuildFile._type, data['type'])
assert 'version' in data, \
"Doc build file for '%s' lacks required version information" % \
name
assert int(data['version']) in [1, 2], \
("Unable to handle '%s' format version '%d', please update " +
"rosdistro (e.g. on Ubuntu/Debian use: sudo apt update && " +
"sudo apt install --only-upgrade python-rosdistro)") % \
(DocBuildFile._type, int(data['version']))
self.version = int(data['version'])
super(DocBuildFile, self).__init__(name, data)
# ensure that a single target is specified
assert len(self.targets) == 1
os_name = list(self.targets.keys())[0]
assert len(self.targets[os_name]) == 1
os_code_name = list(self.targets[os_name].keys())[0]
assert len(self.targets[os_name][os_code_name]) == 1
self.documentation_type = DOC_TYPE_ROSDOC
if 'documentation_type' in data:
assert data['documentation_type'] in DOC_TYPES, \
("Doc build file for '%s' has unknown documentation type " +
"'%s'") % (self.name, data['documentation_type'])
self.documentation_type = data['documentation_type']
# repository keys and urls can only be used with doc type rosdoc
is_rosdoc_type = self.documentation_type == DOC_TYPE_ROSDOC
assert not self.repository_keys or is_rosdoc_type
assert not self.repository_urls or is_rosdoc_type
self.canonical_base_url = None
if 'canonical_base_url' in data:
self.canonical_base_url = data['canonical_base_url']
assert not self.canonical_base_url or is_rosdoc_type
self.doc_repositories = []
if 'doc_repositories' in data:
self.doc_repositories = data['doc_repositories']
assert isinstance(self.doc_repositories, list)
# doc_repositories can only be used with doc type make_target
is_make_target_type = self.documentation_type == DOC_TYPE_MAKE
assert not self.doc_repositories or is_make_target_type
self.jenkins_job_label = None
if 'jenkins_job_label' in data:
self.jenkins_job_label = data['jenkins_job_label']
self.jenkins_job_priority = None
if 'jenkins_job_priority' in data:
self.jenkins_job_priority = int(data['jenkins_job_priority'])
self.jenkins_job_timeout = None
if 'jenkins_job_timeout' in data:
self.jenkins_job_timeout = int(data['jenkins_job_timeout'])
self.notify_committers = None
if 'notifications' in data:
if 'committers' in data['notifications']:
self.notify_committers = \
bool(data['notifications']['committers'])
# notify committers/maintainers can only be used with doc type rosdoc
assert not self.notify_committers or is_rosdoc_type
assert not self.notify_maintainers or is_rosdoc_type
self.package_blacklist = []
if 'package_blacklist' in data:
self.package_blacklist = data['package_blacklist']
assert isinstance(self.package_blacklist, list)
self.package_whitelist = []
if 'package_whitelist' in data:
self.package_whitelist = data['package_whitelist']
assert isinstance(self.package_whitelist, list)
# package black-/whitelist can only be used with doc type manifest
is_manifest_type = self.documentation_type == DOC_TYPE_MANIFEST
assert not self.package_blacklist or is_manifest_type
assert not self.package_whitelist or is_manifest_type
self.repository_blacklist = []
if 'repository_blacklist' in data:
self.repository_blacklist = data['repository_blacklist']
assert isinstance(self.repository_blacklist, list)
self.repository_whitelist = []
if 'repository_whitelist' in data:
self.repository_whitelist = data['repository_whitelist']
assert isinstance(self.repository_whitelist, list)
self.skip_ignored_repositories = None
if 'skip_ignored_repositories' in data:
self.skip_ignored_repositories = \
bool(data['skip_ignored_repositories'])
self.custom_rosdep_urls = []
if '_config' in data['targets']:
if 'custom_rosdep_urls' in data['targets']['_config']:
self.custom_rosdep_urls = \
data['targets']['_config']['custom_rosdep_urls']
assert isinstance(self.custom_rosdep_urls, list)
# repository black-/whitelist can only be used with doc type rosdoc
assert not self.repository_blacklist or is_rosdoc_type
assert not self.repository_whitelist or is_rosdoc_type
assert self.skip_ignored_repositories is None or is_rosdoc_type
# user host and docroot have default of uploading to the repo machine
# next to the debs
self.upload_user = data.get('upload_user', 'jenkins-slave')
self.upload_host = data.get('upload_host', 'repo')
self.upload_root = data.get('upload_root', '/var/repos/docs')
assert 'upload_credential_id' in data
self.upload_credential_id = data['upload_credential_id']
def filter_packages(self, package_names):
res = set(package_names)
if self.package_whitelist:
res &= set(self.package_whitelist)
res -= set(self.package_blacklist)
return res
def filter_repositories(self, repository_names):
res = set(repository_names)
if self.repository_whitelist:
res &= set(self.repository_whitelist)
res -= set(self.repository_blacklist)
return res
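# An illustrative check of the white-/blacklist semantics implemented by
# filter_packages (hypothetical attribute values; a real DocBuildFile is
# constructed from a full build-file dict):
#
#   bf.package_whitelist = ['pkg_a', 'pkg_b']
#   bf.package_blacklist = ['pkg_b']
#   assert bf.filter_packages(['pkg_a', 'pkg_b', 'pkg_c']) == {'pkg_a'}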
| apache-2.0 |
samirelanduk/molecupy | tests/unit/test_base.py | 2 | 8452 | from collections import OrderedDict
from unittest import TestCase
from unittest.mock import Mock, patch, PropertyMock, MagicMock
from atomium.base import *
class ObjectFromFilterTests(TestCase):
def test_can_get_same_object(self):
obj = Mock()
obj.__lt__ = MagicMock()
obj2 = get_object_from_filter(obj, ["height"])
self.assertIs(obj, obj2)
obj2 = get_object_from_filter(obj, ["height", "regex"])
self.assertIs(obj, obj2)
obj2 = get_object_from_filter(obj, ["height", "lt"])
self.assertIs(obj, obj2)
def test_can_get_chained_object(self):
obj = Mock()
obj2 = get_object_from_filter(obj, ["o1", "o2", "o3", "height", "regex"])
self.assertIs(obj2, obj.o1.o2.o3)
obj2 = get_object_from_filter(obj, ["o1", "o2", "o3", "height"])
self.assertIs(obj2, obj.o1.o2.o3)
class AttributeGettingTests(TestCase):
def test_can_get_basic_attribute(self):
obj = Mock(x=10)
self.assertEqual(get_object_attribute_from_filter(obj, ["x"]), 10)
self.assertEqual(get_object_attribute_from_filter(obj, ["y", "x"]), 10)
def test_can_get_attribute_from_early_chain(self):
obj = Mock(x=10)
del obj.regex
self.assertEqual(get_object_attribute_from_filter(obj, ["x", "regex"]), 10)
def test_can_get_no_attribute(self):
obj = Mock(x=10)
del obj.y
self.assertIsNone(get_object_attribute_from_filter(obj, ["y"]))
class AttributeMatchingTests(TestCase):
def test_exact_match(self):
self.assertTrue(attribute_matches_value(10, 10, ["height", "xy"]))
self.assertFalse(attribute_matches_value(10, 11, ["height", "xy"]))
def test_regex_match(self):
self.assertTrue(attribute_matches_value("jon", "jon|joe", ["name", "regex"]))
self.assertFalse(attribute_matches_value("jon", "jon|joe", ["name", "rogox"]))
def test_magic_method_match(self):
self.assertTrue(attribute_matches_value(12, 10, ["height", "gt"]))
self.assertFalse(attribute_matches_value(10, 10, ["height", "gt"]))
self.assertTrue(attribute_matches_value(10, 10, ["height", "gte"]))
class ObjectFilteringTests(TestCase):
@patch("atomium.base.get_object_from_filter")
@patch("atomium.base.get_object_attribute_from_filter")
@patch("atomium.base.attribute_matches_value")
@patch("atomium.base.StructureSet")
def test_can_filter_objects(self, mock_s, mock_match, mock_getat, mock_getob):
structures=[
Mock(x="A", y=1), Mock(x="B", y=3), Mock(x="B", y=3),
Mock(x="C", y=2), Mock(x="D", y=4), Mock(x="D", y=4)
]
objects = Mock(structures=structures)
mock_getob.side_effect = lambda s, c: s
mock_getat.side_effect = lambda s, c: c[0]
mock_match.side_effect = [False, True, False, True, False, False]
filter_objects(objects, "key__key2__key_3", "value")
for structure in structures:
mock_getob.assert_any_call(structure, ["key", "key2", "key_3"])
mock_getat.assert_any_call(structure, ["key", "key2", "key_3"])
mock_match.assert_any_call("key", "value", ["key", "key2", "key_3"])
mock_s.assert_called_with(structures[1], structures[3])
class QueryDecoratorTests(TestCase):
def setUp(self):
self.s = Mock(structures={2, 4, 6}, ids={1, 3, 5})
self.f = lambda s: self.s
def test_can_get_unfiltered_objects(self):
f = query(self.f)
self.assertEqual(f(self), {2, 4, 6})
@patch("atomium.base.filter_objects")
def test_can_get_filtered_objects(self, mock_filter):
mock_filter.side_effect = [Mock(structures={20}, ids={10})]
f = query(self.f)
self.assertEqual(f(self, a=1), {20})
mock_filter.assert_any_call(self.s, "a", 1)
@patch("atomium.base.filter_objects")
def test_can_get_filtered_objects_as_tuple(self, mock_filter):
mock_filter.side_effect = [Mock(structures={2}, ids={1})]
f = query(self.f, tuple_=True)
self.assertEqual(f(self, a=1), (2,))
mock_filter.assert_any_call(self.s, "a", 1)
def test_can_get_objects_by_id(self):
f = query(self.f)
self.assertEqual(f(self, 3), {self.s.get.return_value})
self.s.get.assert_called_with(3)
self.assertEqual(f(self, 8), set())
class GetOneDecoratorTests(TestCase):
def test_can_get_one(self):
f = lambda s: [4, 6, 7]
f = getone(f)
self.assertEqual(f(self), 4)
def test_can_get_none(self):
f = lambda s: []
f = getone(f)
self.assertEqual(f(self), None)
class StructureClassMetaclassTests(TestCase):
@patch("atomium.base.query")
@patch("atomium.base.getone")
def test_structure_class_metaclass(self, mock_getone, mock_query):
class TestClass(metaclass=StructureClass):
def a(self): return 1000
def chains(self): return {1: 2, 3: 4}
def residues(self): return {10: 2, 30: 4}
def ligands(self): return {11: 2, 31: 4}
def waters(self): return {12: 2, 32: 4}
def molecules(self): return {13: 2, 33: 4}
def atoms(self): return {14: 2, 34: 4}
def b(self): return 2000
obj = TestClass()
self.assertIs(obj.chains, mock_query.return_value)
self.assertIs(obj.chain, mock_getone.return_value)
self.assertIs(obj.residues, mock_query.return_value)
self.assertIs(obj.residue, mock_getone.return_value)
self.assertIs(obj.ligands, mock_query.return_value)
self.assertIs(obj.ligand, mock_getone.return_value)
self.assertIs(obj.waters, mock_query.return_value)
self.assertIs(obj.water, mock_getone.return_value)
self.assertIs(obj.molecules, mock_query.return_value)
self.assertIs(obj.molecule, mock_getone.return_value)
self.assertIs(obj.atoms, mock_query.return_value)
self.assertIs(obj.atom, mock_getone.return_value)
self.assertEqual(obj.a(), 1000)
self.assertEqual(obj.b(), 2000)
class StructureSetTests(TestCase):
def test_can_make_structure_set(self):
objects = [Mock(_id=n) for n in range(5)]
s = StructureSet(*objects)
self.assertEqual(s._d, {
0: {objects[0]}, 1: {objects[1]}, 2: {objects[2]},
3: {objects[3]}, 4: {objects[4]}
})
objects[2]._id = 0
s = StructureSet(*objects)
self.assertEqual(s._d, {
0: {objects[0], objects[2]}, 1: {objects[1]},
3: {objects[3]}, 4: {objects[4]}
})
def test_can_add_two_structure_sets(self):
objects = [Mock(_id=n) for n in range(5)]
objects[2]._id = 0
s1 = StructureSet(*objects[:3])
s2 = StructureSet(*objects[3:])
self.assertEqual(s1._d, {
0: {objects[0], objects[2]}, 1: {objects[1]},
})
self.assertEqual(s2._d, {3: {objects[3]}, 4: {objects[4]}})
s3 = s1 + s2
self.assertEqual(s3._d, {
0: {objects[0], objects[2]}, 1: {objects[1]},
3: {objects[3]}, 4: {objects[4]}
})
def test_can_get_length_of_structure_sets(self):
objects = [Mock(_id=n) for n in range(5)]
s = StructureSet(*objects)
self.assertEqual(len(s), 5)
objects[2]._id = 0
s = StructureSet(*objects)
self.assertEqual(len(s), 5)
def test_can_get_structure_set_ids(self):
objects = [Mock(_id=n) for n in range(5)]
s = StructureSet(*objects)
self.assertEqual(s.ids, {0, 1, 2, 3, 4})
def test_can_get_structure_set_structures(self):
objects = [Mock(_id=n) for n in range(5)]
s = StructureSet(*objects)
self.assertEqual(s.structures, objects)
objects[2]._id = 0
s = StructureSet(*objects)
self.assertEqual(set(s.structures), set(objects))
def test_can_get_structures_by_id(self):
objects = [Mock(_id=n) for n in range(5)]
s = StructureSet(*objects)
self.assertEqual(s.get(0), objects[0])
self.assertEqual(s.get(4), objects[4])
self.assertEqual(s.get(5), None)
objects[2]._id = 0
s = StructureSet(*objects)
self.assertIn(s.get(0), (objects[0], objects[2]))
self.assertEqual(s.get(4), objects[4])
self.assertEqual(s.get(2), None) | mit |
dmnfarrell/peat | PEATSA/Tools/TDCycle.py | 1 | 6414 | #! /bin/env python
#
# Protein Engineering Analysis Tool Structure Analysis (PEATSA)
# Copyright (C) 2010 Michael Johnston & Jens Erik Nielsen
#
# Author: Michael Johnston
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
# Contact information:
# Email: Jens.Nielsen_at_gmail.com
# Normal mail:
# Jens Nielsen
# SBBS, Conway Institute
# University College Dublin
# Dublin 4, Ireland
#
#
# Five different types of free-energy differences can be specified.
# A. Stability - ΔGstability(A) = G(Fold A) - G(Unfold A)
# B. Binding - ΔGbinding(AB) = G(AB) - G(Bound A + Ligand)
# C. Interaction - Protein-Protein interaction: ΔGpp(A + B -> AB) = ΔGstability(AB) - ΔGstability(A) - ΔGstability(B)
# D. BindingWithChange - ΔGbwc(A + B -> AB) = ΔGbinding(AB) + ΔGstability(A Bound Conf) - ΔGstability(A Unbound Conf)
# E. Mutation - ΔG(WT -> M) = ΔGstability(M) - ΔGstability(WT) + G(WT Unfold -> M unfold)
# However the last term is assumed to be zero. Also this does not work with bound ligand states.
# Each difference can be calculated for the WT and any number of mutants.
# This allows WT and Mutant cycles to be compared.
import sys
import Environment, Utilities, Data
from ProteinDesignTool import ProteinDesignTool
def InteractionFreeEnergy(structureA, structureB, complex, mutantListA, mutantListB):
'''Calculates the free-energy difference between two structures and their complex.
ΔGstability(AB) - ΔGstability(A) - ΔGstability(B) '''
proteinDesignTool.dataDirectory = interactionData
#ΔGstability(AB)
proteinDesignTool.setPDB(complex)
proteinDesignTool.runStabilityCalculation(mutantFiles=mutantFiles, resultsName='StabilityAB')
proteinDesignTool.setPDB(structureA)
proteinDesignTool.runStabilityCalculation(mutantFiles=mutantFiles, resultsName='StabilityA')
proteinDesignTool.setPDB(structureB)
proteinDesignTool.runStabilityCalculation(mutantFiles=mutantFiles, resultsName='StabilityB')
#Totals for WT and each mutant
for mutation in mutations:
ab = interactionData.stabilityAB.dataForMutation[mutation]
a = interactionData.stabilityA.dataForMutation[mutation]
b = interactionData.stabilityB.dataForMutation[mutation]
#Compute the resulting totals
result = ab - a - b
# BindingWithChange - ΔGbwc(A + B -> AB) = ΔGbinding(AB) + ΔGstability(A Bound Conf) - ΔGstability(A Unbound Conf)
def BindingWithChange(holoStructure, apoStructure, ligand, mutations):
#
#ΔGstability(Unbound Conf)
#
#Pass the HOLO pdb file to the ProteinDesignTool instance.
#This cleans the pdb and copies it to the working directory
proteinDesignTool.setPDB(pdb=holoStructure, dataDirectoryName='BindingWithChange')
resultsData = proteinDesignTool.dataDirectory
#Create the mutants - NOTE: Must use cleaned pdb file
mutantCollection = Data.MutantCollection(pdbFile=proteinDesignTool.pdbFile,
mutationList=mutations,
location=proteinDesignTool.outputDirectory,
maxOverlap=proteinDesignTool.configuration.getfloat('PKA SCAN PARAMETERS', 'mutation_quality'),
clean=False)
#Run the calculation
proteinDesignTool.runStabilityCalculation(mutantFiles=mutantCollection.mutantFiles(),
resultsName='StabilityHolo')
#
#ΔGstability(Bound Conf - No Ligand)
#
#Pass the APO pdb file to the tool. NOTE: The ligand is removed here during the cleaning process
#Then create mutants with the ligand in place
proteinDesignTool.setPDB(pdb=apoStructure)
mutantCollection = Data.MutantCollection(pdbFile=proteinDesignTool.pdbFile,
mutationList=mutations,
ligandFiles=[ligand],
location=proteinDesignTool.outputDirectory,
maxOverlap=proteinDesignTool.configuration.getfloat('PKA SCAN PARAMETERS', 'mutation_quality'),
clean=False)
proteinDesignTool.runStabilityCalculation(mutantFiles=mutantCollection.mutantFiles(), resultsName='StabilityApoUnbound')
#ΔGstability(Bound Conf - With Ligand)
#Pass the APO pdb to the tool this time keeping the ligand.
#NOTE: The mutant files must also contain the ligand coordinates
proteinDesignTool.setPDB(apoStructure, removeLigand=False)
proteinDesignTool.runStabilityCalculation(mutantFiles=mutantCollection.mutantFiles(), resultsName='StabilityApoBound')
#Do the sum FreeEnergy = ΔGStability(AB) + ΔGstability(A Bound Conf) - ΔGstability(A Unbound Conf)
results = []
headers = ['Mutation', 'DeltaBind', 'DeltaDeltaApoBoundStability', 'DeltaDeltaApoUnboundStability', 'DeltaDeltaHoloStability']
for i in range(resultsData.stabilityHolo.numberOfRows()):
data = [resultsData.stabilityApoBound.total[i], resultsData.stabilityApoUnbound.total[i], resultsData.stabilityHolo.total[i]]
freeEnergy = data[0] + data[1] - data[2]
data.insert(0, freeEnergy)
data.insert(0, resultsData.stabilityApoBound.mutation[i])
results.append(data)
matrix = Data.PEATSAMatrix(rows=results, headers=headers, name='DeltaBinding')
resultsData.addMatrix(matrix, name='DeltaBinding')
def main():
'''Main function for the TDCycle program.
Parses the command line and starts the jobs requested'''
environment = Environment.Environment()
#Read arguments
parser = Utilities.CommandLineParser()
parser.parseCommandLine()
if parser.helpRequested() is True:
print __doc__
sys.exit(0)
if parser.createConfigurationRequested() is True:
print 'Creating default configuration file, proteinDesignTool.conf, in the current directory'
print 'See the programs README file for information on the options'
Environment.Configuration(searchDefaultLocations=False, writeFile=True)
sys.exit(0)
#Create the ProteinDesignTool instance
tool = ProteinDesignTool(parser.configurationFile(),
workingDirectory=parser.workingDirectory(),
pdbFile=parser.pdbFile(),
outputDirectory=parser.outputDirectory())
| mit |
msimet/Stile | stile/stile_utils.py | 1 | 7951 | """
stile_utils.py: Various utilities for the Stile pipeline. Includes input parsing and some
numerical helper functions.
"""
import numpy
def Parser():
"""
Returns an argparse.ArgumentParser object with input args used by Stile and TreeCorr.
"""
from . import treecorr_utils
import argparse
p = argparse.ArgumentParser(parents=[treecorr_utils.Parser()])
#TODO: add, obviously, EVERYTHING ELSE
return p
def FormatArray(d, fields=None):
"""
Turn a regular NumPy array of arbitrary types into a formatted array, with optional field name
description.
This function uses the existing dtype of the array ``d``. This means that arrays of
heterogeneous objects may not return the dtype you expect (for example, ints will be
converted to floats if there are floats in the array, or all numbers will be converted to
strings if there are any strings in the array). Predefining the format or using a function like
:func:`numpy.genfromtxt` will prevent these issues, as will reading from a FITS file.
:param d: A NumPy array.
:param fields: A dictionary whose keys are the names of the fields you'd like for the output
array, and whose values are field numbers (starting with 0) whose names those
keys should replace (or, if the array is already formatted, the existing field
names the keys should replace); alternately, a list with the same length as the
rows of ``d``. [default: None]
:returns: A formatted numpy array with the same shape as ``d`` except that the innermost
dimension has turned into a record field if it was not already one, optionally
with field names appropriately replaced.
"""
# We want arrays to be numpy.arrays with field access (so we can say d['ra'] or something like
# that). In order for these to be created correctly, two conditions have to be met:
# - The array needs to be initialized with the innermost dimension as a tuple rather than
# a list or other array, so NumPy knows that it's a record field;
# - The array has to be created with a dtype that indicates there are multiple fields. For
# convenience I'm going to do this as a single string of the form '?,?,?[...]' where each
# question mark is a single character (plus optional width for strings/voids) denoting what
# kind of data to expect. (This is the "array-protocol type string", see
# http://docs.scipy.org/doc/numpy/reference/arrays.dtypes.html)
if not hasattr(d, 'dtype'):
# If it's not an array, make it one.
d = numpy.array(d)
if not d.dtype.names:
# If it is an array, but doesn't have a "names" attribute, that means it doesn't have
# records/fields. So we need to reformat the array. Given the difficulty of generating
# an individual dtype for each field, we'll just use the dtype of the overall array for
# every entry, which involves no casting of types.
d_shape = d.shape
if len(d_shape) == 1: # Assume this was a single row (not a set of 1-column rows)
d = numpy.array([d])
d_shape = d.shape
# Cast this into a 2-d array
new_d = d.reshape(-1, d_shape[-1])
# Generate the dtype string
if isinstance(d.dtype, str):
dtype = ','.join([d.dtype]*len(d[0]))
else:
dtype_char = d.dtype.char
if dtype_char == 'S' or dtype_char == 'O' or dtype_char == 'V' or dtype_char == 'U':
dtype = ','.join([d.dtype.str]*len(new_d[0])) # need the width as well as the char
else:
dtype = ','.join([dtype_char]*len(new_d[0]))
# Make a new array with each row turned into a tuple and the correct dtype
d = numpy.array([tuple(nd) for nd in new_d], dtype=dtype)
if len(d_shape) > 1:
# If this was a more-than-2d array, reshape it back to that original form, minus the
# dimension we turned into a record (which will no longer appear in the shape).
d = d.reshape(d_shape[:-1])
if fields:
# If the "fields" parameter was set, rewrite the numpy.dtype.names attribute to be the
# field specification we want.
if isinstance(fields, dict):
names = list(d.dtype.names)
for key in fields:
names[fields[key]] = key
d.dtype.names = names
elif len(fields) == len(d.dtype.names):
d.dtype.names = fields
else:
raise RuntimeError('Cannot use given fields: '+str(fields))
return d
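# A minimal usage sketch for FormatArray (hypothetical data):
#
#   d = numpy.array([[0., 1.], [2., 3.]])
#   rec = FormatArray(d, fields=['ra', 'dec'])
#   rec['ra']    # -> array([ 0., 2.])
#   rec['dec']   # -> array([ 1., 3.])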
class Stats:
"""A Stats object can carry around and output the statistics of some array.
Currently it can carry around two types of statistics:
(1) Basic array statistics: typically one would use length (N), min, max, median, mean, standard
deviation (stddev), variance, median absolute deviation ('mad') as defined using the
``simple_stats`` option at initialization.
(2) Percentiles: the value at a given percentile level.
The :class:`StatSysTest <stile.sys_tests.StatSysTest>` class can be used to create and populate
values for one of these objects. If you want to change the list of simple statistics, it's
only necessary to change the code there, not here.
"""
def __init__(self, simple_stats):
self.simple_stats = simple_stats
for stat in self.simple_stats:
setattr(self, stat, None)
self.percentiles = None
self.values = None
def __str__(self):
"""This routine will print the contents of the ``Stats`` object in a nice format.
We assume that the ``Stats`` object was created by a :class:`StatSysTest`, so that certain
sanity checks have already been done (e.g., self.percentiles, if not None, is iterable)."""
# Preamble:
ret_str = 'Summary statistics:\n'
# Loop over simple statistics and print them, if not None. Generically if one is None then
# all will be, so just check one.
test_val = getattr(self, self.simple_stats[0])
if test_val is not None:
for stat in self.simple_stats:
this_val = getattr(self, stat)
ret_str += '\t%s: %f\n' % (stat, this_val)
ret_str += '\n'
# Loop over combinations of percentiles and values, and print them.
if self.percentiles is not None:
ret_str += 'Below are lists of (percentile, value) combinations:\n'
for index in range(len(self.percentiles)):
ret_str += '\t%f %f\n'%(self.percentiles[index], self.values[index])
return ret_str
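# A hedged usage sketch: populate a Stats instance by hand and print it
# (normally a StatSysTest fills these fields):
#
#   s = Stats(simple_stats=['N', 'mean'])
#   s.N, s.mean = 3, 1.5
#   s.percentiles, s.values = [50.0], [1.2]
#   print(s)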
fieldNames = {
'dec': 'the declination of the object',
'ra': 'the RA of the object',
'x': 'the x coordinate of the object',
'y': 'the y coordinate of the object',
'g1': 'g1, a shear component in the ra direction',
'g2': 'g2, a shear component 45 degrees from the ra direction',
'sigma': 'a size parameter for objects with dimension [length] in arbitrary units',
'psf_g1': 'the g1 of the psf at the location of this object',
'psf_g2': 'the g2 of the psf at the location of this object',
'psf_sigma': 'the sigma of the psf at the location of this object',
'w': 'the weight to apply per object',
'z': 'the redshift of the object'}
objectNames = {
'galaxy': 'galaxy data',
'star': 'star data',
'galaxy lens': 'galaxies to be used as lenses in galaxy-galaxy lensing',
'star PSF': 'stars used in PSF determination',
'star bright': 'especially bright stars',
'galaxy random': 'random catalog corresponding to the "galaxy" sample',
'star random': 'random catalog corresponding to the "star" sample'
}
| bsd-3-clause |
raymondgom/pmip6ns3.13new | src/mpi/bindings/modulegen__gcc_ILP32.py | 34 | 178173 | from pybindgen import Module, FileCodeSink, param, retval, cppclass, typehandlers
import pybindgen.settings
import warnings
class ErrorHandler(pybindgen.settings.ErrorHandler):
def handle_error(self, wrapper, exception, traceback_):
warnings.warn("exception %r in wrapper %s" % (exception, wrapper))
return True
pybindgen.settings.error_handler = ErrorHandler()
import sys
def module_init():
root_module = Module('ns.mpi', cpp_namespace='::ns3')
return root_module
def register_types(module):
root_module = module.get_root()
## attribute-construction-list.h (module 'core'): ns3::AttributeConstructionList [class]
module.add_class('AttributeConstructionList', import_from_module='ns.core')
## attribute-construction-list.h (module 'core'): ns3::AttributeConstructionList::Item [struct]
module.add_class('Item', import_from_module='ns.core', outer_class=root_module['ns3::AttributeConstructionList'])
## buffer.h (module 'network'): ns3::Buffer [class]
module.add_class('Buffer', import_from_module='ns.network')
## buffer.h (module 'network'): ns3::Buffer::Iterator [class]
module.add_class('Iterator', import_from_module='ns.network', outer_class=root_module['ns3::Buffer'])
## packet.h (module 'network'): ns3::ByteTagIterator [class]
module.add_class('ByteTagIterator', import_from_module='ns.network')
## packet.h (module 'network'): ns3::ByteTagIterator::Item [class]
module.add_class('Item', import_from_module='ns.network', outer_class=root_module['ns3::ByteTagIterator'])
## byte-tag-list.h (module 'network'): ns3::ByteTagList [class]
module.add_class('ByteTagList', import_from_module='ns.network')
## byte-tag-list.h (module 'network'): ns3::ByteTagList::Iterator [class]
module.add_class('Iterator', import_from_module='ns.network', outer_class=root_module['ns3::ByteTagList'])
## byte-tag-list.h (module 'network'): ns3::ByteTagList::Iterator::Item [struct]
module.add_class('Item', import_from_module='ns.network', outer_class=root_module['ns3::ByteTagList::Iterator'])
## callback.h (module 'core'): ns3::CallbackBase [class]
module.add_class('CallbackBase', import_from_module='ns.core')
## event-id.h (module 'core'): ns3::EventId [class]
module.add_class('EventId', import_from_module='ns.core')
## distributed-simulator-impl.h (module 'mpi'): ns3::LbtsMessage [class]
module.add_class('LbtsMessage')
## mpi-interface.h (module 'mpi'): ns3::MpiInterface [class]
module.add_class('MpiInterface')
## object-base.h (module 'core'): ns3::ObjectBase [class]
module.add_class('ObjectBase', allow_subclassing=True, import_from_module='ns.core')
## object.h (module 'core'): ns3::ObjectDeleter [struct]
module.add_class('ObjectDeleter', import_from_module='ns.core')
## object-factory.h (module 'core'): ns3::ObjectFactory [class]
module.add_class('ObjectFactory', import_from_module='ns.core')
## packet-metadata.h (module 'network'): ns3::PacketMetadata [class]
module.add_class('PacketMetadata', import_from_module='ns.network')
## packet-metadata.h (module 'network'): ns3::PacketMetadata::Item [struct]
module.add_class('Item', import_from_module='ns.network', outer_class=root_module['ns3::PacketMetadata'])
## packet-metadata.h (module 'network'): ns3::PacketMetadata::Item [enumeration]
module.add_enum('', ['PAYLOAD', 'HEADER', 'TRAILER'], outer_class=root_module['ns3::PacketMetadata::Item'], import_from_module='ns.network')
## packet-metadata.h (module 'network'): ns3::PacketMetadata::ItemIterator [class]
module.add_class('ItemIterator', import_from_module='ns.network', outer_class=root_module['ns3::PacketMetadata'])
## packet.h (module 'network'): ns3::PacketTagIterator [class]
module.add_class('PacketTagIterator', import_from_module='ns.network')
## packet.h (module 'network'): ns3::PacketTagIterator::Item [class]
module.add_class('Item', import_from_module='ns.network', outer_class=root_module['ns3::PacketTagIterator'])
## packet-tag-list.h (module 'network'): ns3::PacketTagList [class]
module.add_class('PacketTagList', import_from_module='ns.network')
## packet-tag-list.h (module 'network'): ns3::PacketTagList::TagData [struct]
module.add_class('TagData', import_from_module='ns.network', outer_class=root_module['ns3::PacketTagList'])
## mpi-interface.h (module 'mpi'): ns3::SentBuffer [class]
module.add_class('SentBuffer')
## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::Object, ns3::ObjectBase, ns3::ObjectDeleter> [class]
module.add_class('SimpleRefCount', automatic_type_narrowing=True, import_from_module='ns.core', template_parameters=['ns3::Object', 'ns3::ObjectBase', 'ns3::ObjectDeleter'], parent=root_module['ns3::ObjectBase'], memory_policy=cppclass.ReferenceCountingMethodsPolicy(incref_method='Ref', decref_method='Unref', peekref_method='GetReferenceCount'))
## tag.h (module 'network'): ns3::Tag [class]
module.add_class('Tag', import_from_module='ns.network', parent=root_module['ns3::ObjectBase'])
## tag-buffer.h (module 'network'): ns3::TagBuffer [class]
module.add_class('TagBuffer', import_from_module='ns.network')
## type-id.h (module 'core'): ns3::TypeId [class]
module.add_class('TypeId', import_from_module='ns.core')
## type-id.h (module 'core'): ns3::TypeId::AttributeFlag [enumeration]
module.add_enum('AttributeFlag', ['ATTR_GET', 'ATTR_SET', 'ATTR_CONSTRUCT', 'ATTR_SGC'], outer_class=root_module['ns3::TypeId'], import_from_module='ns.core')
## type-id.h (module 'core'): ns3::TypeId::AttributeInformation [struct]
module.add_class('AttributeInformation', import_from_module='ns.core', outer_class=root_module['ns3::TypeId'])
## type-id.h (module 'core'): ns3::TypeId::TraceSourceInformation [struct]
module.add_class('TraceSourceInformation', import_from_module='ns.core', outer_class=root_module['ns3::TypeId'])
## empty.h (module 'core'): ns3::empty [class]
module.add_class('empty', import_from_module='ns.core')
## int64x64-double.h (module 'core'): ns3::int64x64_t [class]
module.add_class('int64x64_t', import_from_module='ns.core')
## chunk.h (module 'network'): ns3::Chunk [class]
module.add_class('Chunk', import_from_module='ns.network', parent=root_module['ns3::ObjectBase'])
## header.h (module 'network'): ns3::Header [class]
module.add_class('Header', import_from_module='ns.network', parent=root_module['ns3::Chunk'])
## object.h (module 'core'): ns3::Object [class]
module.add_class('Object', import_from_module='ns.core', parent=root_module['ns3::SimpleRefCount< ns3::Object, ns3::ObjectBase, ns3::ObjectDeleter >'])
## object.h (module 'core'): ns3::Object::AggregateIterator [class]
module.add_class('AggregateIterator', import_from_module='ns.core', outer_class=root_module['ns3::Object'])
## scheduler.h (module 'core'): ns3::Scheduler [class]
module.add_class('Scheduler', import_from_module='ns.core', parent=root_module['ns3::Object'])
## scheduler.h (module 'core'): ns3::Scheduler::Event [struct]
module.add_class('Event', import_from_module='ns.core', outer_class=root_module['ns3::Scheduler'])
## scheduler.h (module 'core'): ns3::Scheduler::EventKey [struct]
module.add_class('EventKey', import_from_module='ns.core', outer_class=root_module['ns3::Scheduler'])
## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::AttributeAccessor, ns3::empty, ns3::DefaultDeleter<ns3::AttributeAccessor> > [class]
module.add_class('SimpleRefCount', automatic_type_narrowing=True, import_from_module='ns.core', template_parameters=['ns3::AttributeAccessor', 'ns3::empty', 'ns3::DefaultDeleter<ns3::AttributeAccessor>'], parent=root_module['ns3::empty'], memory_policy=cppclass.ReferenceCountingMethodsPolicy(incref_method='Ref', decref_method='Unref', peekref_method='GetReferenceCount'))
## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::AttributeChecker, ns3::empty, ns3::DefaultDeleter<ns3::AttributeChecker> > [class]
module.add_class('SimpleRefCount', automatic_type_narrowing=True, import_from_module='ns.core', template_parameters=['ns3::AttributeChecker', 'ns3::empty', 'ns3::DefaultDeleter<ns3::AttributeChecker>'], parent=root_module['ns3::empty'], memory_policy=cppclass.ReferenceCountingMethodsPolicy(incref_method='Ref', decref_method='Unref', peekref_method='GetReferenceCount'))
## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::AttributeValue, ns3::empty, ns3::DefaultDeleter<ns3::AttributeValue> > [class]
module.add_class('SimpleRefCount', automatic_type_narrowing=True, import_from_module='ns.core', template_parameters=['ns3::AttributeValue', 'ns3::empty', 'ns3::DefaultDeleter<ns3::AttributeValue>'], parent=root_module['ns3::empty'], memory_policy=cppclass.ReferenceCountingMethodsPolicy(incref_method='Ref', decref_method='Unref', peekref_method='GetReferenceCount'))
## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::CallbackImplBase, ns3::empty, ns3::DefaultDeleter<ns3::CallbackImplBase> > [class]
module.add_class('SimpleRefCount', automatic_type_narrowing=True, import_from_module='ns.core', template_parameters=['ns3::CallbackImplBase', 'ns3::empty', 'ns3::DefaultDeleter<ns3::CallbackImplBase>'], parent=root_module['ns3::empty'], memory_policy=cppclass.ReferenceCountingMethodsPolicy(incref_method='Ref', decref_method='Unref', peekref_method='GetReferenceCount'))
## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::EventImpl, ns3::empty, ns3::DefaultDeleter<ns3::EventImpl> > [class]
module.add_class('SimpleRefCount', automatic_type_narrowing=True, import_from_module='ns.core', template_parameters=['ns3::EventImpl', 'ns3::empty', 'ns3::DefaultDeleter<ns3::EventImpl>'], parent=root_module['ns3::empty'], memory_policy=cppclass.ReferenceCountingMethodsPolicy(incref_method='Ref', decref_method='Unref', peekref_method='GetReferenceCount'))
## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::NixVector, ns3::empty, ns3::DefaultDeleter<ns3::NixVector> > [class]
module.add_class('SimpleRefCount', automatic_type_narrowing=True, import_from_module='ns.core', template_parameters=['ns3::NixVector', 'ns3::empty', 'ns3::DefaultDeleter<ns3::NixVector>'], parent=root_module['ns3::empty'], memory_policy=cppclass.ReferenceCountingMethodsPolicy(incref_method='Ref', decref_method='Unref', peekref_method='GetReferenceCount'))
## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::Packet, ns3::empty, ns3::DefaultDeleter<ns3::Packet> > [class]
module.add_class('SimpleRefCount', automatic_type_narrowing=True, import_from_module='ns.core', template_parameters=['ns3::Packet', 'ns3::empty', 'ns3::DefaultDeleter<ns3::Packet>'], parent=root_module['ns3::empty'], memory_policy=cppclass.ReferenceCountingMethodsPolicy(incref_method='Ref', decref_method='Unref', peekref_method='GetReferenceCount'))
## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::TraceSourceAccessor, ns3::empty, ns3::DefaultDeleter<ns3::TraceSourceAccessor> > [class]
module.add_class('SimpleRefCount', automatic_type_narrowing=True, import_from_module='ns.core', template_parameters=['ns3::TraceSourceAccessor', 'ns3::empty', 'ns3::DefaultDeleter<ns3::TraceSourceAccessor>'], parent=root_module['ns3::empty'], memory_policy=cppclass.ReferenceCountingMethodsPolicy(incref_method='Ref', decref_method='Unref', peekref_method='GetReferenceCount'))
## simulator-impl.h (module 'core'): ns3::SimulatorImpl [class]
module.add_class('SimulatorImpl', import_from_module='ns.core', parent=root_module['ns3::Object'])
## nstime.h (module 'core'): ns3::Time [class]
module.add_class('Time', import_from_module='ns.core')
## nstime.h (module 'core'): ns3::Time::Unit [enumeration]
module.add_enum('Unit', ['S', 'MS', 'US', 'NS', 'PS', 'FS', 'LAST'], outer_class=root_module['ns3::Time'], import_from_module='ns.core')
## nstime.h (module 'core'): ns3::Time [class]
root_module['ns3::Time'].implicitly_converts_to(root_module['ns3::int64x64_t'])
## trace-source-accessor.h (module 'core'): ns3::TraceSourceAccessor [class]
module.add_class('TraceSourceAccessor', import_from_module='ns.core', parent=root_module['ns3::SimpleRefCount< ns3::TraceSourceAccessor, ns3::empty, ns3::DefaultDeleter<ns3::TraceSourceAccessor> >'])
## trailer.h (module 'network'): ns3::Trailer [class]
module.add_class('Trailer', import_from_module='ns.network', parent=root_module['ns3::Chunk'])
## attribute.h (module 'core'): ns3::AttributeAccessor [class]
module.add_class('AttributeAccessor', import_from_module='ns.core', parent=root_module['ns3::SimpleRefCount< ns3::AttributeAccessor, ns3::empty, ns3::DefaultDeleter<ns3::AttributeAccessor> >'])
## attribute.h (module 'core'): ns3::AttributeChecker [class]
module.add_class('AttributeChecker', allow_subclassing=False, automatic_type_narrowing=True, import_from_module='ns.core', parent=root_module['ns3::SimpleRefCount< ns3::AttributeChecker, ns3::empty, ns3::DefaultDeleter<ns3::AttributeChecker> >'])
## attribute.h (module 'core'): ns3::AttributeValue [class]
module.add_class('AttributeValue', allow_subclassing=False, automatic_type_narrowing=True, import_from_module='ns.core', parent=root_module['ns3::SimpleRefCount< ns3::AttributeValue, ns3::empty, ns3::DefaultDeleter<ns3::AttributeValue> >'])
## callback.h (module 'core'): ns3::CallbackChecker [class]
module.add_class('CallbackChecker', import_from_module='ns.core', parent=root_module['ns3::AttributeChecker'])
## callback.h (module 'core'): ns3::CallbackImplBase [class]
module.add_class('CallbackImplBase', import_from_module='ns.core', parent=root_module['ns3::SimpleRefCount< ns3::CallbackImplBase, ns3::empty, ns3::DefaultDeleter<ns3::CallbackImplBase> >'])
## callback.h (module 'core'): ns3::CallbackValue [class]
module.add_class('CallbackValue', import_from_module='ns.core', parent=root_module['ns3::AttributeValue'])
## distributed-simulator-impl.h (module 'mpi'): ns3::DistributedSimulatorImpl [class]
module.add_class('DistributedSimulatorImpl', parent=root_module['ns3::SimulatorImpl'])
## attribute.h (module 'core'): ns3::EmptyAttributeValue [class]
module.add_class('EmptyAttributeValue', import_from_module='ns.core', parent=root_module['ns3::AttributeValue'])
## event-impl.h (module 'core'): ns3::EventImpl [class]
module.add_class('EventImpl', import_from_module='ns.core', parent=root_module['ns3::SimpleRefCount< ns3::EventImpl, ns3::empty, ns3::DefaultDeleter<ns3::EventImpl> >'])
## mpi-receiver.h (module 'mpi'): ns3::MpiReceiver [class]
module.add_class('MpiReceiver', parent=root_module['ns3::Object'])
## nix-vector.h (module 'network'): ns3::NixVector [class]
module.add_class('NixVector', import_from_module='ns.network', parent=root_module['ns3::SimpleRefCount< ns3::NixVector, ns3::empty, ns3::DefaultDeleter<ns3::NixVector> >'])
## object-factory.h (module 'core'): ns3::ObjectFactoryChecker [class]
module.add_class('ObjectFactoryChecker', import_from_module='ns.core', parent=root_module['ns3::AttributeChecker'])
## object-factory.h (module 'core'): ns3::ObjectFactoryValue [class]
module.add_class('ObjectFactoryValue', import_from_module='ns.core', parent=root_module['ns3::AttributeValue'])
## packet.h (module 'network'): ns3::Packet [class]
module.add_class('Packet', import_from_module='ns.network', parent=root_module['ns3::SimpleRefCount< ns3::Packet, ns3::empty, ns3::DefaultDeleter<ns3::Packet> >'])
## nstime.h (module 'core'): ns3::TimeChecker [class]
module.add_class('TimeChecker', import_from_module='ns.core', parent=root_module['ns3::AttributeChecker'])
## nstime.h (module 'core'): ns3::TimeValue [class]
module.add_class('TimeValue', import_from_module='ns.core', parent=root_module['ns3::AttributeValue'])
## type-id.h (module 'core'): ns3::TypeIdChecker [class]
module.add_class('TypeIdChecker', import_from_module='ns.core', parent=root_module['ns3::AttributeChecker'])
## type-id.h (module 'core'): ns3::TypeIdValue [class]
module.add_class('TypeIdValue', import_from_module='ns.core', parent=root_module['ns3::AttributeValue'])
## Register a nested module for the namespace FatalImpl
nested_module = module.add_cpp_namespace('FatalImpl')
register_types_ns3_FatalImpl(nested_module)
def register_types_ns3_FatalImpl(module):
root_module = module.get_root()
def register_methods(root_module):
register_Ns3AttributeConstructionList_methods(root_module, root_module['ns3::AttributeConstructionList'])
register_Ns3AttributeConstructionListItem_methods(root_module, root_module['ns3::AttributeConstructionList::Item'])
register_Ns3Buffer_methods(root_module, root_module['ns3::Buffer'])
register_Ns3BufferIterator_methods(root_module, root_module['ns3::Buffer::Iterator'])
register_Ns3ByteTagIterator_methods(root_module, root_module['ns3::ByteTagIterator'])
register_Ns3ByteTagIteratorItem_methods(root_module, root_module['ns3::ByteTagIterator::Item'])
register_Ns3ByteTagList_methods(root_module, root_module['ns3::ByteTagList'])
register_Ns3ByteTagListIterator_methods(root_module, root_module['ns3::ByteTagList::Iterator'])
register_Ns3ByteTagListIteratorItem_methods(root_module, root_module['ns3::ByteTagList::Iterator::Item'])
register_Ns3CallbackBase_methods(root_module, root_module['ns3::CallbackBase'])
register_Ns3EventId_methods(root_module, root_module['ns3::EventId'])
register_Ns3LbtsMessage_methods(root_module, root_module['ns3::LbtsMessage'])
register_Ns3MpiInterface_methods(root_module, root_module['ns3::MpiInterface'])
register_Ns3ObjectBase_methods(root_module, root_module['ns3::ObjectBase'])
register_Ns3ObjectDeleter_methods(root_module, root_module['ns3::ObjectDeleter'])
register_Ns3ObjectFactory_methods(root_module, root_module['ns3::ObjectFactory'])
register_Ns3PacketMetadata_methods(root_module, root_module['ns3::PacketMetadata'])
register_Ns3PacketMetadataItem_methods(root_module, root_module['ns3::PacketMetadata::Item'])
register_Ns3PacketMetadataItemIterator_methods(root_module, root_module['ns3::PacketMetadata::ItemIterator'])
register_Ns3PacketTagIterator_methods(root_module, root_module['ns3::PacketTagIterator'])
register_Ns3PacketTagIteratorItem_methods(root_module, root_module['ns3::PacketTagIterator::Item'])
register_Ns3PacketTagList_methods(root_module, root_module['ns3::PacketTagList'])
register_Ns3PacketTagListTagData_methods(root_module, root_module['ns3::PacketTagList::TagData'])
register_Ns3SentBuffer_methods(root_module, root_module['ns3::SentBuffer'])
register_Ns3SimpleRefCount__Ns3Object_Ns3ObjectBase_Ns3ObjectDeleter_methods(root_module, root_module['ns3::SimpleRefCount< ns3::Object, ns3::ObjectBase, ns3::ObjectDeleter >'])
register_Ns3Tag_methods(root_module, root_module['ns3::Tag'])
register_Ns3TagBuffer_methods(root_module, root_module['ns3::TagBuffer'])
register_Ns3TypeId_methods(root_module, root_module['ns3::TypeId'])
register_Ns3TypeIdAttributeInformation_methods(root_module, root_module['ns3::TypeId::AttributeInformation'])
register_Ns3TypeIdTraceSourceInformation_methods(root_module, root_module['ns3::TypeId::TraceSourceInformation'])
register_Ns3Empty_methods(root_module, root_module['ns3::empty'])
register_Ns3Int64x64_t_methods(root_module, root_module['ns3::int64x64_t'])
register_Ns3Chunk_methods(root_module, root_module['ns3::Chunk'])
register_Ns3Header_methods(root_module, root_module['ns3::Header'])
register_Ns3Object_methods(root_module, root_module['ns3::Object'])
register_Ns3ObjectAggregateIterator_methods(root_module, root_module['ns3::Object::AggregateIterator'])
register_Ns3Scheduler_methods(root_module, root_module['ns3::Scheduler'])
register_Ns3SchedulerEvent_methods(root_module, root_module['ns3::Scheduler::Event'])
register_Ns3SchedulerEventKey_methods(root_module, root_module['ns3::Scheduler::EventKey'])
register_Ns3SimpleRefCount__Ns3AttributeAccessor_Ns3Empty_Ns3DefaultDeleter__lt__ns3AttributeAccessor__gt___methods(root_module, root_module['ns3::SimpleRefCount< ns3::AttributeAccessor, ns3::empty, ns3::DefaultDeleter<ns3::AttributeAccessor> >'])
register_Ns3SimpleRefCount__Ns3AttributeChecker_Ns3Empty_Ns3DefaultDeleter__lt__ns3AttributeChecker__gt___methods(root_module, root_module['ns3::SimpleRefCount< ns3::AttributeChecker, ns3::empty, ns3::DefaultDeleter<ns3::AttributeChecker> >'])
register_Ns3SimpleRefCount__Ns3AttributeValue_Ns3Empty_Ns3DefaultDeleter__lt__ns3AttributeValue__gt___methods(root_module, root_module['ns3::SimpleRefCount< ns3::AttributeValue, ns3::empty, ns3::DefaultDeleter<ns3::AttributeValue> >'])
register_Ns3SimpleRefCount__Ns3CallbackImplBase_Ns3Empty_Ns3DefaultDeleter__lt__ns3CallbackImplBase__gt___methods(root_module, root_module['ns3::SimpleRefCount< ns3::CallbackImplBase, ns3::empty, ns3::DefaultDeleter<ns3::CallbackImplBase> >'])
register_Ns3SimpleRefCount__Ns3EventImpl_Ns3Empty_Ns3DefaultDeleter__lt__ns3EventImpl__gt___methods(root_module, root_module['ns3::SimpleRefCount< ns3::EventImpl, ns3::empty, ns3::DefaultDeleter<ns3::EventImpl> >'])
register_Ns3SimpleRefCount__Ns3NixVector_Ns3Empty_Ns3DefaultDeleter__lt__ns3NixVector__gt___methods(root_module, root_module['ns3::SimpleRefCount< ns3::NixVector, ns3::empty, ns3::DefaultDeleter<ns3::NixVector> >'])
register_Ns3SimpleRefCount__Ns3Packet_Ns3Empty_Ns3DefaultDeleter__lt__ns3Packet__gt___methods(root_module, root_module['ns3::SimpleRefCount< ns3::Packet, ns3::empty, ns3::DefaultDeleter<ns3::Packet> >'])
register_Ns3SimpleRefCount__Ns3TraceSourceAccessor_Ns3Empty_Ns3DefaultDeleter__lt__ns3TraceSourceAccessor__gt___methods(root_module, root_module['ns3::SimpleRefCount< ns3::TraceSourceAccessor, ns3::empty, ns3::DefaultDeleter<ns3::TraceSourceAccessor> >'])
register_Ns3SimulatorImpl_methods(root_module, root_module['ns3::SimulatorImpl'])
register_Ns3Time_methods(root_module, root_module['ns3::Time'])
register_Ns3TraceSourceAccessor_methods(root_module, root_module['ns3::TraceSourceAccessor'])
register_Ns3Trailer_methods(root_module, root_module['ns3::Trailer'])
register_Ns3AttributeAccessor_methods(root_module, root_module['ns3::AttributeAccessor'])
register_Ns3AttributeChecker_methods(root_module, root_module['ns3::AttributeChecker'])
register_Ns3AttributeValue_methods(root_module, root_module['ns3::AttributeValue'])
register_Ns3CallbackChecker_methods(root_module, root_module['ns3::CallbackChecker'])
register_Ns3CallbackImplBase_methods(root_module, root_module['ns3::CallbackImplBase'])
register_Ns3CallbackValue_methods(root_module, root_module['ns3::CallbackValue'])
register_Ns3DistributedSimulatorImpl_methods(root_module, root_module['ns3::DistributedSimulatorImpl'])
register_Ns3EmptyAttributeValue_methods(root_module, root_module['ns3::EmptyAttributeValue'])
register_Ns3EventImpl_methods(root_module, root_module['ns3::EventImpl'])
register_Ns3MpiReceiver_methods(root_module, root_module['ns3::MpiReceiver'])
register_Ns3NixVector_methods(root_module, root_module['ns3::NixVector'])
register_Ns3ObjectFactoryChecker_methods(root_module, root_module['ns3::ObjectFactoryChecker'])
register_Ns3ObjectFactoryValue_methods(root_module, root_module['ns3::ObjectFactoryValue'])
register_Ns3Packet_methods(root_module, root_module['ns3::Packet'])
register_Ns3TimeChecker_methods(root_module, root_module['ns3::TimeChecker'])
register_Ns3TimeValue_methods(root_module, root_module['ns3::TimeValue'])
register_Ns3TypeIdChecker_methods(root_module, root_module['ns3::TypeIdChecker'])
register_Ns3TypeIdValue_methods(root_module, root_module['ns3::TypeIdValue'])
return
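# Note: every register_*_methods function below follows the same pybindgen
# pattern: take a wrapped class and declare its constructors, methods and
# instance attributes. A minimal self-contained sketch of that pattern, for
# reference only ('example.h' and MyClass are hypothetical, not part of these
# bindings; the function is never called from this file):
def _pybindgen_pattern_sketch():
    from pybindgen import Module, param, retval
    mod = Module('example')
    mod.add_include('"example.h"')
    cls = mod.add_class('MyClass')
    cls.add_constructor([])
    # Same call shape used throughout this file: name, return type, params.
    cls.add_method('GetSize', retval('uint32_t'), [], is_const=True)
    cls.add_method('Resize', 'void', [param('uint32_t', 'size')])
    return mod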
def register_Ns3AttributeConstructionList_methods(root_module, cls):
## attribute-construction-list.h (module 'core'): ns3::AttributeConstructionList::AttributeConstructionList(ns3::AttributeConstructionList const & arg0) [copy constructor]
cls.add_constructor([param('ns3::AttributeConstructionList const &', 'arg0')])
## attribute-construction-list.h (module 'core'): ns3::AttributeConstructionList::AttributeConstructionList() [constructor]
cls.add_constructor([])
## attribute-construction-list.h (module 'core'): void ns3::AttributeConstructionList::Add(std::string name, ns3::Ptr<ns3::AttributeChecker const> checker, ns3::Ptr<ns3::AttributeValue> value) [member function]
cls.add_method('Add',
'void',
[param('std::string', 'name'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker'), param('ns3::Ptr< ns3::AttributeValue >', 'value')])
## attribute-construction-list.h (module 'core'): std::_List_const_iterator<ns3::AttributeConstructionList::Item> ns3::AttributeConstructionList::Begin() const [member function]
cls.add_method('Begin',
'std::_List_const_iterator< ns3::AttributeConstructionList::Item >',
[],
is_const=True)
## attribute-construction-list.h (module 'core'): std::_List_const_iterator<ns3::AttributeConstructionList::Item> ns3::AttributeConstructionList::End() const [member function]
cls.add_method('End',
'std::_List_const_iterator< ns3::AttributeConstructionList::Item >',
[],
is_const=True)
## attribute-construction-list.h (module 'core'): ns3::Ptr<ns3::AttributeValue> ns3::AttributeConstructionList::Find(ns3::Ptr<ns3::AttributeChecker const> checker) const [member function]
cls.add_method('Find',
'ns3::Ptr< ns3::AttributeValue >',
[param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
is_const=True)
return
def register_Ns3AttributeConstructionListItem_methods(root_module, cls):
## attribute-construction-list.h (module 'core'): ns3::AttributeConstructionList::Item::Item() [constructor]
cls.add_constructor([])
## attribute-construction-list.h (module 'core'): ns3::AttributeConstructionList::Item::Item(ns3::AttributeConstructionList::Item const & arg0) [copy constructor]
cls.add_constructor([param('ns3::AttributeConstructionList::Item const &', 'arg0')])
## attribute-construction-list.h (module 'core'): ns3::AttributeConstructionList::Item::checker [variable]
cls.add_instance_attribute('checker', 'ns3::Ptr< ns3::AttributeChecker const >', is_const=False)
## attribute-construction-list.h (module 'core'): ns3::AttributeConstructionList::Item::name [variable]
cls.add_instance_attribute('name', 'std::string', is_const=False)
## attribute-construction-list.h (module 'core'): ns3::AttributeConstructionList::Item::value [variable]
cls.add_instance_attribute('value', 'ns3::Ptr< ns3::AttributeValue >', is_const=False)
return
def register_Ns3Buffer_methods(root_module, cls):
## buffer.h (module 'network'): ns3::Buffer::Buffer() [constructor]
cls.add_constructor([])
## buffer.h (module 'network'): ns3::Buffer::Buffer(uint32_t dataSize) [constructor]
cls.add_constructor([param('uint32_t', 'dataSize')])
## buffer.h (module 'network'): ns3::Buffer::Buffer(uint32_t dataSize, bool initialize) [constructor]
cls.add_constructor([param('uint32_t', 'dataSize'), param('bool', 'initialize')])
## buffer.h (module 'network'): ns3::Buffer::Buffer(ns3::Buffer const & o) [copy constructor]
cls.add_constructor([param('ns3::Buffer const &', 'o')])
## buffer.h (module 'network'): bool ns3::Buffer::AddAtEnd(uint32_t end) [member function]
cls.add_method('AddAtEnd',
'bool',
[param('uint32_t', 'end')])
## buffer.h (module 'network'): void ns3::Buffer::AddAtEnd(ns3::Buffer const & o) [member function]
cls.add_method('AddAtEnd',
'void',
[param('ns3::Buffer const &', 'o')])
## buffer.h (module 'network'): bool ns3::Buffer::AddAtStart(uint32_t start) [member function]
cls.add_method('AddAtStart',
'bool',
[param('uint32_t', 'start')])
## buffer.h (module 'network'): ns3::Buffer::Iterator ns3::Buffer::Begin() const [member function]
cls.add_method('Begin',
'ns3::Buffer::Iterator',
[],
is_const=True)
## buffer.h (module 'network'): void ns3::Buffer::CopyData(std::ostream * os, uint32_t size) const [member function]
cls.add_method('CopyData',
'void',
[param('std::ostream *', 'os'), param('uint32_t', 'size')],
is_const=True)
## buffer.h (module 'network'): uint32_t ns3::Buffer::CopyData(uint8_t * buffer, uint32_t size) const [member function]
cls.add_method('CopyData',
'uint32_t',
[param('uint8_t *', 'buffer'), param('uint32_t', 'size')],
is_const=True)
## buffer.h (module 'network'): ns3::Buffer ns3::Buffer::CreateFragment(uint32_t start, uint32_t length) const [member function]
cls.add_method('CreateFragment',
'ns3::Buffer',
[param('uint32_t', 'start'), param('uint32_t', 'length')],
is_const=True)
## buffer.h (module 'network'): ns3::Buffer ns3::Buffer::CreateFullCopy() const [member function]
cls.add_method('CreateFullCopy',
'ns3::Buffer',
[],
is_const=True)
## buffer.h (module 'network'): uint32_t ns3::Buffer::Deserialize(uint8_t const * buffer, uint32_t size) [member function]
cls.add_method('Deserialize',
'uint32_t',
[param('uint8_t const *', 'buffer'), param('uint32_t', 'size')])
## buffer.h (module 'network'): ns3::Buffer::Iterator ns3::Buffer::End() const [member function]
cls.add_method('End',
'ns3::Buffer::Iterator',
[],
is_const=True)
## buffer.h (module 'network'): int32_t ns3::Buffer::GetCurrentEndOffset() const [member function]
cls.add_method('GetCurrentEndOffset',
'int32_t',
[],
is_const=True)
## buffer.h (module 'network'): int32_t ns3::Buffer::GetCurrentStartOffset() const [member function]
cls.add_method('GetCurrentStartOffset',
'int32_t',
[],
is_const=True)
## buffer.h (module 'network'): uint32_t ns3::Buffer::GetSerializedSize() const [member function]
cls.add_method('GetSerializedSize',
'uint32_t',
[],
is_const=True)
## buffer.h (module 'network'): uint32_t ns3::Buffer::GetSize() const [member function]
cls.add_method('GetSize',
'uint32_t',
[],
is_const=True)
## buffer.h (module 'network'): uint8_t const * ns3::Buffer::PeekData() const [member function]
cls.add_method('PeekData',
'uint8_t const *',
[],
is_const=True)
## buffer.h (module 'network'): void ns3::Buffer::RemoveAtEnd(uint32_t end) [member function]
cls.add_method('RemoveAtEnd',
'void',
[param('uint32_t', 'end')])
## buffer.h (module 'network'): void ns3::Buffer::RemoveAtStart(uint32_t start) [member function]
cls.add_method('RemoveAtStart',
'void',
[param('uint32_t', 'start')])
## buffer.h (module 'network'): uint32_t ns3::Buffer::Serialize(uint8_t * buffer, uint32_t maxSize) const [member function]
cls.add_method('Serialize',
'uint32_t',
[param('uint8_t *', 'buffer'), param('uint32_t', 'maxSize')],
is_const=True)
return
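# Hedged usage sketch for the Buffer API registered above, as it would look
# from Python once these bindings are compiled (assumes the built module is
# importable as 'ns.network'; never called from this file):
def _example_buffer_usage():
    import ns.network
    buf = ns.network.Buffer(10)       # Buffer(uint32_t dataSize)
    buf.AddAtStart(4)                 # reserve 4 bytes at the front
    buf.AddAtEnd(2)                   # and 2 bytes at the back
    assert buf.GetSize() == 16
    frag = buf.CreateFragment(0, 8)   # CreateFragment(start, length)
    return frag.GetSize()             # -> 8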
def register_Ns3BufferIterator_methods(root_module, cls):
## buffer.h (module 'network'): ns3::Buffer::Iterator::Iterator(ns3::Buffer::Iterator const & arg0) [copy constructor]
cls.add_constructor([param('ns3::Buffer::Iterator const &', 'arg0')])
## buffer.h (module 'network'): ns3::Buffer::Iterator::Iterator() [constructor]
cls.add_constructor([])
## buffer.h (module 'network'): uint16_t ns3::Buffer::Iterator::CalculateIpChecksum(uint16_t size) [member function]
cls.add_method('CalculateIpChecksum',
'uint16_t',
[param('uint16_t', 'size')])
## buffer.h (module 'network'): uint16_t ns3::Buffer::Iterator::CalculateIpChecksum(uint16_t size, uint32_t initialChecksum) [member function]
cls.add_method('CalculateIpChecksum',
'uint16_t',
[param('uint16_t', 'size'), param('uint32_t', 'initialChecksum')])
## buffer.h (module 'network'): uint32_t ns3::Buffer::Iterator::GetDistanceFrom(ns3::Buffer::Iterator const & o) const [member function]
cls.add_method('GetDistanceFrom',
'uint32_t',
[param('ns3::Buffer::Iterator const &', 'o')],
is_const=True)
## buffer.h (module 'network'): uint32_t ns3::Buffer::Iterator::GetSize() const [member function]
cls.add_method('GetSize',
'uint32_t',
[],
is_const=True)
## buffer.h (module 'network'): bool ns3::Buffer::Iterator::IsEnd() const [member function]
cls.add_method('IsEnd',
'bool',
[],
is_const=True)
## buffer.h (module 'network'): bool ns3::Buffer::Iterator::IsStart() const [member function]
cls.add_method('IsStart',
'bool',
[],
is_const=True)
## buffer.h (module 'network'): void ns3::Buffer::Iterator::Next() [member function]
cls.add_method('Next',
'void',
[])
## buffer.h (module 'network'): void ns3::Buffer::Iterator::Next(uint32_t delta) [member function]
cls.add_method('Next',
'void',
[param('uint32_t', 'delta')])
## buffer.h (module 'network'): void ns3::Buffer::Iterator::Prev() [member function]
cls.add_method('Prev',
'void',
[])
## buffer.h (module 'network'): void ns3::Buffer::Iterator::Prev(uint32_t delta) [member function]
cls.add_method('Prev',
'void',
[param('uint32_t', 'delta')])
## buffer.h (module 'network'): void ns3::Buffer::Iterator::Read(uint8_t * buffer, uint32_t size) [member function]
cls.add_method('Read',
'void',
[param('uint8_t *', 'buffer'), param('uint32_t', 'size')])
## buffer.h (module 'network'): uint16_t ns3::Buffer::Iterator::ReadLsbtohU16() [member function]
cls.add_method('ReadLsbtohU16',
'uint16_t',
[])
## buffer.h (module 'network'): uint32_t ns3::Buffer::Iterator::ReadLsbtohU32() [member function]
cls.add_method('ReadLsbtohU32',
'uint32_t',
[])
## buffer.h (module 'network'): uint64_t ns3::Buffer::Iterator::ReadLsbtohU64() [member function]
cls.add_method('ReadLsbtohU64',
'uint64_t',
[])
## buffer.h (module 'network'): uint16_t ns3::Buffer::Iterator::ReadNtohU16() [member function]
cls.add_method('ReadNtohU16',
'uint16_t',
[])
## buffer.h (module 'network'): uint32_t ns3::Buffer::Iterator::ReadNtohU32() [member function]
cls.add_method('ReadNtohU32',
'uint32_t',
[])
## buffer.h (module 'network'): uint64_t ns3::Buffer::Iterator::ReadNtohU64() [member function]
cls.add_method('ReadNtohU64',
'uint64_t',
[])
## buffer.h (module 'network'): uint16_t ns3::Buffer::Iterator::ReadU16() [member function]
cls.add_method('ReadU16',
'uint16_t',
[])
## buffer.h (module 'network'): uint32_t ns3::Buffer::Iterator::ReadU32() [member function]
cls.add_method('ReadU32',
'uint32_t',
[])
## buffer.h (module 'network'): uint64_t ns3::Buffer::Iterator::ReadU64() [member function]
cls.add_method('ReadU64',
'uint64_t',
[])
## buffer.h (module 'network'): uint8_t ns3::Buffer::Iterator::ReadU8() [member function]
cls.add_method('ReadU8',
'uint8_t',
[])
## buffer.h (module 'network'): void ns3::Buffer::Iterator::Write(uint8_t const * buffer, uint32_t size) [member function]
cls.add_method('Write',
'void',
[param('uint8_t const *', 'buffer'), param('uint32_t', 'size')])
## buffer.h (module 'network'): void ns3::Buffer::Iterator::Write(ns3::Buffer::Iterator start, ns3::Buffer::Iterator end) [member function]
cls.add_method('Write',
'void',
[param('ns3::Buffer::Iterator', 'start'), param('ns3::Buffer::Iterator', 'end')])
## buffer.h (module 'network'): void ns3::Buffer::Iterator::WriteHtolsbU16(uint16_t data) [member function]
cls.add_method('WriteHtolsbU16',
'void',
[param('uint16_t', 'data')])
## buffer.h (module 'network'): void ns3::Buffer::Iterator::WriteHtolsbU32(uint32_t data) [member function]
cls.add_method('WriteHtolsbU32',
'void',
[param('uint32_t', 'data')])
## buffer.h (module 'network'): void ns3::Buffer::Iterator::WriteHtolsbU64(uint64_t data) [member function]
cls.add_method('WriteHtolsbU64',
'void',
[param('uint64_t', 'data')])
## buffer.h (module 'network'): void ns3::Buffer::Iterator::WriteHtonU16(uint16_t data) [member function]
cls.add_method('WriteHtonU16',
'void',
[param('uint16_t', 'data')])
## buffer.h (module 'network'): void ns3::Buffer::Iterator::WriteHtonU32(uint32_t data) [member function]
cls.add_method('WriteHtonU32',
'void',
[param('uint32_t', 'data')])
## buffer.h (module 'network'): void ns3::Buffer::Iterator::WriteHtonU64(uint64_t data) [member function]
cls.add_method('WriteHtonU64',
'void',
[param('uint64_t', 'data')])
## buffer.h (module 'network'): void ns3::Buffer::Iterator::WriteU16(uint16_t data) [member function]
cls.add_method('WriteU16',
'void',
[param('uint16_t', 'data')])
## buffer.h (module 'network'): void ns3::Buffer::Iterator::WriteU32(uint32_t data) [member function]
cls.add_method('WriteU32',
'void',
[param('uint32_t', 'data')])
## buffer.h (module 'network'): void ns3::Buffer::Iterator::WriteU64(uint64_t data) [member function]
cls.add_method('WriteU64',
'void',
[param('uint64_t', 'data')])
## buffer.h (module 'network'): void ns3::Buffer::Iterator::WriteU8(uint8_t data) [member function]
cls.add_method('WriteU8',
'void',
[param('uint8_t', 'data')])
## buffer.h (module 'network'): void ns3::Buffer::Iterator::WriteU8(uint8_t data, uint32_t len) [member function]
cls.add_method('WriteU8',
'void',
[param('uint8_t', 'data'), param('uint32_t', 'len')])
return
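# Hedged sketch of the Buffer::Iterator read/write round trip registered
# above (same 'ns.network' import assumption as the Buffer example):
def _example_buffer_iterator_roundtrip():
    import ns.network
    buf = ns.network.Buffer(8)
    it = buf.Begin()
    it.WriteHtonU32(0xdeadbeef)   # network (big-endian) byte order
    it.WriteU32(42)               # host byte order
    it = buf.Begin()              # iterators are value types; restart
    return it.ReadNtohU32(), it.ReadU32()   # -> (0xdeadbeef, 42)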
def register_Ns3ByteTagIterator_methods(root_module, cls):
## packet.h (module 'network'): ns3::ByteTagIterator::ByteTagIterator(ns3::ByteTagIterator const & arg0) [copy constructor]
cls.add_constructor([param('ns3::ByteTagIterator const &', 'arg0')])
## packet.h (module 'network'): bool ns3::ByteTagIterator::HasNext() const [member function]
cls.add_method('HasNext',
'bool',
[],
is_const=True)
## packet.h (module 'network'): ns3::ByteTagIterator::Item ns3::ByteTagIterator::Next() [member function]
cls.add_method('Next',
'ns3::ByteTagIterator::Item',
[])
return
def register_Ns3ByteTagIteratorItem_methods(root_module, cls):
## packet.h (module 'network'): ns3::ByteTagIterator::Item::Item(ns3::ByteTagIterator::Item const & arg0) [copy constructor]
cls.add_constructor([param('ns3::ByteTagIterator::Item const &', 'arg0')])
## packet.h (module 'network'): uint32_t ns3::ByteTagIterator::Item::GetEnd() const [member function]
cls.add_method('GetEnd',
'uint32_t',
[],
is_const=True)
## packet.h (module 'network'): uint32_t ns3::ByteTagIterator::Item::GetStart() const [member function]
cls.add_method('GetStart',
'uint32_t',
[],
is_const=True)
## packet.h (module 'network'): void ns3::ByteTagIterator::Item::GetTag(ns3::Tag & tag) const [member function]
cls.add_method('GetTag',
'void',
[param('ns3::Tag &', 'tag')],
is_const=True)
## packet.h (module 'network'): ns3::TypeId ns3::ByteTagIterator::Item::GetTypeId() const [member function]
cls.add_method('GetTypeId',
'ns3::TypeId',
[],
is_const=True)
return
def register_Ns3ByteTagList_methods(root_module, cls):
## byte-tag-list.h (module 'network'): ns3::ByteTagList::ByteTagList() [constructor]
cls.add_constructor([])
## byte-tag-list.h (module 'network'): ns3::ByteTagList::ByteTagList(ns3::ByteTagList const & o) [copy constructor]
cls.add_constructor([param('ns3::ByteTagList const &', 'o')])
## byte-tag-list.h (module 'network'): ns3::TagBuffer ns3::ByteTagList::Add(ns3::TypeId tid, uint32_t bufferSize, int32_t start, int32_t end) [member function]
cls.add_method('Add',
'ns3::TagBuffer',
[param('ns3::TypeId', 'tid'), param('uint32_t', 'bufferSize'), param('int32_t', 'start'), param('int32_t', 'end')])
## byte-tag-list.h (module 'network'): void ns3::ByteTagList::Add(ns3::ByteTagList const & o) [member function]
cls.add_method('Add',
'void',
[param('ns3::ByteTagList const &', 'o')])
## byte-tag-list.h (module 'network'): void ns3::ByteTagList::AddAtEnd(int32_t adjustment, int32_t appendOffset) [member function]
cls.add_method('AddAtEnd',
'void',
[param('int32_t', 'adjustment'), param('int32_t', 'appendOffset')])
## byte-tag-list.h (module 'network'): void ns3::ByteTagList::AddAtStart(int32_t adjustment, int32_t prependOffset) [member function]
cls.add_method('AddAtStart',
'void',
[param('int32_t', 'adjustment'), param('int32_t', 'prependOffset')])
## byte-tag-list.h (module 'network'): ns3::ByteTagList::Iterator ns3::ByteTagList::Begin(int32_t offsetStart, int32_t offsetEnd) const [member function]
cls.add_method('Begin',
'ns3::ByteTagList::Iterator',
[param('int32_t', 'offsetStart'), param('int32_t', 'offsetEnd')],
is_const=True)
## byte-tag-list.h (module 'network'): void ns3::ByteTagList::RemoveAll() [member function]
cls.add_method('RemoveAll',
'void',
[])
return
def register_Ns3ByteTagListIterator_methods(root_module, cls):
## byte-tag-list.h (module 'network'): ns3::ByteTagList::Iterator::Iterator(ns3::ByteTagList::Iterator const & arg0) [copy constructor]
cls.add_constructor([param('ns3::ByteTagList::Iterator const &', 'arg0')])
## byte-tag-list.h (module 'network'): uint32_t ns3::ByteTagList::Iterator::GetOffsetStart() const [member function]
cls.add_method('GetOffsetStart',
'uint32_t',
[],
is_const=True)
## byte-tag-list.h (module 'network'): bool ns3::ByteTagList::Iterator::HasNext() const [member function]
cls.add_method('HasNext',
'bool',
[],
is_const=True)
## byte-tag-list.h (module 'network'): ns3::ByteTagList::Iterator::Item ns3::ByteTagList::Iterator::Next() [member function]
cls.add_method('Next',
'ns3::ByteTagList::Iterator::Item',
[])
return
def register_Ns3ByteTagListIteratorItem_methods(root_module, cls):
## byte-tag-list.h (module 'network'): ns3::ByteTagList::Iterator::Item::Item(ns3::ByteTagList::Iterator::Item const & arg0) [copy constructor]
cls.add_constructor([param('ns3::ByteTagList::Iterator::Item const &', 'arg0')])
## byte-tag-list.h (module 'network'): ns3::ByteTagList::Iterator::Item::Item(ns3::TagBuffer buf) [constructor]
cls.add_constructor([param('ns3::TagBuffer', 'buf')])
## byte-tag-list.h (module 'network'): ns3::ByteTagList::Iterator::Item::buf [variable]
cls.add_instance_attribute('buf', 'ns3::TagBuffer', is_const=False)
## byte-tag-list.h (module 'network'): ns3::ByteTagList::Iterator::Item::end [variable]
cls.add_instance_attribute('end', 'int32_t', is_const=False)
## byte-tag-list.h (module 'network'): ns3::ByteTagList::Iterator::Item::size [variable]
cls.add_instance_attribute('size', 'uint32_t', is_const=False)
## byte-tag-list.h (module 'network'): ns3::ByteTagList::Iterator::Item::start [variable]
cls.add_instance_attribute('start', 'int32_t', is_const=False)
## byte-tag-list.h (module 'network'): ns3::ByteTagList::Iterator::Item::tid [variable]
cls.add_instance_attribute('tid', 'ns3::TypeId', is_const=False)
return
def register_Ns3CallbackBase_methods(root_module, cls):
## callback.h (module 'core'): ns3::CallbackBase::CallbackBase(ns3::CallbackBase const & arg0) [copy constructor]
cls.add_constructor([param('ns3::CallbackBase const &', 'arg0')])
## callback.h (module 'core'): ns3::CallbackBase::CallbackBase() [constructor]
cls.add_constructor([])
## callback.h (module 'core'): ns3::Ptr<ns3::CallbackImplBase> ns3::CallbackBase::GetImpl() const [member function]
cls.add_method('GetImpl',
'ns3::Ptr< ns3::CallbackImplBase >',
[],
is_const=True)
## callback.h (module 'core'): ns3::CallbackBase::CallbackBase(ns3::Ptr<ns3::CallbackImplBase> impl) [constructor]
cls.add_constructor([param('ns3::Ptr< ns3::CallbackImplBase >', 'impl')],
visibility='protected')
## callback.h (module 'core'): static std::string ns3::CallbackBase::Demangle(std::string const & mangled) [member function]
cls.add_method('Demangle',
'std::string',
[param('std::string const &', 'mangled')],
is_static=True, visibility='protected')
return
def register_Ns3EventId_methods(root_module, cls):
cls.add_binary_comparison_operator('!=')
cls.add_binary_comparison_operator('==')
## event-id.h (module 'core'): ns3::EventId::EventId(ns3::EventId const & arg0) [copy constructor]
cls.add_constructor([param('ns3::EventId const &', 'arg0')])
## event-id.h (module 'core'): ns3::EventId::EventId() [constructor]
cls.add_constructor([])
## event-id.h (module 'core'): ns3::EventId::EventId(ns3::Ptr<ns3::EventImpl> const & impl, uint64_t ts, uint32_t context, uint32_t uid) [constructor]
cls.add_constructor([param('ns3::Ptr< ns3::EventImpl > const &', 'impl'), param('uint64_t', 'ts'), param('uint32_t', 'context'), param('uint32_t', 'uid')])
## event-id.h (module 'core'): void ns3::EventId::Cancel() [member function]
cls.add_method('Cancel',
'void',
[])
## event-id.h (module 'core'): uint32_t ns3::EventId::GetContext() const [member function]
cls.add_method('GetContext',
'uint32_t',
[],
is_const=True)
## event-id.h (module 'core'): uint64_t ns3::EventId::GetTs() const [member function]
cls.add_method('GetTs',
'uint64_t',
[],
is_const=True)
## event-id.h (module 'core'): uint32_t ns3::EventId::GetUid() const [member function]
cls.add_method('GetUid',
'uint32_t',
[],
is_const=True)
## event-id.h (module 'core'): bool ns3::EventId::IsExpired() const [member function]
cls.add_method('IsExpired',
'bool',
[],
is_const=True)
## event-id.h (module 'core'): bool ns3::EventId::IsRunning() const [member function]
cls.add_method('IsRunning',
'bool',
[],
is_const=True)
## event-id.h (module 'core'): ns3::EventImpl * ns3::EventId::PeekEventImpl() const [member function]
cls.add_method('PeekEventImpl',
'ns3::EventImpl *',
[],
is_const=True)
return
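# Hedged sketch of EventId in use. It assumes the core bindings also expose
# ns.core.Simulator and ns.core.Seconds, and that their Schedule wrapper
# accepts a Python callable (a convenience of the ns-3 Python bindings, not
# registered in this file):
def _example_event_id_usage():
    import ns.core
    def noop():
        pass
    ev = ns.core.Simulator.Schedule(ns.core.Seconds(1.0), noop)
    if ev.IsRunning():   # pending and not yet expired
        ev.Cancel()      # cancelling marks the event expired
    return ev.IsExpired()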
def register_Ns3LbtsMessage_methods(root_module, cls):
## distributed-simulator-impl.h (module 'mpi'): ns3::LbtsMessage::LbtsMessage(ns3::LbtsMessage const & arg0) [copy constructor]
cls.add_constructor([param('ns3::LbtsMessage const &', 'arg0')])
## distributed-simulator-impl.h (module 'mpi'): ns3::LbtsMessage::LbtsMessage() [constructor]
cls.add_constructor([])
## distributed-simulator-impl.h (module 'mpi'): ns3::LbtsMessage::LbtsMessage(uint32_t rxc, uint32_t txc, uint32_t id, ns3::Time const & t) [constructor]
cls.add_constructor([param('uint32_t', 'rxc'), param('uint32_t', 'txc'), param('uint32_t', 'id'), param('ns3::Time const &', 't')])
## distributed-simulator-impl.h (module 'mpi'): uint32_t ns3::LbtsMessage::GetMyId() [member function]
cls.add_method('GetMyId',
'uint32_t',
[])
## distributed-simulator-impl.h (module 'mpi'): uint32_t ns3::LbtsMessage::GetRxCount() [member function]
cls.add_method('GetRxCount',
'uint32_t',
[])
## distributed-simulator-impl.h (module 'mpi'): ns3::Time ns3::LbtsMessage::GetSmallestTime() [member function]
cls.add_method('GetSmallestTime',
'ns3::Time',
[])
## distributed-simulator-impl.h (module 'mpi'): uint32_t ns3::LbtsMessage::GetTxCount() [member function]
cls.add_method('GetTxCount',
'uint32_t',
[])
return
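# LbtsMessage carries the per-rank "lower bound time stamp" data that the
# distributed scheduler all-reduces to find a safe event horizon. A hedged
# construction sketch (assumes the built modules import as ns.core/ns.mpi):
def _example_lbts_message():
    import ns.core
    import ns.mpi
    msg = ns.mpi.LbtsMessage(5, 7, 0, ns.core.Seconds(1.0))
    return (msg.GetRxCount(), msg.GetTxCount(),
            msg.GetMyId(), msg.GetSmallestTime())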
def register_Ns3MpiInterface_methods(root_module, cls):
## mpi-interface.h (module 'mpi'): ns3::MpiInterface::MpiInterface() [constructor]
cls.add_constructor([])
## mpi-interface.h (module 'mpi'): ns3::MpiInterface::MpiInterface(ns3::MpiInterface const & arg0) [copy constructor]
cls.add_constructor([param('ns3::MpiInterface const &', 'arg0')])
## mpi-interface.h (module 'mpi'): static void ns3::MpiInterface::Destroy() [member function]
cls.add_method('Destroy',
'void',
[],
is_static=True)
## mpi-interface.h (module 'mpi'): static void ns3::MpiInterface::Disable() [member function]
cls.add_method('Disable',
'void',
[],
is_static=True)
## mpi-interface.h (module 'mpi'): static void ns3::MpiInterface::Enable(int * pargc, char * * * pargv) [member function]
cls.add_method('Enable',
'void',
[param('int *', 'pargc'), param('char * * *', 'pargv')],
is_static=True)
## mpi-interface.h (module 'mpi'): static uint32_t ns3::MpiInterface::GetRxCount() [member function]
cls.add_method('GetRxCount',
'uint32_t',
[],
is_static=True)
## mpi-interface.h (module 'mpi'): static uint32_t ns3::MpiInterface::GetSize() [member function]
cls.add_method('GetSize',
'uint32_t',
[],
is_static=True)
## mpi-interface.h (module 'mpi'): static uint32_t ns3::MpiInterface::GetSystemId() [member function]
cls.add_method('GetSystemId',
'uint32_t',
[],
is_static=True)
## mpi-interface.h (module 'mpi'): static uint32_t ns3::MpiInterface::GetTxCount() [member function]
cls.add_method('GetTxCount',
'uint32_t',
[],
is_static=True)
## mpi-interface.h (module 'mpi'): static bool ns3::MpiInterface::IsEnabled() [member function]
cls.add_method('IsEnabled',
'bool',
[],
is_static=True)
## mpi-interface.h (module 'mpi'): static void ns3::MpiInterface::ReceiveMessages() [member function]
cls.add_method('ReceiveMessages',
'void',
[],
is_static=True)
## mpi-interface.h (module 'mpi'): static void ns3::MpiInterface::SendPacket(ns3::Ptr<ns3::Packet> p, ns3::Time const & rxTime, uint32_t node, uint32_t dev) [member function]
cls.add_method('SendPacket',
'void',
[param('ns3::Ptr< ns3::Packet >', 'p'), param('ns3::Time const &', 'rxTime'), param('uint32_t', 'node'), param('uint32_t', 'dev')],
is_static=True)
## mpi-interface.h (module 'mpi'): static void ns3::MpiInterface::TestSendComplete() [member function]
cls.add_method('TestSendComplete',
'void',
[],
is_static=True)
return
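# Hedged sketch of the static MpiInterface queries registered above. Enable()
# is normally invoked once at startup with argc/argv by the launching code,
# so only the read-only queries are shown (assumes the 'ns.mpi' import name):
def _example_mpi_interface_queries():
    import ns.mpi
    if not ns.mpi.MpiInterface.IsEnabled():
        return None
    rank = ns.mpi.MpiInterface.GetSystemId()   # this rank's id
    size = ns.mpi.MpiInterface.GetSize()       # number of MPI ranks
    return rank, size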
def register_Ns3ObjectBase_methods(root_module, cls):
## object-base.h (module 'core'): ns3::ObjectBase::ObjectBase() [constructor]
cls.add_constructor([])
## object-base.h (module 'core'): ns3::ObjectBase::ObjectBase(ns3::ObjectBase const & arg0) [copy constructor]
cls.add_constructor([param('ns3::ObjectBase const &', 'arg0')])
## object-base.h (module 'core'): void ns3::ObjectBase::GetAttribute(std::string name, ns3::AttributeValue & value) const [member function]
cls.add_method('GetAttribute',
'void',
[param('std::string', 'name'), param('ns3::AttributeValue &', 'value')],
is_const=True)
## object-base.h (module 'core'): bool ns3::ObjectBase::GetAttributeFailSafe(std::string name, ns3::AttributeValue & attribute) const [member function]
cls.add_method('GetAttributeFailSafe',
'bool',
[param('std::string', 'name'), param('ns3::AttributeValue &', 'attribute')],
is_const=True)
## object-base.h (module 'core'): ns3::TypeId ns3::ObjectBase::GetInstanceTypeId() const [member function]
cls.add_method('GetInstanceTypeId',
'ns3::TypeId',
[],
is_pure_virtual=True, is_const=True, is_virtual=True)
## object-base.h (module 'core'): static ns3::TypeId ns3::ObjectBase::GetTypeId() [member function]
cls.add_method('GetTypeId',
'ns3::TypeId',
[],
is_static=True)
## object-base.h (module 'core'): void ns3::ObjectBase::SetAttribute(std::string name, ns3::AttributeValue const & value) [member function]
cls.add_method('SetAttribute',
'void',
[param('std::string', 'name'), param('ns3::AttributeValue const &', 'value')])
## object-base.h (module 'core'): bool ns3::ObjectBase::SetAttributeFailSafe(std::string name, ns3::AttributeValue const & value) [member function]
cls.add_method('SetAttributeFailSafe',
'bool',
[param('std::string', 'name'), param('ns3::AttributeValue const &', 'value')])
## object-base.h (module 'core'): bool ns3::ObjectBase::TraceConnect(std::string name, std::string context, ns3::CallbackBase const & cb) [member function]
cls.add_method('TraceConnect',
'bool',
[param('std::string', 'name'), param('std::string', 'context'), param('ns3::CallbackBase const &', 'cb')])
## object-base.h (module 'core'): bool ns3::ObjectBase::TraceConnectWithoutContext(std::string name, ns3::CallbackBase const & cb) [member function]
cls.add_method('TraceConnectWithoutContext',
'bool',
[param('std::string', 'name'), param('ns3::CallbackBase const &', 'cb')])
## object-base.h (module 'core'): bool ns3::ObjectBase::TraceDisconnect(std::string name, std::string context, ns3::CallbackBase const & cb) [member function]
cls.add_method('TraceDisconnect',
'bool',
[param('std::string', 'name'), param('std::string', 'context'), param('ns3::CallbackBase const &', 'cb')])
## object-base.h (module 'core'): bool ns3::ObjectBase::TraceDisconnectWithoutContext(std::string name, ns3::CallbackBase const & cb) [member function]
cls.add_method('TraceDisconnectWithoutContext',
'bool',
[param('std::string', 'name'), param('ns3::CallbackBase const &', 'cb')])
## object-base.h (module 'core'): void ns3::ObjectBase::ConstructSelf(ns3::AttributeConstructionList const & attributes) [member function]
cls.add_method('ConstructSelf',
'void',
[param('ns3::AttributeConstructionList const &', 'attributes')],
visibility='protected')
## object-base.h (module 'core'): void ns3::ObjectBase::NotifyConstructionCompleted() [member function]
cls.add_method('NotifyConstructionCompleted',
'void',
[],
visibility='protected', is_virtual=True)
return
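# Hedged sketch of the fail-safe attribute accessors registered above. It
# assumes ns.core.StringValue from the core bindings, and 'Name' is only an
# illustrative attribute name (which is why the fail-safe variant is used):
def _example_attribute_access(obj):
    import ns.core
    value = ns.core.StringValue()
    if obj.GetAttributeFailSafe('Name', value):   # no abort on unknown name
        return value.Get()
    return None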
def register_Ns3ObjectDeleter_methods(root_module, cls):
## object.h (module 'core'): ns3::ObjectDeleter::ObjectDeleter() [constructor]
cls.add_constructor([])
## object.h (module 'core'): ns3::ObjectDeleter::ObjectDeleter(ns3::ObjectDeleter const & arg0) [copy constructor]
cls.add_constructor([param('ns3::ObjectDeleter const &', 'arg0')])
## object.h (module 'core'): static void ns3::ObjectDeleter::Delete(ns3::Object * object) [member function]
cls.add_method('Delete',
'void',
[param('ns3::Object *', 'object')],
is_static=True)
return
def register_Ns3ObjectFactory_methods(root_module, cls):
cls.add_output_stream_operator()
## object-factory.h (module 'core'): ns3::ObjectFactory::ObjectFactory(ns3::ObjectFactory const & arg0) [copy constructor]
cls.add_constructor([param('ns3::ObjectFactory const &', 'arg0')])
## object-factory.h (module 'core'): ns3::ObjectFactory::ObjectFactory() [constructor]
cls.add_constructor([])
## object-factory.h (module 'core'): ns3::ObjectFactory::ObjectFactory(std::string typeId) [constructor]
cls.add_constructor([param('std::string', 'typeId')])
## object-factory.h (module 'core'): ns3::Ptr<ns3::Object> ns3::ObjectFactory::Create() const [member function]
cls.add_method('Create',
'ns3::Ptr< ns3::Object >',
[],
is_const=True)
## object-factory.h (module 'core'): ns3::TypeId ns3::ObjectFactory::GetTypeId() const [member function]
cls.add_method('GetTypeId',
'ns3::TypeId',
[],
is_const=True)
## object-factory.h (module 'core'): void ns3::ObjectFactory::Set(std::string name, ns3::AttributeValue const & value) [member function]
cls.add_method('Set',
'void',
[param('std::string', 'name'), param('ns3::AttributeValue const &', 'value')])
## object-factory.h (module 'core'): void ns3::ObjectFactory::SetTypeId(ns3::TypeId tid) [member function]
cls.add_method('SetTypeId',
'void',
[param('ns3::TypeId', 'tid')])
## object-factory.h (module 'core'): void ns3::ObjectFactory::SetTypeId(char const * tid) [member function]
cls.add_method('SetTypeId',
'void',
[param('char const *', 'tid')])
## object-factory.h (module 'core'): void ns3::ObjectFactory::SetTypeId(std::string tid) [member function]
cls.add_method('SetTypeId',
'void',
[param('std::string', 'tid')])
return
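# Hedged ObjectFactory sketch: create an Object-derived instance by TypeId
# name. 'ns3::Node' is only an example type; importing ns.network is assumed
# to register it with the TypeId system:
def _example_object_factory():
    import ns.core
    import ns.network
    factory = ns.core.ObjectFactory()
    factory.SetTypeId('ns3::Node')   # SetTypeId(std::string tid) overload
    node = factory.Create()          # returns a Ptr<ns3::Object>
    return factory.GetTypeId().GetName(), node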
def register_Ns3PacketMetadata_methods(root_module, cls):
## packet-metadata.h (module 'network'): ns3::PacketMetadata::PacketMetadata(uint64_t uid, uint32_t size) [constructor]
cls.add_constructor([param('uint64_t', 'uid'), param('uint32_t', 'size')])
## packet-metadata.h (module 'network'): ns3::PacketMetadata::PacketMetadata(ns3::PacketMetadata const & o) [copy constructor]
cls.add_constructor([param('ns3::PacketMetadata const &', 'o')])
## packet-metadata.h (module 'network'): void ns3::PacketMetadata::AddAtEnd(ns3::PacketMetadata const & o) [member function]
cls.add_method('AddAtEnd',
'void',
[param('ns3::PacketMetadata const &', 'o')])
## packet-metadata.h (module 'network'): void ns3::PacketMetadata::AddHeader(ns3::Header const & header, uint32_t size) [member function]
cls.add_method('AddHeader',
'void',
[param('ns3::Header const &', 'header'), param('uint32_t', 'size')])
## packet-metadata.h (module 'network'): void ns3::PacketMetadata::AddPaddingAtEnd(uint32_t end) [member function]
cls.add_method('AddPaddingAtEnd',
'void',
[param('uint32_t', 'end')])
## packet-metadata.h (module 'network'): void ns3::PacketMetadata::AddTrailer(ns3::Trailer const & trailer, uint32_t size) [member function]
cls.add_method('AddTrailer',
'void',
[param('ns3::Trailer const &', 'trailer'), param('uint32_t', 'size')])
## packet-metadata.h (module 'network'): ns3::PacketMetadata::ItemIterator ns3::PacketMetadata::BeginItem(ns3::Buffer buffer) const [member function]
cls.add_method('BeginItem',
'ns3::PacketMetadata::ItemIterator',
[param('ns3::Buffer', 'buffer')],
is_const=True)
## packet-metadata.h (module 'network'): ns3::PacketMetadata ns3::PacketMetadata::CreateFragment(uint32_t start, uint32_t end) const [member function]
cls.add_method('CreateFragment',
'ns3::PacketMetadata',
[param('uint32_t', 'start'), param('uint32_t', 'end')],
is_const=True)
## packet-metadata.h (module 'network'): uint32_t ns3::PacketMetadata::Deserialize(uint8_t const * buffer, uint32_t size) [member function]
cls.add_method('Deserialize',
'uint32_t',
[param('uint8_t const *', 'buffer'), param('uint32_t', 'size')])
## packet-metadata.h (module 'network'): static void ns3::PacketMetadata::Enable() [member function]
cls.add_method('Enable',
'void',
[],
is_static=True)
## packet-metadata.h (module 'network'): static void ns3::PacketMetadata::EnableChecking() [member function]
cls.add_method('EnableChecking',
'void',
[],
is_static=True)
## packet-metadata.h (module 'network'): uint32_t ns3::PacketMetadata::GetSerializedSize() const [member function]
cls.add_method('GetSerializedSize',
'uint32_t',
[],
is_const=True)
## packet-metadata.h (module 'network'): uint64_t ns3::PacketMetadata::GetUid() const [member function]
cls.add_method('GetUid',
'uint64_t',
[],
is_const=True)
## packet-metadata.h (module 'network'): void ns3::PacketMetadata::RemoveAtEnd(uint32_t end) [member function]
cls.add_method('RemoveAtEnd',
'void',
[param('uint32_t', 'end')])
## packet-metadata.h (module 'network'): void ns3::PacketMetadata::RemoveAtStart(uint32_t start) [member function]
cls.add_method('RemoveAtStart',
'void',
[param('uint32_t', 'start')])
## packet-metadata.h (module 'network'): void ns3::PacketMetadata::RemoveHeader(ns3::Header const & header, uint32_t size) [member function]
cls.add_method('RemoveHeader',
'void',
[param('ns3::Header const &', 'header'), param('uint32_t', 'size')])
## packet-metadata.h (module 'network'): void ns3::PacketMetadata::RemoveTrailer(ns3::Trailer const & trailer, uint32_t size) [member function]
cls.add_method('RemoveTrailer',
'void',
[param('ns3::Trailer const &', 'trailer'), param('uint32_t', 'size')])
## packet-metadata.h (module 'network'): uint32_t ns3::PacketMetadata::Serialize(uint8_t * buffer, uint32_t maxSize) const [member function]
cls.add_method('Serialize',
'uint32_t',
[param('uint8_t *', 'buffer'), param('uint32_t', 'maxSize')],
is_const=True)
return
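# Hedged sketch: the two static switches registered above are typically
# flipped once at program start so packets record the headers and trailers
# added to them (assumes the 'ns.network' import name):
def _example_enable_packet_metadata():
    import ns.network
    ns.network.PacketMetadata.Enable()           # record per-packet metadata
    ns.network.PacketMetadata.EnableChecking()   # plus consistency checks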
def register_Ns3PacketMetadataItem_methods(root_module, cls):
## packet-metadata.h (module 'network'): ns3::PacketMetadata::Item::Item() [constructor]
cls.add_constructor([])
## packet-metadata.h (module 'network'): ns3::PacketMetadata::Item::Item(ns3::PacketMetadata::Item const & arg0) [copy constructor]
cls.add_constructor([param('ns3::PacketMetadata::Item const &', 'arg0')])
## packet-metadata.h (module 'network'): ns3::PacketMetadata::Item::current [variable]
cls.add_instance_attribute('current', 'ns3::Buffer::Iterator', is_const=False)
## packet-metadata.h (module 'network'): ns3::PacketMetadata::Item::currentSize [variable]
cls.add_instance_attribute('currentSize', 'uint32_t', is_const=False)
## packet-metadata.h (module 'network'): ns3::PacketMetadata::Item::currentTrimedFromEnd [variable]
cls.add_instance_attribute('currentTrimedFromEnd', 'uint32_t', is_const=False)
## packet-metadata.h (module 'network'): ns3::PacketMetadata::Item::currentTrimedFromStart [variable]
cls.add_instance_attribute('currentTrimedFromStart', 'uint32_t', is_const=False)
## packet-metadata.h (module 'network'): ns3::PacketMetadata::Item::isFragment [variable]
cls.add_instance_attribute('isFragment', 'bool', is_const=False)
## packet-metadata.h (module 'network'): ns3::PacketMetadata::Item::tid [variable]
cls.add_instance_attribute('tid', 'ns3::TypeId', is_const=False)
return
def register_Ns3PacketMetadataItemIterator_methods(root_module, cls):
## packet-metadata.h (module 'network'): ns3::PacketMetadata::ItemIterator::ItemIterator(ns3::PacketMetadata::ItemIterator const & arg0) [copy constructor]
cls.add_constructor([param('ns3::PacketMetadata::ItemIterator const &', 'arg0')])
## packet-metadata.h (module 'network'): ns3::PacketMetadata::ItemIterator::ItemIterator(ns3::PacketMetadata const * metadata, ns3::Buffer buffer) [constructor]
cls.add_constructor([param('ns3::PacketMetadata const *', 'metadata'), param('ns3::Buffer', 'buffer')])
## packet-metadata.h (module 'network'): bool ns3::PacketMetadata::ItemIterator::HasNext() const [member function]
cls.add_method('HasNext',
'bool',
[],
is_const=True)
## packet-metadata.h (module 'network'): ns3::PacketMetadata::Item ns3::PacketMetadata::ItemIterator::Next() [member function]
cls.add_method('Next',
'ns3::PacketMetadata::Item',
[])
return
def register_Ns3PacketTagIterator_methods(root_module, cls):
## packet.h (module 'network'): ns3::PacketTagIterator::PacketTagIterator(ns3::PacketTagIterator const & arg0) [copy constructor]
cls.add_constructor([param('ns3::PacketTagIterator const &', 'arg0')])
## packet.h (module 'network'): bool ns3::PacketTagIterator::HasNext() const [member function]
cls.add_method('HasNext',
'bool',
[],
is_const=True)
## packet.h (module 'network'): ns3::PacketTagIterator::Item ns3::PacketTagIterator::Next() [member function]
cls.add_method('Next',
'ns3::PacketTagIterator::Item',
[])
return
def register_Ns3PacketTagIteratorItem_methods(root_module, cls):
## packet.h (module 'network'): ns3::PacketTagIterator::Item::Item(ns3::PacketTagIterator::Item const & arg0) [copy constructor]
cls.add_constructor([param('ns3::PacketTagIterator::Item const &', 'arg0')])
## packet.h (module 'network'): void ns3::PacketTagIterator::Item::GetTag(ns3::Tag & tag) const [member function]
cls.add_method('GetTag',
'void',
[param('ns3::Tag &', 'tag')],
is_const=True)
## packet.h (module 'network'): ns3::TypeId ns3::PacketTagIterator::Item::GetTypeId() const [member function]
cls.add_method('GetTypeId',
'ns3::TypeId',
[],
is_const=True)
return
def register_Ns3PacketTagList_methods(root_module, cls):
## packet-tag-list.h (module 'network'): ns3::PacketTagList::PacketTagList() [constructor]
cls.add_constructor([])
## packet-tag-list.h (module 'network'): ns3::PacketTagList::PacketTagList(ns3::PacketTagList const & o) [copy constructor]
cls.add_constructor([param('ns3::PacketTagList const &', 'o')])
## packet-tag-list.h (module 'network'): void ns3::PacketTagList::Add(ns3::Tag const & tag) const [member function]
cls.add_method('Add',
'void',
[param('ns3::Tag const &', 'tag')],
is_const=True)
## packet-tag-list.h (module 'network'): ns3::PacketTagList::TagData const * ns3::PacketTagList::Head() const [member function]
cls.add_method('Head',
'ns3::PacketTagList::TagData const *',
[],
is_const=True)
## packet-tag-list.h (module 'network'): bool ns3::PacketTagList::Peek(ns3::Tag & tag) const [member function]
cls.add_method('Peek',
'bool',
[param('ns3::Tag &', 'tag')],
is_const=True)
## packet-tag-list.h (module 'network'): bool ns3::PacketTagList::Remove(ns3::Tag & tag) [member function]
cls.add_method('Remove',
'bool',
[param('ns3::Tag &', 'tag')])
## packet-tag-list.h (module 'network'): void ns3::PacketTagList::RemoveAll() [member function]
cls.add_method('RemoveAll',
'void',
[])
return
def register_Ns3PacketTagListTagData_methods(root_module, cls):
## packet-tag-list.h (module 'network'): ns3::PacketTagList::TagData::TagData() [constructor]
cls.add_constructor([])
## packet-tag-list.h (module 'network'): ns3::PacketTagList::TagData::TagData(ns3::PacketTagList::TagData const & arg0) [copy constructor]
cls.add_constructor([param('ns3::PacketTagList::TagData const &', 'arg0')])
## packet-tag-list.h (module 'network'): ns3::PacketTagList::TagData::count [variable]
cls.add_instance_attribute('count', 'uint32_t', is_const=False)
## packet-tag-list.h (module 'network'): ns3::PacketTagList::TagData::data [variable]
cls.add_instance_attribute('data', 'uint8_t [ 20 ]', is_const=False)
## packet-tag-list.h (module 'network'): ns3::PacketTagList::TagData::next [variable]
cls.add_instance_attribute('next', 'ns3::PacketTagList::TagData *', is_const=False)
## packet-tag-list.h (module 'network'): ns3::PacketTagList::TagData::tid [variable]
cls.add_instance_attribute('tid', 'ns3::TypeId', is_const=False)
return
def register_Ns3SentBuffer_methods(root_module, cls):
## mpi-interface.h (module 'mpi'): ns3::SentBuffer::SentBuffer(ns3::SentBuffer const & arg0) [copy constructor]
cls.add_constructor([param('ns3::SentBuffer const &', 'arg0')])
## mpi-interface.h (module 'mpi'): ns3::SentBuffer::SentBuffer() [constructor]
cls.add_constructor([])
## mpi-interface.h (module 'mpi'): uint8_t * ns3::SentBuffer::GetBuffer() [member function]
cls.add_method('GetBuffer',
'uint8_t *',
[])
## mpi-interface.h (module 'mpi'): MPI_Request * ns3::SentBuffer::GetRequest() [member function]
cls.add_method('GetRequest',
'MPI_Request *',
[])
## mpi-interface.h (module 'mpi'): void ns3::SentBuffer::SetBuffer(uint8_t * buffer) [member function]
cls.add_method('SetBuffer',
'void',
[param('uint8_t *', 'buffer')])
return
def register_Ns3SimpleRefCount__Ns3Object_Ns3ObjectBase_Ns3ObjectDeleter_methods(root_module, cls):
## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::Object, ns3::ObjectBase, ns3::ObjectDeleter>::SimpleRefCount() [constructor]
cls.add_constructor([])
## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::Object, ns3::ObjectBase, ns3::ObjectDeleter>::SimpleRefCount(ns3::SimpleRefCount<ns3::Object, ns3::ObjectBase, ns3::ObjectDeleter> const & o) [copy constructor]
cls.add_constructor([param('ns3::SimpleRefCount< ns3::Object, ns3::ObjectBase, ns3::ObjectDeleter > const &', 'o')])
## simple-ref-count.h (module 'core'): static void ns3::SimpleRefCount<ns3::Object, ns3::ObjectBase, ns3::ObjectDeleter>::Cleanup() [member function]
cls.add_method('Cleanup',
'void',
[],
is_static=True)
return
def register_Ns3Tag_methods(root_module, cls):
## tag.h (module 'network'): ns3::Tag::Tag() [constructor]
cls.add_constructor([])
## tag.h (module 'network'): ns3::Tag::Tag(ns3::Tag const & arg0) [copy constructor]
cls.add_constructor([param('ns3::Tag const &', 'arg0')])
## tag.h (module 'network'): void ns3::Tag::Deserialize(ns3::TagBuffer i) [member function]
cls.add_method('Deserialize',
'void',
[param('ns3::TagBuffer', 'i')],
is_pure_virtual=True, is_virtual=True)
## tag.h (module 'network'): uint32_t ns3::Tag::GetSerializedSize() const [member function]
cls.add_method('GetSerializedSize',
'uint32_t',
[],
is_pure_virtual=True, is_const=True, is_virtual=True)
## tag.h (module 'network'): static ns3::TypeId ns3::Tag::GetTypeId() [member function]
cls.add_method('GetTypeId',
'ns3::TypeId',
[],
is_static=True)
## tag.h (module 'network'): void ns3::Tag::Print(std::ostream & os) const [member function]
cls.add_method('Print',
'void',
[param('std::ostream &', 'os')],
is_pure_virtual=True, is_const=True, is_virtual=True)
## tag.h (module 'network'): void ns3::Tag::Serialize(ns3::TagBuffer i) const [member function]
cls.add_method('Serialize',
'void',
[param('ns3::TagBuffer', 'i')],
is_pure_virtual=True, is_const=True, is_virtual=True)
return
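# Because Deserialize, Serialize, Print and GetSerializedSize are registered
# above as virtual, pybindgen emits a proxy class that lets Python subclasses
# override them. A heavily hedged sketch of a custom 4-byte tag (the class
# name and the subclassing support itself are assumptions, not verified here):
def _example_custom_tag_sketch():
    import ns.network
    class ExampleTag(ns.network.Tag):
        def GetSerializedSize(self):
            return 4
        def Serialize(self, buf):        # buf is an ns3::TagBuffer
            buf.WriteU32(7)
        def Deserialize(self, buf):
            self.value = buf.ReadU32()
        def Print(self, os):
            pass
    return ExampleTag()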
def register_Ns3TagBuffer_methods(root_module, cls):
## tag-buffer.h (module 'network'): ns3::TagBuffer::TagBuffer(ns3::TagBuffer const & arg0) [copy constructor]
cls.add_constructor([param('ns3::TagBuffer const &', 'arg0')])
## tag-buffer.h (module 'network'): ns3::TagBuffer::TagBuffer(uint8_t * start, uint8_t * end) [constructor]
cls.add_constructor([param('uint8_t *', 'start'), param('uint8_t *', 'end')])
## tag-buffer.h (module 'network'): void ns3::TagBuffer::CopyFrom(ns3::TagBuffer o) [member function]
cls.add_method('CopyFrom',
'void',
[param('ns3::TagBuffer', 'o')])
## tag-buffer.h (module 'network'): void ns3::TagBuffer::Read(uint8_t * buffer, uint32_t size) [member function]
cls.add_method('Read',
'void',
[param('uint8_t *', 'buffer'), param('uint32_t', 'size')])
## tag-buffer.h (module 'network'): double ns3::TagBuffer::ReadDouble() [member function]
cls.add_method('ReadDouble',
'double',
[])
## tag-buffer.h (module 'network'): uint16_t ns3::TagBuffer::ReadU16() [member function]
cls.add_method('ReadU16',
'uint16_t',
[])
## tag-buffer.h (module 'network'): uint32_t ns3::TagBuffer::ReadU32() [member function]
cls.add_method('ReadU32',
'uint32_t',
[])
## tag-buffer.h (module 'network'): uint64_t ns3::TagBuffer::ReadU64() [member function]
cls.add_method('ReadU64',
'uint64_t',
[])
## tag-buffer.h (module 'network'): uint8_t ns3::TagBuffer::ReadU8() [member function]
cls.add_method('ReadU8',
'uint8_t',
[])
## tag-buffer.h (module 'network'): void ns3::TagBuffer::TrimAtEnd(uint32_t trim) [member function]
cls.add_method('TrimAtEnd',
'void',
[param('uint32_t', 'trim')])
## tag-buffer.h (module 'network'): void ns3::TagBuffer::Write(uint8_t const * buffer, uint32_t size) [member function]
cls.add_method('Write',
'void',
[param('uint8_t const *', 'buffer'), param('uint32_t', 'size')])
## tag-buffer.h (module 'network'): void ns3::TagBuffer::WriteDouble(double v) [member function]
cls.add_method('WriteDouble',
'void',
[param('double', 'v')])
## tag-buffer.h (module 'network'): void ns3::TagBuffer::WriteU16(uint16_t data) [member function]
cls.add_method('WriteU16',
'void',
[param('uint16_t', 'data')])
## tag-buffer.h (module 'network'): void ns3::TagBuffer::WriteU32(uint32_t data) [member function]
cls.add_method('WriteU32',
'void',
[param('uint32_t', 'data')])
## tag-buffer.h (module 'network'): void ns3::TagBuffer::WriteU64(uint64_t v) [member function]
cls.add_method('WriteU64',
'void',
[param('uint64_t', 'v')])
## tag-buffer.h (module 'network'): void ns3::TagBuffer::WriteU8(uint8_t v) [member function]
cls.add_method('WriteU8',
'void',
[param('uint8_t', 'v')])
return
def register_Ns3TypeId_methods(root_module, cls):
cls.add_binary_comparison_operator('<')
cls.add_binary_comparison_operator('!=')
cls.add_output_stream_operator()
cls.add_binary_comparison_operator('==')
## type-id.h (module 'core'): ns3::TypeId::TypeId(char const * name) [constructor]
cls.add_constructor([param('char const *', 'name')])
## type-id.h (module 'core'): ns3::TypeId::TypeId() [constructor]
cls.add_constructor([])
## type-id.h (module 'core'): ns3::TypeId::TypeId(ns3::TypeId const & o) [copy constructor]
cls.add_constructor([param('ns3::TypeId const &', 'o')])
## type-id.h (module 'core'): ns3::TypeId ns3::TypeId::AddAttribute(std::string name, std::string help, ns3::AttributeValue const & initialValue, ns3::Ptr<ns3::AttributeAccessor const> accessor, ns3::Ptr<ns3::AttributeChecker const> checker) [member function]
cls.add_method('AddAttribute',
'ns3::TypeId',
[param('std::string', 'name'), param('std::string', 'help'), param('ns3::AttributeValue const &', 'initialValue'), param('ns3::Ptr< ns3::AttributeAccessor const >', 'accessor'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')])
## type-id.h (module 'core'): ns3::TypeId ns3::TypeId::AddAttribute(std::string name, std::string help, uint32_t flags, ns3::AttributeValue const & initialValue, ns3::Ptr<ns3::AttributeAccessor const> accessor, ns3::Ptr<ns3::AttributeChecker const> checker) [member function]
cls.add_method('AddAttribute',
'ns3::TypeId',
[param('std::string', 'name'), param('std::string', 'help'), param('uint32_t', 'flags'), param('ns3::AttributeValue const &', 'initialValue'), param('ns3::Ptr< ns3::AttributeAccessor const >', 'accessor'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')])
## type-id.h (module 'core'): ns3::TypeId ns3::TypeId::AddTraceSource(std::string name, std::string help, ns3::Ptr<ns3::TraceSourceAccessor const> accessor) [member function]
cls.add_method('AddTraceSource',
'ns3::TypeId',
[param('std::string', 'name'), param('std::string', 'help'), param('ns3::Ptr< ns3::TraceSourceAccessor const >', 'accessor')])
## type-id.h (module 'core'): ns3::TypeId::AttributeInformation ns3::TypeId::GetAttribute(uint32_t i) const [member function]
cls.add_method('GetAttribute',
'ns3::TypeId::AttributeInformation',
[param('uint32_t', 'i')],
is_const=True)
## type-id.h (module 'core'): std::string ns3::TypeId::GetAttributeFullName(uint32_t i) const [member function]
cls.add_method('GetAttributeFullName',
'std::string',
[param('uint32_t', 'i')],
is_const=True)
## type-id.h (module 'core'): uint32_t ns3::TypeId::GetAttributeN() const [member function]
cls.add_method('GetAttributeN',
'uint32_t',
[],
is_const=True)
## type-id.h (module 'core'): ns3::Callback<ns3::ObjectBase*,ns3::empty,ns3::empty,ns3::empty,ns3::empty,ns3::empty,ns3::empty,ns3::empty,ns3::empty,ns3::empty> ns3::TypeId::GetConstructor() const [member function]
cls.add_method('GetConstructor',
'ns3::Callback< ns3::ObjectBase *, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >',
[],
is_const=True)
## type-id.h (module 'core'): std::string ns3::TypeId::GetGroupName() const [member function]
cls.add_method('GetGroupName',
'std::string',
[],
is_const=True)
## type-id.h (module 'core'): std::string ns3::TypeId::GetName() const [member function]
cls.add_method('GetName',
'std::string',
[],
is_const=True)
## type-id.h (module 'core'): ns3::TypeId ns3::TypeId::GetParent() const [member function]
cls.add_method('GetParent',
'ns3::TypeId',
[],
is_const=True)
## type-id.h (module 'core'): static ns3::TypeId ns3::TypeId::GetRegistered(uint32_t i) [member function]
cls.add_method('GetRegistered',
'ns3::TypeId',
[param('uint32_t', 'i')],
is_static=True)
## type-id.h (module 'core'): static uint32_t ns3::TypeId::GetRegisteredN() [member function]
cls.add_method('GetRegisteredN',
'uint32_t',
[],
is_static=True)
## type-id.h (module 'core'): ns3::TypeId::TraceSourceInformation ns3::TypeId::GetTraceSource(uint32_t i) const [member function]
cls.add_method('GetTraceSource',
'ns3::TypeId::TraceSourceInformation',
[param('uint32_t', 'i')],
is_const=True)
## type-id.h (module 'core'): uint32_t ns3::TypeId::GetTraceSourceN() const [member function]
cls.add_method('GetTraceSourceN',
'uint32_t',
[],
is_const=True)
## type-id.h (module 'core'): uint16_t ns3::TypeId::GetUid() const [member function]
cls.add_method('GetUid',
'uint16_t',
[],
is_const=True)
## type-id.h (module 'core'): bool ns3::TypeId::HasConstructor() const [member function]
cls.add_method('HasConstructor',
'bool',
[],
is_const=True)
## type-id.h (module 'core'): bool ns3::TypeId::HasParent() const [member function]
cls.add_method('HasParent',
'bool',
[],
is_const=True)
## type-id.h (module 'core'): ns3::TypeId ns3::TypeId::HideFromDocumentation() [member function]
cls.add_method('HideFromDocumentation',
'ns3::TypeId',
[])
## type-id.h (module 'core'): bool ns3::TypeId::IsChildOf(ns3::TypeId other) const [member function]
cls.add_method('IsChildOf',
'bool',
[param('ns3::TypeId', 'other')],
is_const=True)
## type-id.h (module 'core'): bool ns3::TypeId::LookupAttributeByName(std::string name, ns3::TypeId::AttributeInformation * info) const [member function]
cls.add_method('LookupAttributeByName',
'bool',
[param('std::string', 'name'), param('ns3::TypeId::AttributeInformation *', 'info')],
is_const=True)
## type-id.h (module 'core'): static ns3::TypeId ns3::TypeId::LookupByName(std::string name) [member function]
cls.add_method('LookupByName',
'ns3::TypeId',
[param('std::string', 'name')],
is_static=True)
## type-id.h (module 'core'): ns3::Ptr<ns3::TraceSourceAccessor const> ns3::TypeId::LookupTraceSourceByName(std::string name) const [member function]
cls.add_method('LookupTraceSourceByName',
'ns3::Ptr< ns3::TraceSourceAccessor const >',
[param('std::string', 'name')],
is_const=True)
## type-id.h (module 'core'): bool ns3::TypeId::MustHideFromDocumentation() const [member function]
cls.add_method('MustHideFromDocumentation',
'bool',
[],
is_const=True)
## type-id.h (module 'core'): bool ns3::TypeId::SetAttributeInitialValue(uint32_t i, ns3::Ptr<ns3::AttributeValue const> initialValue) [member function]
cls.add_method('SetAttributeInitialValue',
'bool',
[param('uint32_t', 'i'), param('ns3::Ptr< ns3::AttributeValue const >', 'initialValue')])
## type-id.h (module 'core'): ns3::TypeId ns3::TypeId::SetGroupName(std::string groupName) [member function]
cls.add_method('SetGroupName',
'ns3::TypeId',
[param('std::string', 'groupName')])
## type-id.h (module 'core'): ns3::TypeId ns3::TypeId::SetParent(ns3::TypeId tid) [member function]
cls.add_method('SetParent',
'ns3::TypeId',
[param('ns3::TypeId', 'tid')])
## type-id.h (module 'core'): void ns3::TypeId::SetUid(uint16_t tid) [member function]
cls.add_method('SetUid',
'void',
[param('uint16_t', 'tid')])
return
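# Hedged TypeId introspection sketch using the lookup and attribute methods
# registered above ('ns3::Object' is assumed to be registered once ns.core
# is imported):
def _example_typeid_introspection():
    import ns.core
    tid = ns.core.TypeId.LookupByName('ns3::Object')
    names = [tid.GetAttributeFullName(i) for i in range(tid.GetAttributeN())]
    return tid.GetName(), names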
def register_Ns3TypeIdAttributeInformation_methods(root_module, cls):
## type-id.h (module 'core'): ns3::TypeId::AttributeInformation::AttributeInformation() [constructor]
cls.add_constructor([])
## type-id.h (module 'core'): ns3::TypeId::AttributeInformation::AttributeInformation(ns3::TypeId::AttributeInformation const & arg0) [copy constructor]
cls.add_constructor([param('ns3::TypeId::AttributeInformation const &', 'arg0')])
## type-id.h (module 'core'): ns3::TypeId::AttributeInformation::accessor [variable]
cls.add_instance_attribute('accessor', 'ns3::Ptr< ns3::AttributeAccessor const >', is_const=False)
## type-id.h (module 'core'): ns3::TypeId::AttributeInformation::checker [variable]
cls.add_instance_attribute('checker', 'ns3::Ptr< ns3::AttributeChecker const >', is_const=False)
## type-id.h (module 'core'): ns3::TypeId::AttributeInformation::flags [variable]
cls.add_instance_attribute('flags', 'uint32_t', is_const=False)
## type-id.h (module 'core'): ns3::TypeId::AttributeInformation::help [variable]
cls.add_instance_attribute('help', 'std::string', is_const=False)
## type-id.h (module 'core'): ns3::TypeId::AttributeInformation::initialValue [variable]
cls.add_instance_attribute('initialValue', 'ns3::Ptr< ns3::AttributeValue const >', is_const=False)
## type-id.h (module 'core'): ns3::TypeId::AttributeInformation::name [variable]
cls.add_instance_attribute('name', 'std::string', is_const=False)
## type-id.h (module 'core'): ns3::TypeId::AttributeInformation::originalInitialValue [variable]
cls.add_instance_attribute('originalInitialValue', 'ns3::Ptr< ns3::AttributeValue const >', is_const=False)
return
def register_Ns3TypeIdTraceSourceInformation_methods(root_module, cls):
## type-id.h (module 'core'): ns3::TypeId::TraceSourceInformation::TraceSourceInformation() [constructor]
cls.add_constructor([])
## type-id.h (module 'core'): ns3::TypeId::TraceSourceInformation::TraceSourceInformation(ns3::TypeId::TraceSourceInformation const & arg0) [copy constructor]
cls.add_constructor([param('ns3::TypeId::TraceSourceInformation const &', 'arg0')])
## type-id.h (module 'core'): ns3::TypeId::TraceSourceInformation::accessor [variable]
cls.add_instance_attribute('accessor', 'ns3::Ptr< ns3::TraceSourceAccessor const >', is_const=False)
## type-id.h (module 'core'): ns3::TypeId::TraceSourceInformation::help [variable]
cls.add_instance_attribute('help', 'std::string', is_const=False)
## type-id.h (module 'core'): ns3::TypeId::TraceSourceInformation::name [variable]
cls.add_instance_attribute('name', 'std::string', is_const=False)
return
def register_Ns3Empty_methods(root_module, cls):
## empty.h (module 'core'): ns3::empty::empty() [constructor]
cls.add_constructor([])
## empty.h (module 'core'): ns3::empty::empty(ns3::empty const & arg0) [copy constructor]
cls.add_constructor([param('ns3::empty const &', 'arg0')])
return
def register_Ns3Int64x64_t_methods(root_module, cls):
cls.add_binary_numeric_operator('*', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('long long unsigned int const', 'right'))
cls.add_binary_numeric_operator('*', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('long unsigned int const', 'right'))
cls.add_binary_numeric_operator('*', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('unsigned int const', 'right'))
cls.add_binary_numeric_operator('*', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('short unsigned int const', 'right'))
cls.add_binary_numeric_operator('*', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('unsigned char const', 'right'))
cls.add_binary_numeric_operator('*', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('long long int const', 'right'))
cls.add_binary_numeric_operator('*', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('long int const', 'right'))
cls.add_binary_numeric_operator('*', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('int const', 'right'))
cls.add_binary_numeric_operator('*', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('short int const', 'right'))
cls.add_binary_numeric_operator('*', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('signed char const', 'right'))
cls.add_binary_numeric_operator('*', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('double const', 'right'))
cls.add_binary_numeric_operator('*', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('ns3::int64x64_t const &', 'right'))
cls.add_binary_numeric_operator('+', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('long long unsigned int const', 'right'))
cls.add_binary_numeric_operator('+', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('long unsigned int const', 'right'))
cls.add_binary_numeric_operator('+', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('unsigned int const', 'right'))
cls.add_binary_numeric_operator('+', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('short unsigned int const', 'right'))
cls.add_binary_numeric_operator('+', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('unsigned char const', 'right'))
cls.add_binary_numeric_operator('+', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('long long int const', 'right'))
cls.add_binary_numeric_operator('+', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('long int const', 'right'))
cls.add_binary_numeric_operator('+', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('int const', 'right'))
cls.add_binary_numeric_operator('+', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('short int const', 'right'))
cls.add_binary_numeric_operator('+', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('signed char const', 'right'))
cls.add_binary_numeric_operator('+', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('double const', 'right'))
cls.add_binary_numeric_operator('+', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('ns3::int64x64_t const &', 'right'))
cls.add_binary_numeric_operator('-', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('long long unsigned int const', 'right'))
cls.add_binary_numeric_operator('-', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('long unsigned int const', 'right'))
cls.add_binary_numeric_operator('-', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('unsigned int const', 'right'))
cls.add_binary_numeric_operator('-', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('short unsigned int const', 'right'))
cls.add_binary_numeric_operator('-', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('unsigned char const', 'right'))
cls.add_binary_numeric_operator('-', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('long long int const', 'right'))
cls.add_binary_numeric_operator('-', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('long int const', 'right'))
cls.add_binary_numeric_operator('-', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('int const', 'right'))
cls.add_binary_numeric_operator('-', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('short int const', 'right'))
cls.add_binary_numeric_operator('-', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('signed char const', 'right'))
cls.add_binary_numeric_operator('-', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('double const', 'right'))
cls.add_unary_numeric_operator('-')
cls.add_binary_numeric_operator('-', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('ns3::int64x64_t const &', 'right'))
cls.add_binary_numeric_operator('/', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('long long unsigned int const', 'right'))
cls.add_binary_numeric_operator('/', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('long unsigned int const', 'right'))
cls.add_binary_numeric_operator('/', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('unsigned int const', 'right'))
cls.add_binary_numeric_operator('/', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('short unsigned int const', 'right'))
cls.add_binary_numeric_operator('/', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('unsigned char const', 'right'))
cls.add_binary_numeric_operator('/', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('long long int const', 'right'))
cls.add_binary_numeric_operator('/', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('long int const', 'right'))
cls.add_binary_numeric_operator('/', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('int const', 'right'))
cls.add_binary_numeric_operator('/', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('short int const', 'right'))
cls.add_binary_numeric_operator('/', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('signed char const', 'right'))
cls.add_binary_numeric_operator('/', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('double const', 'right'))
cls.add_binary_numeric_operator('/', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('ns3::int64x64_t const &', 'right'))
cls.add_binary_comparison_operator('<')
cls.add_binary_comparison_operator('>')
cls.add_binary_comparison_operator('!=')
cls.add_inplace_numeric_operator('*=', param('ns3::int64x64_t const &', 'right'))
cls.add_inplace_numeric_operator('+=', param('ns3::int64x64_t const &', 'right'))
cls.add_inplace_numeric_operator('-=', param('ns3::int64x64_t const &', 'right'))
cls.add_inplace_numeric_operator('/=', param('ns3::int64x64_t const &', 'right'))
cls.add_output_stream_operator()
cls.add_binary_comparison_operator('<=')
cls.add_binary_comparison_operator('==')
cls.add_binary_comparison_operator('>=')
## int64x64-double.h (module 'core'): ns3::int64x64_t::int64x64_t() [constructor]
cls.add_constructor([])
## int64x64-double.h (module 'core'): ns3::int64x64_t::int64x64_t(double v) [constructor]
cls.add_constructor([param('double', 'v')])
## int64x64-double.h (module 'core'): ns3::int64x64_t::int64x64_t(int v) [constructor]
cls.add_constructor([param('int', 'v')])
## int64x64-double.h (module 'core'): ns3::int64x64_t::int64x64_t(long int v) [constructor]
cls.add_constructor([param('long int', 'v')])
## int64x64-double.h (module 'core'): ns3::int64x64_t::int64x64_t(long long int v) [constructor]
cls.add_constructor([param('long long int', 'v')])
## int64x64-double.h (module 'core'): ns3::int64x64_t::int64x64_t(unsigned int v) [constructor]
cls.add_constructor([param('unsigned int', 'v')])
## int64x64-double.h (module 'core'): ns3::int64x64_t::int64x64_t(long unsigned int v) [constructor]
cls.add_constructor([param('long unsigned int', 'v')])
## int64x64-double.h (module 'core'): ns3::int64x64_t::int64x64_t(long long unsigned int v) [constructor]
cls.add_constructor([param('long long unsigned int', 'v')])
## int64x64-double.h (module 'core'): ns3::int64x64_t::int64x64_t(int64_t hi, uint64_t lo) [constructor]
cls.add_constructor([param('int64_t', 'hi'), param('uint64_t', 'lo')])
## int64x64-double.h (module 'core'): ns3::int64x64_t::int64x64_t(ns3::int64x64_t const & o) [copy constructor]
cls.add_constructor([param('ns3::int64x64_t const &', 'o')])
## int64x64-double.h (module 'core'): double ns3::int64x64_t::GetDouble() const [member function]
cls.add_method('GetDouble',
'double',
[],
is_const=True)
## int64x64-double.h (module 'core'): int64_t ns3::int64x64_t::GetHigh() const [member function]
cls.add_method('GetHigh',
'int64_t',
[],
is_const=True)
## int64x64-double.h (module 'core'): uint64_t ns3::int64x64_t::GetLow() const [member function]
cls.add_method('GetLow',
'uint64_t',
[],
is_const=True)
## int64x64-double.h (module 'core'): static ns3::int64x64_t ns3::int64x64_t::Invert(uint64_t v) [member function]
cls.add_method('Invert',
'ns3::int64x64_t',
[param('uint64_t', 'v')],
is_static=True)
## int64x64-double.h (module 'core'): void ns3::int64x64_t::MulByInvert(ns3::int64x64_t const & o) [member function]
cls.add_method('MulByInvert',
'void',
[param('ns3::int64x64_t const &', 'o')])
return
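# Note: the block of add_binary_numeric_operator calls above mirrors the C++
# operator overload set of ns3::int64x64_t, one registration per right-hand
# operand type. A hedged sketch of the Python behaviour this should yield,
# assuming the standard 'import ns.core' module layout of the ns-3 bindings:
#
#   import ns.core
#   x = ns.core.int64x64_t(1.5)
#   y = x * 2 + ns.core.int64x64_t(0.25)  # dispatches to the overloads above
#   print(y.GetDouble())                  # expected: 3.25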
def register_Ns3Chunk_methods(root_module, cls):
## chunk.h (module 'network'): ns3::Chunk::Chunk() [constructor]
cls.add_constructor([])
## chunk.h (module 'network'): ns3::Chunk::Chunk(ns3::Chunk const & arg0) [copy constructor]
cls.add_constructor([param('ns3::Chunk const &', 'arg0')])
## chunk.h (module 'network'): uint32_t ns3::Chunk::Deserialize(ns3::Buffer::Iterator start) [member function]
cls.add_method('Deserialize',
'uint32_t',
[param('ns3::Buffer::Iterator', 'start')],
is_pure_virtual=True, is_virtual=True)
## chunk.h (module 'network'): static ns3::TypeId ns3::Chunk::GetTypeId() [member function]
cls.add_method('GetTypeId',
'ns3::TypeId',
[],
is_static=True)
## chunk.h (module 'network'): void ns3::Chunk::Print(std::ostream & os) const [member function]
cls.add_method('Print',
'void',
[param('std::ostream &', 'os')],
is_pure_virtual=True, is_const=True, is_virtual=True)
return
def register_Ns3Header_methods(root_module, cls):
cls.add_output_stream_operator()
## header.h (module 'network'): ns3::Header::Header() [constructor]
cls.add_constructor([])
## header.h (module 'network'): ns3::Header::Header(ns3::Header const & arg0) [copy constructor]
cls.add_constructor([param('ns3::Header const &', 'arg0')])
## header.h (module 'network'): uint32_t ns3::Header::Deserialize(ns3::Buffer::Iterator start) [member function]
cls.add_method('Deserialize',
'uint32_t',
[param('ns3::Buffer::Iterator', 'start')],
is_pure_virtual=True, is_virtual=True)
## header.h (module 'network'): uint32_t ns3::Header::GetSerializedSize() const [member function]
cls.add_method('GetSerializedSize',
'uint32_t',
[],
is_pure_virtual=True, is_const=True, is_virtual=True)
## header.h (module 'network'): static ns3::TypeId ns3::Header::GetTypeId() [member function]
cls.add_method('GetTypeId',
'ns3::TypeId',
[],
is_static=True)
## header.h (module 'network'): void ns3::Header::Print(std::ostream & os) const [member function]
cls.add_method('Print',
'void',
[param('std::ostream &', 'os')],
is_pure_virtual=True, is_const=True, is_virtual=True)
## header.h (module 'network'): void ns3::Header::Serialize(ns3::Buffer::Iterator start) const [member function]
cls.add_method('Serialize',
'void',
[param('ns3::Buffer::Iterator', 'start')],
is_pure_virtual=True, is_const=True, is_virtual=True)
return
def register_Ns3Object_methods(root_module, cls):
## object.h (module 'core'): ns3::Object::Object() [constructor]
cls.add_constructor([])
## object.h (module 'core'): void ns3::Object::AggregateObject(ns3::Ptr<ns3::Object> other) [member function]
cls.add_method('AggregateObject',
'void',
[param('ns3::Ptr< ns3::Object >', 'other')])
## object.h (module 'core'): void ns3::Object::Dispose() [member function]
cls.add_method('Dispose',
'void',
[])
## object.h (module 'core'): ns3::Object::AggregateIterator ns3::Object::GetAggregateIterator() const [member function]
cls.add_method('GetAggregateIterator',
'ns3::Object::AggregateIterator',
[],
is_const=True)
## object.h (module 'core'): ns3::TypeId ns3::Object::GetInstanceTypeId() const [member function]
cls.add_method('GetInstanceTypeId',
'ns3::TypeId',
[],
is_const=True, is_virtual=True)
## object.h (module 'core'): static ns3::TypeId ns3::Object::GetTypeId() [member function]
cls.add_method('GetTypeId',
'ns3::TypeId',
[],
is_static=True)
## object.h (module 'core'): void ns3::Object::Start() [member function]
cls.add_method('Start',
'void',
[])
## object.h (module 'core'): ns3::Object::Object(ns3::Object const & o) [copy constructor]
cls.add_constructor([param('ns3::Object const &', 'o')],
visibility='protected')
## object.h (module 'core'): void ns3::Object::DoDispose() [member function]
cls.add_method('DoDispose',
'void',
[],
visibility='protected', is_virtual=True)
## object.h (module 'core'): void ns3::Object::DoStart() [member function]
cls.add_method('DoStart',
'void',
[],
visibility='protected', is_virtual=True)
## object.h (module 'core'): void ns3::Object::NotifyNewAggregate() [member function]
cls.add_method('NotifyNewAggregate',
'void',
[],
visibility='protected', is_virtual=True)
return
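# Note: the copy constructor and the Do* hooks above are registered with
# visibility='protected', so they stay out of the public wrapper API while
# the is_virtual=True flags still let Python subclasses of ns3::Object
# override DoDispose, DoStart and NotifyNewAggregate.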
def register_Ns3ObjectAggregateIterator_methods(root_module, cls):
## object.h (module 'core'): ns3::Object::AggregateIterator::AggregateIterator(ns3::Object::AggregateIterator const & arg0) [copy constructor]
cls.add_constructor([param('ns3::Object::AggregateIterator const &', 'arg0')])
## object.h (module 'core'): ns3::Object::AggregateIterator::AggregateIterator() [constructor]
cls.add_constructor([])
## object.h (module 'core'): bool ns3::Object::AggregateIterator::HasNext() const [member function]
cls.add_method('HasNext',
'bool',
[],
is_const=True)
## object.h (module 'core'): ns3::Ptr<ns3::Object const> ns3::Object::AggregateIterator::Next() [member function]
cls.add_method('Next',
'ns3::Ptr< ns3::Object const >',
[])
return
def register_Ns3Scheduler_methods(root_module, cls):
## scheduler.h (module 'core'): ns3::Scheduler::Scheduler() [constructor]
cls.add_constructor([])
## scheduler.h (module 'core'): ns3::Scheduler::Scheduler(ns3::Scheduler const & arg0) [copy constructor]
cls.add_constructor([param('ns3::Scheduler const &', 'arg0')])
## scheduler.h (module 'core'): static ns3::TypeId ns3::Scheduler::GetTypeId() [member function]
cls.add_method('GetTypeId',
'ns3::TypeId',
[],
is_static=True)
## scheduler.h (module 'core'): void ns3::Scheduler::Insert(ns3::Scheduler::Event const & ev) [member function]
cls.add_method('Insert',
'void',
[param('ns3::Scheduler::Event const &', 'ev')],
is_pure_virtual=True, is_virtual=True)
## scheduler.h (module 'core'): bool ns3::Scheduler::IsEmpty() const [member function]
cls.add_method('IsEmpty',
'bool',
[],
is_pure_virtual=True, is_const=True, is_virtual=True)
## scheduler.h (module 'core'): ns3::Scheduler::Event ns3::Scheduler::PeekNext() const [member function]
cls.add_method('PeekNext',
'ns3::Scheduler::Event',
[],
is_pure_virtual=True, is_const=True, is_virtual=True)
## scheduler.h (module 'core'): void ns3::Scheduler::Remove(ns3::Scheduler::Event const & ev) [member function]
cls.add_method('Remove',
'void',
[param('ns3::Scheduler::Event const &', 'ev')],
is_pure_virtual=True, is_virtual=True)
## scheduler.h (module 'core'): ns3::Scheduler::Event ns3::Scheduler::RemoveNext() [member function]
cls.add_method('RemoveNext',
'ns3::Scheduler::Event',
[],
is_pure_virtual=True, is_virtual=True)
return
def register_Ns3SchedulerEvent_methods(root_module, cls):
cls.add_binary_comparison_operator('<')
## scheduler.h (module 'core'): ns3::Scheduler::Event::Event() [constructor]
cls.add_constructor([])
## scheduler.h (module 'core'): ns3::Scheduler::Event::Event(ns3::Scheduler::Event const & arg0) [copy constructor]
cls.add_constructor([param('ns3::Scheduler::Event const &', 'arg0')])
## scheduler.h (module 'core'): ns3::Scheduler::Event::impl [variable]
cls.add_instance_attribute('impl', 'ns3::EventImpl *', is_const=False)
## scheduler.h (module 'core'): ns3::Scheduler::Event::key [variable]
cls.add_instance_attribute('key', 'ns3::Scheduler::EventKey', is_const=False)
return
def register_Ns3SchedulerEventKey_methods(root_module, cls):
cls.add_binary_comparison_operator('<')
cls.add_binary_comparison_operator('>')
cls.add_binary_comparison_operator('!=')
## scheduler.h (module 'core'): ns3::Scheduler::EventKey::EventKey() [constructor]
cls.add_constructor([])
## scheduler.h (module 'core'): ns3::Scheduler::EventKey::EventKey(ns3::Scheduler::EventKey const & arg0) [copy constructor]
cls.add_constructor([param('ns3::Scheduler::EventKey const &', 'arg0')])
## scheduler.h (module 'core'): ns3::Scheduler::EventKey::m_context [variable]
cls.add_instance_attribute('m_context', 'uint32_t', is_const=False)
## scheduler.h (module 'core'): ns3::Scheduler::EventKey::m_ts [variable]
cls.add_instance_attribute('m_ts', 'uint64_t', is_const=False)
## scheduler.h (module 'core'): ns3::Scheduler::EventKey::m_uid [variable]
cls.add_instance_attribute('m_uid', 'uint32_t', is_const=False)
return
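# Note: EventKey is a plain struct wrapper: m_ts, m_uid and m_context are
# exposed as writable instance attributes, and the '<', '>' and '!='
# comparisons registered above are what scheduler implementations rely on to
# order pending events.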
def register_Ns3SimpleRefCount__Ns3AttributeAccessor_Ns3Empty_Ns3DefaultDeleter__lt__ns3AttributeAccessor__gt___methods(root_module, cls):
## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::AttributeAccessor, ns3::empty, ns3::DefaultDeleter<ns3::AttributeAccessor> >::SimpleRefCount() [constructor]
cls.add_constructor([])
## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::AttributeAccessor, ns3::empty, ns3::DefaultDeleter<ns3::AttributeAccessor> >::SimpleRefCount(ns3::SimpleRefCount<ns3::AttributeAccessor, ns3::empty, ns3::DefaultDeleter<ns3::AttributeAccessor> > const & o) [copy constructor]
cls.add_constructor([param('ns3::SimpleRefCount< ns3::AttributeAccessor, ns3::empty, ns3::DefaultDeleter< ns3::AttributeAccessor > > const &', 'o')])
## simple-ref-count.h (module 'core'): static void ns3::SimpleRefCount<ns3::AttributeAccessor, ns3::empty, ns3::DefaultDeleter<ns3::AttributeAccessor> >::Cleanup() [member function]
cls.add_method('Cleanup',
'void',
[],
is_static=True)
return
def register_Ns3SimpleRefCount__Ns3AttributeChecker_Ns3Empty_Ns3DefaultDeleter__lt__ns3AttributeChecker__gt___methods(root_module, cls):
## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::AttributeChecker, ns3::empty, ns3::DefaultDeleter<ns3::AttributeChecker> >::SimpleRefCount() [constructor]
cls.add_constructor([])
## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::AttributeChecker, ns3::empty, ns3::DefaultDeleter<ns3::AttributeChecker> >::SimpleRefCount(ns3::SimpleRefCount<ns3::AttributeChecker, ns3::empty, ns3::DefaultDeleter<ns3::AttributeChecker> > const & o) [copy constructor]
cls.add_constructor([param('ns3::SimpleRefCount< ns3::AttributeChecker, ns3::empty, ns3::DefaultDeleter< ns3::AttributeChecker > > const &', 'o')])
## simple-ref-count.h (module 'core'): static void ns3::SimpleRefCount<ns3::AttributeChecker, ns3::empty, ns3::DefaultDeleter<ns3::AttributeChecker> >::Cleanup() [member function]
cls.add_method('Cleanup',
'void',
[],
is_static=True)
return
def register_Ns3SimpleRefCount__Ns3AttributeValue_Ns3Empty_Ns3DefaultDeleter__lt__ns3AttributeValue__gt___methods(root_module, cls):
## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::AttributeValue, ns3::empty, ns3::DefaultDeleter<ns3::AttributeValue> >::SimpleRefCount() [constructor]
cls.add_constructor([])
## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::AttributeValue, ns3::empty, ns3::DefaultDeleter<ns3::AttributeValue> >::SimpleRefCount(ns3::SimpleRefCount<ns3::AttributeValue, ns3::empty, ns3::DefaultDeleter<ns3::AttributeValue> > const & o) [copy constructor]
cls.add_constructor([param('ns3::SimpleRefCount< ns3::AttributeValue, ns3::empty, ns3::DefaultDeleter< ns3::AttributeValue > > const &', 'o')])
## simple-ref-count.h (module 'core'): static void ns3::SimpleRefCount<ns3::AttributeValue, ns3::empty, ns3::DefaultDeleter<ns3::AttributeValue> >::Cleanup() [member function]
cls.add_method('Cleanup',
'void',
[],
is_static=True)
return
def register_Ns3SimpleRefCount__Ns3CallbackImplBase_Ns3Empty_Ns3DefaultDeleter__lt__ns3CallbackImplBase__gt___methods(root_module, cls):
## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::CallbackImplBase, ns3::empty, ns3::DefaultDeleter<ns3::CallbackImplBase> >::SimpleRefCount() [constructor]
cls.add_constructor([])
## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::CallbackImplBase, ns3::empty, ns3::DefaultDeleter<ns3::CallbackImplBase> >::SimpleRefCount(ns3::SimpleRefCount<ns3::CallbackImplBase, ns3::empty, ns3::DefaultDeleter<ns3::CallbackImplBase> > const & o) [copy constructor]
cls.add_constructor([param('ns3::SimpleRefCount< ns3::CallbackImplBase, ns3::empty, ns3::DefaultDeleter< ns3::CallbackImplBase > > const &', 'o')])
## simple-ref-count.h (module 'core'): static void ns3::SimpleRefCount<ns3::CallbackImplBase, ns3::empty, ns3::DefaultDeleter<ns3::CallbackImplBase> >::Cleanup() [member function]
cls.add_method('Cleanup',
'void',
[],
is_static=True)
return
def register_Ns3SimpleRefCount__Ns3EventImpl_Ns3Empty_Ns3DefaultDeleter__lt__ns3EventImpl__gt___methods(root_module, cls):
## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::EventImpl, ns3::empty, ns3::DefaultDeleter<ns3::EventImpl> >::SimpleRefCount() [constructor]
cls.add_constructor([])
## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::EventImpl, ns3::empty, ns3::DefaultDeleter<ns3::EventImpl> >::SimpleRefCount(ns3::SimpleRefCount<ns3::EventImpl, ns3::empty, ns3::DefaultDeleter<ns3::EventImpl> > const & o) [copy constructor]
cls.add_constructor([param('ns3::SimpleRefCount< ns3::EventImpl, ns3::empty, ns3::DefaultDeleter< ns3::EventImpl > > const &', 'o')])
## simple-ref-count.h (module 'core'): static void ns3::SimpleRefCount<ns3::EventImpl, ns3::empty, ns3::DefaultDeleter<ns3::EventImpl> >::Cleanup() [member function]
cls.add_method('Cleanup',
'void',
[],
is_static=True)
return
def register_Ns3SimpleRefCount__Ns3NixVector_Ns3Empty_Ns3DefaultDeleter__lt__ns3NixVector__gt___methods(root_module, cls):
## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::NixVector, ns3::empty, ns3::DefaultDeleter<ns3::NixVector> >::SimpleRefCount() [constructor]
cls.add_constructor([])
## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::NixVector, ns3::empty, ns3::DefaultDeleter<ns3::NixVector> >::SimpleRefCount(ns3::SimpleRefCount<ns3::NixVector, ns3::empty, ns3::DefaultDeleter<ns3::NixVector> > const & o) [copy constructor]
cls.add_constructor([param('ns3::SimpleRefCount< ns3::NixVector, ns3::empty, ns3::DefaultDeleter< ns3::NixVector > > const &', 'o')])
## simple-ref-count.h (module 'core'): static void ns3::SimpleRefCount<ns3::NixVector, ns3::empty, ns3::DefaultDeleter<ns3::NixVector> >::Cleanup() [member function]
cls.add_method('Cleanup',
'void',
[],
is_static=True)
return
def register_Ns3SimpleRefCount__Ns3Packet_Ns3Empty_Ns3DefaultDeleter__lt__ns3Packet__gt___methods(root_module, cls):
## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::Packet, ns3::empty, ns3::DefaultDeleter<ns3::Packet> >::SimpleRefCount() [constructor]
cls.add_constructor([])
## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::Packet, ns3::empty, ns3::DefaultDeleter<ns3::Packet> >::SimpleRefCount(ns3::SimpleRefCount<ns3::Packet, ns3::empty, ns3::DefaultDeleter<ns3::Packet> > const & o) [copy constructor]
cls.add_constructor([param('ns3::SimpleRefCount< ns3::Packet, ns3::empty, ns3::DefaultDeleter< ns3::Packet > > const &', 'o')])
## simple-ref-count.h (module 'core'): static void ns3::SimpleRefCount<ns3::Packet, ns3::empty, ns3::DefaultDeleter<ns3::Packet> >::Cleanup() [member function]
cls.add_method('Cleanup',
'void',
[],
is_static=True)
return
def register_Ns3SimpleRefCount__Ns3TraceSourceAccessor_Ns3Empty_Ns3DefaultDeleter__lt__ns3TraceSourceAccessor__gt___methods(root_module, cls):
## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::TraceSourceAccessor, ns3::empty, ns3::DefaultDeleter<ns3::TraceSourceAccessor> >::SimpleRefCount() [constructor]
cls.add_constructor([])
## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::TraceSourceAccessor, ns3::empty, ns3::DefaultDeleter<ns3::TraceSourceAccessor> >::SimpleRefCount(ns3::SimpleRefCount<ns3::TraceSourceAccessor, ns3::empty, ns3::DefaultDeleter<ns3::TraceSourceAccessor> > const & o) [copy constructor]
cls.add_constructor([param('ns3::SimpleRefCount< ns3::TraceSourceAccessor, ns3::empty, ns3::DefaultDeleter< ns3::TraceSourceAccessor > > const &', 'o')])
## simple-ref-count.h (module 'core'): static void ns3::SimpleRefCount<ns3::TraceSourceAccessor, ns3::empty, ns3::DefaultDeleter<ns3::TraceSourceAccessor> >::Cleanup() [member function]
cls.add_method('Cleanup',
'void',
[],
is_static=True)
return
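# Note: the eight register_Ns3SimpleRefCount__* functions above differ only
# in the template argument: every SimpleRefCount<T, ns3::empty,
# ns3::DefaultDeleter<T>> instantiation registers a default constructor, a
# copy constructor and a static Cleanup(). A hypothetical hand-written
# equivalent, shown only to make the shared pattern explicit (the generator
# deliberately emits one function per instantiation instead):
def _register_simple_ref_count_methods(cls, full_cpp_name):
    # full_cpp_name would be e.g. 'ns3::SimpleRefCount< ns3::Packet,
    # ns3::empty, ns3::DefaultDeleter< ns3::Packet > >'
    cls.add_constructor([])
    cls.add_constructor([param(full_cpp_name + ' const &', 'o')])
    cls.add_method('Cleanup', 'void', [], is_static=True)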
def register_Ns3SimulatorImpl_methods(root_module, cls):
## simulator-impl.h (module 'core'): ns3::SimulatorImpl::SimulatorImpl() [constructor]
cls.add_constructor([])
## simulator-impl.h (module 'core'): ns3::SimulatorImpl::SimulatorImpl(ns3::SimulatorImpl const & arg0) [copy constructor]
cls.add_constructor([param('ns3::SimulatorImpl const &', 'arg0')])
## simulator-impl.h (module 'core'): void ns3::SimulatorImpl::Cancel(ns3::EventId const & ev) [member function]
cls.add_method('Cancel',
'void',
[param('ns3::EventId const &', 'ev')],
is_pure_virtual=True, is_virtual=True)
## simulator-impl.h (module 'core'): void ns3::SimulatorImpl::Destroy() [member function]
cls.add_method('Destroy',
'void',
[],
is_pure_virtual=True, is_virtual=True)
## simulator-impl.h (module 'core'): uint32_t ns3::SimulatorImpl::GetContext() const [member function]
cls.add_method('GetContext',
'uint32_t',
[],
is_pure_virtual=True, is_const=True, is_virtual=True)
## simulator-impl.h (module 'core'): ns3::Time ns3::SimulatorImpl::GetDelayLeft(ns3::EventId const & id) const [member function]
cls.add_method('GetDelayLeft',
'ns3::Time',
[param('ns3::EventId const &', 'id')],
is_pure_virtual=True, is_const=True, is_virtual=True)
## simulator-impl.h (module 'core'): ns3::Time ns3::SimulatorImpl::GetMaximumSimulationTime() const [member function]
cls.add_method('GetMaximumSimulationTime',
'ns3::Time',
[],
is_pure_virtual=True, is_const=True, is_virtual=True)
## simulator-impl.h (module 'core'): uint32_t ns3::SimulatorImpl::GetSystemId() const [member function]
cls.add_method('GetSystemId',
'uint32_t',
[],
is_pure_virtual=True, is_const=True, is_virtual=True)
## simulator-impl.h (module 'core'): static ns3::TypeId ns3::SimulatorImpl::GetTypeId() [member function]
cls.add_method('GetTypeId',
'ns3::TypeId',
[],
is_static=True)
## simulator-impl.h (module 'core'): bool ns3::SimulatorImpl::IsExpired(ns3::EventId const & ev) const [member function]
cls.add_method('IsExpired',
'bool',
[param('ns3::EventId const &', 'ev')],
is_pure_virtual=True, is_const=True, is_virtual=True)
## simulator-impl.h (module 'core'): bool ns3::SimulatorImpl::IsFinished() const [member function]
cls.add_method('IsFinished',
'bool',
[],
is_pure_virtual=True, is_const=True, is_virtual=True)
## simulator-impl.h (module 'core'): ns3::Time ns3::SimulatorImpl::Next() const [member function]
cls.add_method('Next',
'ns3::Time',
[],
is_pure_virtual=True, is_const=True, is_virtual=True)
## simulator-impl.h (module 'core'): ns3::Time ns3::SimulatorImpl::Now() const [member function]
cls.add_method('Now',
'ns3::Time',
[],
is_pure_virtual=True, is_const=True, is_virtual=True)
## simulator-impl.h (module 'core'): void ns3::SimulatorImpl::Remove(ns3::EventId const & ev) [member function]
cls.add_method('Remove',
'void',
[param('ns3::EventId const &', 'ev')],
is_pure_virtual=True, is_virtual=True)
## simulator-impl.h (module 'core'): void ns3::SimulatorImpl::Run() [member function]
cls.add_method('Run',
'void',
[],
is_pure_virtual=True, is_virtual=True)
## simulator-impl.h (module 'core'): void ns3::SimulatorImpl::RunOneEvent() [member function]
cls.add_method('RunOneEvent',
'void',
[],
is_pure_virtual=True, is_virtual=True)
## simulator-impl.h (module 'core'): ns3::EventId ns3::SimulatorImpl::Schedule(ns3::Time const & time, ns3::EventImpl * event) [member function]
cls.add_method('Schedule',
'ns3::EventId',
[param('ns3::Time const &', 'time'), param('ns3::EventImpl *', 'event')],
is_pure_virtual=True, is_virtual=True)
## simulator-impl.h (module 'core'): ns3::EventId ns3::SimulatorImpl::ScheduleDestroy(ns3::EventImpl * event) [member function]
cls.add_method('ScheduleDestroy',
'ns3::EventId',
[param('ns3::EventImpl *', 'event')],
is_pure_virtual=True, is_virtual=True)
## simulator-impl.h (module 'core'): ns3::EventId ns3::SimulatorImpl::ScheduleNow(ns3::EventImpl * event) [member function]
cls.add_method('ScheduleNow',
'ns3::EventId',
[param('ns3::EventImpl *', 'event')],
is_pure_virtual=True, is_virtual=True)
## simulator-impl.h (module 'core'): void ns3::SimulatorImpl::ScheduleWithContext(uint32_t context, ns3::Time const & time, ns3::EventImpl * event) [member function]
cls.add_method('ScheduleWithContext',
'void',
[param('uint32_t', 'context'), param('ns3::Time const &', 'time'), param('ns3::EventImpl *', 'event')],
is_pure_virtual=True, is_virtual=True)
## simulator-impl.h (module 'core'): void ns3::SimulatorImpl::SetScheduler(ns3::ObjectFactory schedulerFactory) [member function]
cls.add_method('SetScheduler',
'void',
[param('ns3::ObjectFactory', 'schedulerFactory')],
is_pure_virtual=True, is_virtual=True)
## simulator-impl.h (module 'core'): void ns3::SimulatorImpl::Stop() [member function]
cls.add_method('Stop',
'void',
[],
is_pure_virtual=True, is_virtual=True)
## simulator-impl.h (module 'core'): void ns3::SimulatorImpl::Stop(ns3::Time const & time) [member function]
cls.add_method('Stop',
'void',
[param('ns3::Time const &', 'time')],
is_pure_virtual=True, is_virtual=True)
return
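# Note: every virtual member above is registered with is_pure_virtual=True,
# marking SimulatorImpl as the abstract simulator interface; the concrete
# DistributedSimulatorImpl registered further below repeats the same method
# set with the pure-virtual flag dropped.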
def register_Ns3Time_methods(root_module, cls):
cls.add_binary_numeric_operator('+', root_module['ns3::Time'], root_module['ns3::Time'], param('ns3::Time const &', 'right'))
cls.add_binary_numeric_operator('-', root_module['ns3::Time'], root_module['ns3::Time'], param('ns3::Time const &', 'right'))
cls.add_binary_comparison_operator('<')
cls.add_binary_comparison_operator('>')
cls.add_binary_comparison_operator('!=')
cls.add_inplace_numeric_operator('+=', param('ns3::Time const &', 'right'))
cls.add_inplace_numeric_operator('-=', param('ns3::Time const &', 'right'))
cls.add_output_stream_operator()
cls.add_binary_comparison_operator('<=')
cls.add_binary_comparison_operator('==')
cls.add_binary_comparison_operator('>=')
## nstime.h (module 'core'): ns3::Time::Time() [constructor]
cls.add_constructor([])
## nstime.h (module 'core'): ns3::Time::Time(ns3::Time const & o) [copy constructor]
cls.add_constructor([param('ns3::Time const &', 'o')])
## nstime.h (module 'core'): ns3::Time::Time(double v) [constructor]
cls.add_constructor([param('double', 'v')])
## nstime.h (module 'core'): ns3::Time::Time(int v) [constructor]
cls.add_constructor([param('int', 'v')])
## nstime.h (module 'core'): ns3::Time::Time(long int v) [constructor]
cls.add_constructor([param('long int', 'v')])
## nstime.h (module 'core'): ns3::Time::Time(long long int v) [constructor]
cls.add_constructor([param('long long int', 'v')])
## nstime.h (module 'core'): ns3::Time::Time(unsigned int v) [constructor]
cls.add_constructor([param('unsigned int', 'v')])
## nstime.h (module 'core'): ns3::Time::Time(long unsigned int v) [constructor]
cls.add_constructor([param('long unsigned int', 'v')])
## nstime.h (module 'core'): ns3::Time::Time(long long unsigned int v) [constructor]
cls.add_constructor([param('long long unsigned int', 'v')])
## nstime.h (module 'core'): ns3::Time::Time(std::string const & s) [constructor]
cls.add_constructor([param('std::string const &', 's')])
## nstime.h (module 'core'): ns3::Time::Time(ns3::int64x64_t const & value) [constructor]
cls.add_constructor([param('ns3::int64x64_t const &', 'value')])
## nstime.h (module 'core'): int ns3::Time::Compare(ns3::Time const & o) const [member function]
cls.add_method('Compare',
'int',
[param('ns3::Time const &', 'o')],
is_const=True)
## nstime.h (module 'core'): static ns3::Time ns3::Time::From(ns3::int64x64_t const & from, ns3::Time::Unit timeUnit) [member function]
cls.add_method('From',
'ns3::Time',
[param('ns3::int64x64_t const &', 'from'), param('ns3::Time::Unit', 'timeUnit')],
is_static=True)
## nstime.h (module 'core'): static ns3::Time ns3::Time::From(ns3::int64x64_t const & value) [member function]
cls.add_method('From',
'ns3::Time',
[param('ns3::int64x64_t const &', 'value')],
is_static=True)
## nstime.h (module 'core'): static ns3::Time ns3::Time::FromDouble(double value, ns3::Time::Unit timeUnit) [member function]
cls.add_method('FromDouble',
'ns3::Time',
[param('double', 'value'), param('ns3::Time::Unit', 'timeUnit')],
is_static=True)
## nstime.h (module 'core'): static ns3::Time ns3::Time::FromInteger(uint64_t value, ns3::Time::Unit timeUnit) [member function]
cls.add_method('FromInteger',
'ns3::Time',
[param('uint64_t', 'value'), param('ns3::Time::Unit', 'timeUnit')],
is_static=True)
## nstime.h (module 'core'): double ns3::Time::GetDouble() const [member function]
cls.add_method('GetDouble',
'double',
[],
is_const=True)
## nstime.h (module 'core'): int64_t ns3::Time::GetFemtoSeconds() const [member function]
cls.add_method('GetFemtoSeconds',
'int64_t',
[],
is_const=True)
## nstime.h (module 'core'): int64_t ns3::Time::GetInteger() const [member function]
cls.add_method('GetInteger',
'int64_t',
[],
is_const=True)
## nstime.h (module 'core'): int64_t ns3::Time::GetMicroSeconds() const [member function]
cls.add_method('GetMicroSeconds',
'int64_t',
[],
is_const=True)
## nstime.h (module 'core'): int64_t ns3::Time::GetMilliSeconds() const [member function]
cls.add_method('GetMilliSeconds',
'int64_t',
[],
is_const=True)
## nstime.h (module 'core'): int64_t ns3::Time::GetNanoSeconds() const [member function]
cls.add_method('GetNanoSeconds',
'int64_t',
[],
is_const=True)
## nstime.h (module 'core'): int64_t ns3::Time::GetPicoSeconds() const [member function]
cls.add_method('GetPicoSeconds',
'int64_t',
[],
is_const=True)
## nstime.h (module 'core'): static ns3::Time::Unit ns3::Time::GetResolution() [member function]
cls.add_method('GetResolution',
'ns3::Time::Unit',
[],
is_static=True)
## nstime.h (module 'core'): double ns3::Time::GetSeconds() const [member function]
cls.add_method('GetSeconds',
'double',
[],
is_const=True)
## nstime.h (module 'core'): int64_t ns3::Time::GetTimeStep() const [member function]
cls.add_method('GetTimeStep',
'int64_t',
[],
is_const=True)
## nstime.h (module 'core'): bool ns3::Time::IsNegative() const [member function]
cls.add_method('IsNegative',
'bool',
[],
is_const=True)
## nstime.h (module 'core'): bool ns3::Time::IsPositive() const [member function]
cls.add_method('IsPositive',
'bool',
[],
is_const=True)
## nstime.h (module 'core'): bool ns3::Time::IsStrictlyNegative() const [member function]
cls.add_method('IsStrictlyNegative',
'bool',
[],
is_const=True)
## nstime.h (module 'core'): bool ns3::Time::IsStrictlyPositive() const [member function]
cls.add_method('IsStrictlyPositive',
'bool',
[],
is_const=True)
## nstime.h (module 'core'): bool ns3::Time::IsZero() const [member function]
cls.add_method('IsZero',
'bool',
[],
is_const=True)
## nstime.h (module 'core'): static void ns3::Time::SetResolution(ns3::Time::Unit resolution) [member function]
cls.add_method('SetResolution',
'void',
[param('ns3::Time::Unit', 'resolution')],
is_static=True)
## nstime.h (module 'core'): ns3::int64x64_t ns3::Time::To(ns3::Time::Unit timeUnit) const [member function]
cls.add_method('To',
'ns3::int64x64_t',
[param('ns3::Time::Unit', 'timeUnit')],
is_const=True)
## nstime.h (module 'core'): double ns3::Time::ToDouble(ns3::Time::Unit timeUnit) const [member function]
cls.add_method('ToDouble',
'double',
[param('ns3::Time::Unit', 'timeUnit')],
is_const=True)
## nstime.h (module 'core'): int64_t ns3::Time::ToInteger(ns3::Time::Unit timeUnit) const [member function]
cls.add_method('ToInteger',
'int64_t',
[param('ns3::Time::Unit', 'timeUnit')],
is_const=True)
return
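# Note: Time combines arithmetic and comparison operators with a family of
# converting constructors (numeric types, std::string, int64x64_t). A hedged
# usage sketch, assuming the standard 'import ns.core' module layout:
#
#   import ns.core
#   t = ns.core.Time("10ms")           # parsed by the std::string constructor
#   print(t.GetMilliSeconds())         # expected: 10
#   print((t + t).GetMicroSeconds())   # expected: 20000, via the '+' overload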
def register_Ns3TraceSourceAccessor_methods(root_module, cls):
## trace-source-accessor.h (module 'core'): ns3::TraceSourceAccessor::TraceSourceAccessor(ns3::TraceSourceAccessor const & arg0) [copy constructor]
cls.add_constructor([param('ns3::TraceSourceAccessor const &', 'arg0')])
## trace-source-accessor.h (module 'core'): ns3::TraceSourceAccessor::TraceSourceAccessor() [constructor]
cls.add_constructor([])
## trace-source-accessor.h (module 'core'): bool ns3::TraceSourceAccessor::Connect(ns3::ObjectBase * obj, std::string context, ns3::CallbackBase const & cb) const [member function]
cls.add_method('Connect',
'bool',
[param('ns3::ObjectBase *', 'obj', transfer_ownership=False), param('std::string', 'context'), param('ns3::CallbackBase const &', 'cb')],
is_pure_virtual=True, is_const=True, is_virtual=True)
## trace-source-accessor.h (module 'core'): bool ns3::TraceSourceAccessor::ConnectWithoutContext(ns3::ObjectBase * obj, ns3::CallbackBase const & cb) const [member function]
cls.add_method('ConnectWithoutContext',
'bool',
[param('ns3::ObjectBase *', 'obj', transfer_ownership=False), param('ns3::CallbackBase const &', 'cb')],
is_pure_virtual=True, is_const=True, is_virtual=True)
## trace-source-accessor.h (module 'core'): bool ns3::TraceSourceAccessor::Disconnect(ns3::ObjectBase * obj, std::string context, ns3::CallbackBase const & cb) const [member function]
cls.add_method('Disconnect',
'bool',
[param('ns3::ObjectBase *', 'obj', transfer_ownership=False), param('std::string', 'context'), param('ns3::CallbackBase const &', 'cb')],
is_pure_virtual=True, is_const=True, is_virtual=True)
## trace-source-accessor.h (module 'core'): bool ns3::TraceSourceAccessor::DisconnectWithoutContext(ns3::ObjectBase * obj, ns3::CallbackBase const & cb) const [member function]
cls.add_method('DisconnectWithoutContext',
'bool',
[param('ns3::ObjectBase *', 'obj', transfer_ownership=False), param('ns3::CallbackBase const &', 'cb')],
is_pure_virtual=True, is_const=True, is_virtual=True)
return
def register_Ns3Trailer_methods(root_module, cls):
cls.add_output_stream_operator()
## trailer.h (module 'network'): ns3::Trailer::Trailer() [constructor]
cls.add_constructor([])
## trailer.h (module 'network'): ns3::Trailer::Trailer(ns3::Trailer const & arg0) [copy constructor]
cls.add_constructor([param('ns3::Trailer const &', 'arg0')])
## trailer.h (module 'network'): uint32_t ns3::Trailer::Deserialize(ns3::Buffer::Iterator end) [member function]
cls.add_method('Deserialize',
'uint32_t',
[param('ns3::Buffer::Iterator', 'end')],
is_pure_virtual=True, is_virtual=True)
## trailer.h (module 'network'): uint32_t ns3::Trailer::GetSerializedSize() const [member function]
cls.add_method('GetSerializedSize',
'uint32_t',
[],
is_pure_virtual=True, is_const=True, is_virtual=True)
## trailer.h (module 'network'): static ns3::TypeId ns3::Trailer::GetTypeId() [member function]
cls.add_method('GetTypeId',
'ns3::TypeId',
[],
is_static=True)
## trailer.h (module 'network'): void ns3::Trailer::Print(std::ostream & os) const [member function]
cls.add_method('Print',
'void',
[param('std::ostream &', 'os')],
is_pure_virtual=True, is_const=True, is_virtual=True)
## trailer.h (module 'network'): void ns3::Trailer::Serialize(ns3::Buffer::Iterator start) const [member function]
cls.add_method('Serialize',
'void',
[param('ns3::Buffer::Iterator', 'start')],
is_pure_virtual=True, is_const=True, is_virtual=True)
return
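# Note: unlike Header::Deserialize, whose Buffer::Iterator parameter is named
# 'start', Trailer::Deserialize above is registered with the parameter named
# 'end': trailers are read from the tail of the buffer, and the generated
# binding preserves that naming convention from trailer.h.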
def register_Ns3AttributeAccessor_methods(root_module, cls):
## attribute.h (module 'core'): ns3::AttributeAccessor::AttributeAccessor(ns3::AttributeAccessor const & arg0) [copy constructor]
cls.add_constructor([param('ns3::AttributeAccessor const &', 'arg0')])
## attribute.h (module 'core'): ns3::AttributeAccessor::AttributeAccessor() [constructor]
cls.add_constructor([])
## attribute.h (module 'core'): bool ns3::AttributeAccessor::Get(ns3::ObjectBase const * object, ns3::AttributeValue & attribute) const [member function]
cls.add_method('Get',
'bool',
[param('ns3::ObjectBase const *', 'object'), param('ns3::AttributeValue &', 'attribute')],
is_pure_virtual=True, is_const=True, is_virtual=True)
## attribute.h (module 'core'): bool ns3::AttributeAccessor::HasGetter() const [member function]
cls.add_method('HasGetter',
'bool',
[],
is_pure_virtual=True, is_const=True, is_virtual=True)
## attribute.h (module 'core'): bool ns3::AttributeAccessor::HasSetter() const [member function]
cls.add_method('HasSetter',
'bool',
[],
is_pure_virtual=True, is_const=True, is_virtual=True)
## attribute.h (module 'core'): bool ns3::AttributeAccessor::Set(ns3::ObjectBase * object, ns3::AttributeValue const & value) const [member function]
cls.add_method('Set',
'bool',
[param('ns3::ObjectBase *', 'object', transfer_ownership=False), param('ns3::AttributeValue const &', 'value')],
is_pure_virtual=True, is_const=True, is_virtual=True)
return
def register_Ns3AttributeChecker_methods(root_module, cls):
## attribute.h (module 'core'): ns3::AttributeChecker::AttributeChecker(ns3::AttributeChecker const & arg0) [copy constructor]
cls.add_constructor([param('ns3::AttributeChecker const &', 'arg0')])
## attribute.h (module 'core'): ns3::AttributeChecker::AttributeChecker() [constructor]
cls.add_constructor([])
## attribute.h (module 'core'): bool ns3::AttributeChecker::Check(ns3::AttributeValue const & value) const [member function]
cls.add_method('Check',
'bool',
[param('ns3::AttributeValue const &', 'value')],
is_pure_virtual=True, is_const=True, is_virtual=True)
## attribute.h (module 'core'): bool ns3::AttributeChecker::Copy(ns3::AttributeValue const & source, ns3::AttributeValue & destination) const [member function]
cls.add_method('Copy',
'bool',
[param('ns3::AttributeValue const &', 'source'), param('ns3::AttributeValue &', 'destination')],
is_pure_virtual=True, is_const=True, is_virtual=True)
## attribute.h (module 'core'): ns3::Ptr<ns3::AttributeValue> ns3::AttributeChecker::Create() const [member function]
cls.add_method('Create',
'ns3::Ptr< ns3::AttributeValue >',
[],
is_pure_virtual=True, is_const=True, is_virtual=True)
## attribute.h (module 'core'): ns3::Ptr<ns3::AttributeValue> ns3::AttributeChecker::CreateValidValue(ns3::AttributeValue const & value) const [member function]
cls.add_method('CreateValidValue',
'ns3::Ptr< ns3::AttributeValue >',
[param('ns3::AttributeValue const &', 'value')],
is_const=True)
## attribute.h (module 'core'): std::string ns3::AttributeChecker::GetUnderlyingTypeInformation() const [member function]
cls.add_method('GetUnderlyingTypeInformation',
'std::string',
[],
is_pure_virtual=True, is_const=True, is_virtual=True)
## attribute.h (module 'core'): std::string ns3::AttributeChecker::GetValueTypeName() const [member function]
cls.add_method('GetValueTypeName',
'std::string',
[],
is_pure_virtual=True, is_const=True, is_virtual=True)
## attribute.h (module 'core'): bool ns3::AttributeChecker::HasUnderlyingTypeInformation() const [member function]
cls.add_method('HasUnderlyingTypeInformation',
'bool',
[],
is_pure_virtual=True, is_const=True, is_virtual=True)
return
def register_Ns3AttributeValue_methods(root_module, cls):
## attribute.h (module 'core'): ns3::AttributeValue::AttributeValue(ns3::AttributeValue const & arg0) [copy constructor]
cls.add_constructor([param('ns3::AttributeValue const &', 'arg0')])
## attribute.h (module 'core'): ns3::AttributeValue::AttributeValue() [constructor]
cls.add_constructor([])
## attribute.h (module 'core'): ns3::Ptr<ns3::AttributeValue> ns3::AttributeValue::Copy() const [member function]
cls.add_method('Copy',
'ns3::Ptr< ns3::AttributeValue >',
[],
is_pure_virtual=True, is_const=True, is_virtual=True)
## attribute.h (module 'core'): bool ns3::AttributeValue::DeserializeFromString(std::string value, ns3::Ptr<ns3::AttributeChecker const> checker) [member function]
cls.add_method('DeserializeFromString',
'bool',
[param('std::string', 'value'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
is_pure_virtual=True, is_virtual=True)
## attribute.h (module 'core'): std::string ns3::AttributeValue::SerializeToString(ns3::Ptr<ns3::AttributeChecker const> checker) const [member function]
cls.add_method('SerializeToString',
'std::string',
[param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
is_pure_virtual=True, is_const=True, is_virtual=True)
return
def register_Ns3CallbackChecker_methods(root_module, cls):
## callback.h (module 'core'): ns3::CallbackChecker::CallbackChecker() [constructor]
cls.add_constructor([])
## callback.h (module 'core'): ns3::CallbackChecker::CallbackChecker(ns3::CallbackChecker const & arg0) [copy constructor]
cls.add_constructor([param('ns3::CallbackChecker const &', 'arg0')])
return
def register_Ns3CallbackImplBase_methods(root_module, cls):
## callback.h (module 'core'): ns3::CallbackImplBase::CallbackImplBase() [constructor]
cls.add_constructor([])
## callback.h (module 'core'): ns3::CallbackImplBase::CallbackImplBase(ns3::CallbackImplBase const & arg0) [copy constructor]
cls.add_constructor([param('ns3::CallbackImplBase const &', 'arg0')])
## callback.h (module 'core'): bool ns3::CallbackImplBase::IsEqual(ns3::Ptr<ns3::CallbackImplBase const> other) const [member function]
cls.add_method('IsEqual',
'bool',
[param('ns3::Ptr< ns3::CallbackImplBase const >', 'other')],
is_pure_virtual=True, is_const=True, is_virtual=True)
return
def register_Ns3CallbackValue_methods(root_module, cls):
## callback.h (module 'core'): ns3::CallbackValue::CallbackValue(ns3::CallbackValue const & arg0) [copy constructor]
cls.add_constructor([param('ns3::CallbackValue const &', 'arg0')])
## callback.h (module 'core'): ns3::CallbackValue::CallbackValue() [constructor]
cls.add_constructor([])
## callback.h (module 'core'): ns3::CallbackValue::CallbackValue(ns3::CallbackBase const & base) [constructor]
cls.add_constructor([param('ns3::CallbackBase const &', 'base')])
## callback.h (module 'core'): ns3::Ptr<ns3::AttributeValue> ns3::CallbackValue::Copy() const [member function]
cls.add_method('Copy',
'ns3::Ptr< ns3::AttributeValue >',
[],
is_const=True, is_virtual=True)
## callback.h (module 'core'): bool ns3::CallbackValue::DeserializeFromString(std::string value, ns3::Ptr<ns3::AttributeChecker const> checker) [member function]
cls.add_method('DeserializeFromString',
'bool',
[param('std::string', 'value'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
is_virtual=True)
## callback.h (module 'core'): std::string ns3::CallbackValue::SerializeToString(ns3::Ptr<ns3::AttributeChecker const> checker) const [member function]
cls.add_method('SerializeToString',
'std::string',
[param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
is_const=True, is_virtual=True)
## callback.h (module 'core'): void ns3::CallbackValue::Set(ns3::CallbackBase base) [member function]
cls.add_method('Set',
'void',
[param('ns3::CallbackBase', 'base')])
return
def register_Ns3DistributedSimulatorImpl_methods(root_module, cls):
## distributed-simulator-impl.h (module 'mpi'): ns3::DistributedSimulatorImpl::DistributedSimulatorImpl(ns3::DistributedSimulatorImpl const & arg0) [copy constructor]
cls.add_constructor([param('ns3::DistributedSimulatorImpl const &', 'arg0')])
## distributed-simulator-impl.h (module 'mpi'): ns3::DistributedSimulatorImpl::DistributedSimulatorImpl() [constructor]
cls.add_constructor([])
## distributed-simulator-impl.h (module 'mpi'): void ns3::DistributedSimulatorImpl::Cancel(ns3::EventId const & ev) [member function]
cls.add_method('Cancel',
'void',
[param('ns3::EventId const &', 'ev')],
is_virtual=True)
## distributed-simulator-impl.h (module 'mpi'): void ns3::DistributedSimulatorImpl::Destroy() [member function]
cls.add_method('Destroy',
'void',
[],
is_virtual=True)
## distributed-simulator-impl.h (module 'mpi'): uint32_t ns3::DistributedSimulatorImpl::GetContext() const [member function]
cls.add_method('GetContext',
'uint32_t',
[],
is_const=True, is_virtual=True)
## distributed-simulator-impl.h (module 'mpi'): ns3::Time ns3::DistributedSimulatorImpl::GetDelayLeft(ns3::EventId const & id) const [member function]
cls.add_method('GetDelayLeft',
'ns3::Time',
[param('ns3::EventId const &', 'id')],
is_const=True, is_virtual=True)
## distributed-simulator-impl.h (module 'mpi'): ns3::Time ns3::DistributedSimulatorImpl::GetMaximumSimulationTime() const [member function]
cls.add_method('GetMaximumSimulationTime',
'ns3::Time',
[],
is_const=True, is_virtual=True)
## distributed-simulator-impl.h (module 'mpi'): uint32_t ns3::DistributedSimulatorImpl::GetSystemId() const [member function]
cls.add_method('GetSystemId',
'uint32_t',
[],
is_const=True, is_virtual=True)
## distributed-simulator-impl.h (module 'mpi'): static ns3::TypeId ns3::DistributedSimulatorImpl::GetTypeId() [member function]
cls.add_method('GetTypeId',
'ns3::TypeId',
[],
is_static=True)
## distributed-simulator-impl.h (module 'mpi'): bool ns3::DistributedSimulatorImpl::IsExpired(ns3::EventId const & ev) const [member function]
cls.add_method('IsExpired',
'bool',
[param('ns3::EventId const &', 'ev')],
is_const=True, is_virtual=True)
## distributed-simulator-impl.h (module 'mpi'): bool ns3::DistributedSimulatorImpl::IsFinished() const [member function]
cls.add_method('IsFinished',
'bool',
[],
is_const=True, is_virtual=True)
## distributed-simulator-impl.h (module 'mpi'): ns3::Time ns3::DistributedSimulatorImpl::Next() const [member function]
cls.add_method('Next',
'ns3::Time',
[],
is_const=True, is_virtual=True)
## distributed-simulator-impl.h (module 'mpi'): ns3::Time ns3::DistributedSimulatorImpl::Now() const [member function]
cls.add_method('Now',
'ns3::Time',
[],
is_const=True, is_virtual=True)
## distributed-simulator-impl.h (module 'mpi'): void ns3::DistributedSimulatorImpl::Remove(ns3::EventId const & ev) [member function]
cls.add_method('Remove',
'void',
[param('ns3::EventId const &', 'ev')],
is_virtual=True)
## distributed-simulator-impl.h (module 'mpi'): void ns3::DistributedSimulatorImpl::Run() [member function]
cls.add_method('Run',
'void',
[],
is_virtual=True)
## distributed-simulator-impl.h (module 'mpi'): void ns3::DistributedSimulatorImpl::RunOneEvent() [member function]
cls.add_method('RunOneEvent',
'void',
[],
is_virtual=True)
## distributed-simulator-impl.h (module 'mpi'): ns3::EventId ns3::DistributedSimulatorImpl::Schedule(ns3::Time const & time, ns3::EventImpl * event) [member function]
cls.add_method('Schedule',
'ns3::EventId',
[param('ns3::Time const &', 'time'), param('ns3::EventImpl *', 'event')],
is_virtual=True)
## distributed-simulator-impl.h (module 'mpi'): ns3::EventId ns3::DistributedSimulatorImpl::ScheduleDestroy(ns3::EventImpl * event) [member function]
cls.add_method('ScheduleDestroy',
'ns3::EventId',
[param('ns3::EventImpl *', 'event')],
is_virtual=True)
## distributed-simulator-impl.h (module 'mpi'): ns3::EventId ns3::DistributedSimulatorImpl::ScheduleNow(ns3::EventImpl * event) [member function]
cls.add_method('ScheduleNow',
'ns3::EventId',
[param('ns3::EventImpl *', 'event')],
is_virtual=True)
## distributed-simulator-impl.h (module 'mpi'): void ns3::DistributedSimulatorImpl::ScheduleWithContext(uint32_t context, ns3::Time const & time, ns3::EventImpl * event) [member function]
cls.add_method('ScheduleWithContext',
'void',
[param('uint32_t', 'context'), param('ns3::Time const &', 'time'), param('ns3::EventImpl *', 'event')],
is_virtual=True)
## distributed-simulator-impl.h (module 'mpi'): void ns3::DistributedSimulatorImpl::SetScheduler(ns3::ObjectFactory schedulerFactory) [member function]
cls.add_method('SetScheduler',
'void',
[param('ns3::ObjectFactory', 'schedulerFactory')],
is_virtual=True)
## distributed-simulator-impl.h (module 'mpi'): void ns3::DistributedSimulatorImpl::Stop() [member function]
cls.add_method('Stop',
'void',
[],
is_virtual=True)
## distributed-simulator-impl.h (module 'mpi'): void ns3::DistributedSimulatorImpl::Stop(ns3::Time const & time) [member function]
cls.add_method('Stop',
'void',
[param('ns3::Time const &', 'time')],
is_virtual=True)
## distributed-simulator-impl.h (module 'mpi'): void ns3::DistributedSimulatorImpl::DoDispose() [member function]
cls.add_method('DoDispose',
'void',
[],
visibility='private', is_virtual=True)
return
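# Note: DistributedSimulatorImpl re-registers the full SimulatorImpl
# interface one-to-one with the is_pure_virtual flag dropped, and
# additionally registers its private virtual DoDispose override so that
# pybindgen emits the corresponding teardown plumbing.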
def register_Ns3EmptyAttributeValue_methods(root_module, cls):
## attribute.h (module 'core'): ns3::EmptyAttributeValue::EmptyAttributeValue(ns3::EmptyAttributeValue const & arg0) [copy constructor]
cls.add_constructor([param('ns3::EmptyAttributeValue const &', 'arg0')])
## attribute.h (module 'core'): ns3::EmptyAttributeValue::EmptyAttributeValue() [constructor]
cls.add_constructor([])
## attribute.h (module 'core'): ns3::Ptr<ns3::AttributeValue> ns3::EmptyAttributeValue::Copy() const [member function]
cls.add_method('Copy',
'ns3::Ptr< ns3::AttributeValue >',
[],
is_const=True, visibility='private', is_virtual=True)
## attribute.h (module 'core'): bool ns3::EmptyAttributeValue::DeserializeFromString(std::string value, ns3::Ptr<ns3::AttributeChecker const> checker) [member function]
cls.add_method('DeserializeFromString',
'bool',
[param('std::string', 'value'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
visibility='private', is_virtual=True)
## attribute.h (module 'core'): std::string ns3::EmptyAttributeValue::SerializeToString(ns3::Ptr<ns3::AttributeChecker const> checker) const [member function]
cls.add_method('SerializeToString',
'std::string',
[param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
is_const=True, visibility='private', is_virtual=True)
return
def register_Ns3EventImpl_methods(root_module, cls):
## event-impl.h (module 'core'): ns3::EventImpl::EventImpl(ns3::EventImpl const & arg0) [copy constructor]
cls.add_constructor([param('ns3::EventImpl const &', 'arg0')])
## event-impl.h (module 'core'): ns3::EventImpl::EventImpl() [constructor]
cls.add_constructor([])
## event-impl.h (module 'core'): void ns3::EventImpl::Cancel() [member function]
cls.add_method('Cancel',
'void',
[])
## event-impl.h (module 'core'): void ns3::EventImpl::Invoke() [member function]
cls.add_method('Invoke',
'void',
[])
## event-impl.h (module 'core'): bool ns3::EventImpl::IsCancelled() [member function]
cls.add_method('IsCancelled',
'bool',
[])
## event-impl.h (module 'core'): void ns3::EventImpl::Notify() [member function]
cls.add_method('Notify',
'void',
[],
is_pure_virtual=True, visibility='protected', is_virtual=True)
return
def register_Ns3MpiReceiver_methods(root_module, cls):
## mpi-receiver.h (module 'mpi'): ns3::MpiReceiver::MpiReceiver() [constructor]
cls.add_constructor([])
## mpi-receiver.h (module 'mpi'): ns3::MpiReceiver::MpiReceiver(ns3::MpiReceiver const & arg0) [copy constructor]
cls.add_constructor([param('ns3::MpiReceiver const &', 'arg0')])
## mpi-receiver.h (module 'mpi'): static ns3::TypeId ns3::MpiReceiver::GetTypeId() [member function]
cls.add_method('GetTypeId',
'ns3::TypeId',
[],
is_static=True)
## mpi-receiver.h (module 'mpi'): void ns3::MpiReceiver::Receive(ns3::Ptr<ns3::Packet> p) [member function]
cls.add_method('Receive',
'void',
[param('ns3::Ptr< ns3::Packet >', 'p')])
## mpi-receiver.h (module 'mpi'): void ns3::MpiReceiver::SetReceiveCallback(ns3::Callback<void, ns3::Ptr<ns3::Packet>, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty> callback) [member function]
cls.add_method('SetReceiveCallback',
'void',
[param('ns3::Callback< void, ns3::Ptr< ns3::Packet >, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >', 'callback')])
return
def register_Ns3NixVector_methods(root_module, cls):
cls.add_output_stream_operator()
## nix-vector.h (module 'network'): ns3::NixVector::NixVector() [constructor]
cls.add_constructor([])
## nix-vector.h (module 'network'): ns3::NixVector::NixVector(ns3::NixVector const & o) [copy constructor]
cls.add_constructor([param('ns3::NixVector const &', 'o')])
## nix-vector.h (module 'network'): void ns3::NixVector::AddNeighborIndex(uint32_t newBits, uint32_t numberOfBits) [member function]
cls.add_method('AddNeighborIndex',
'void',
[param('uint32_t', 'newBits'), param('uint32_t', 'numberOfBits')])
## nix-vector.h (module 'network'): uint32_t ns3::NixVector::BitCount(uint32_t numberOfNeighbors) const [member function]
cls.add_method('BitCount',
'uint32_t',
[param('uint32_t', 'numberOfNeighbors')],
is_const=True)
## nix-vector.h (module 'network'): ns3::Ptr<ns3::NixVector> ns3::NixVector::Copy() const [member function]
cls.add_method('Copy',
'ns3::Ptr< ns3::NixVector >',
[],
is_const=True)
## nix-vector.h (module 'network'): uint32_t ns3::NixVector::Deserialize(uint32_t const * buffer, uint32_t size) [member function]
cls.add_method('Deserialize',
'uint32_t',
[param('uint32_t const *', 'buffer'), param('uint32_t', 'size')])
## nix-vector.h (module 'network'): uint32_t ns3::NixVector::ExtractNeighborIndex(uint32_t numberOfBits) [member function]
cls.add_method('ExtractNeighborIndex',
'uint32_t',
[param('uint32_t', 'numberOfBits')])
## nix-vector.h (module 'network'): uint32_t ns3::NixVector::GetRemainingBits() [member function]
cls.add_method('GetRemainingBits',
'uint32_t',
[])
## nix-vector.h (module 'network'): uint32_t ns3::NixVector::GetSerializedSize() const [member function]
cls.add_method('GetSerializedSize',
'uint32_t',
[],
is_const=True)
## nix-vector.h (module 'network'): uint32_t ns3::NixVector::Serialize(uint32_t * buffer, uint32_t maxSize) const [member function]
cls.add_method('Serialize',
'uint32_t',
[param('uint32_t *', 'buffer'), param('uint32_t', 'maxSize')],
is_const=True)
return
def register_Ns3ObjectFactoryChecker_methods(root_module, cls):
## object-factory.h (module 'core'): ns3::ObjectFactoryChecker::ObjectFactoryChecker() [constructor]
cls.add_constructor([])
## object-factory.h (module 'core'): ns3::ObjectFactoryChecker::ObjectFactoryChecker(ns3::ObjectFactoryChecker const & arg0) [copy constructor]
cls.add_constructor([param('ns3::ObjectFactoryChecker const &', 'arg0')])
return
def register_Ns3ObjectFactoryValue_methods(root_module, cls):
## object-factory.h (module 'core'): ns3::ObjectFactoryValue::ObjectFactoryValue() [constructor]
cls.add_constructor([])
## object-factory.h (module 'core'): ns3::ObjectFactoryValue::ObjectFactoryValue(ns3::ObjectFactoryValue const & arg0) [copy constructor]
cls.add_constructor([param('ns3::ObjectFactoryValue const &', 'arg0')])
## object-factory.h (module 'core'): ns3::ObjectFactoryValue::ObjectFactoryValue(ns3::ObjectFactory const & value) [constructor]
cls.add_constructor([param('ns3::ObjectFactory const &', 'value')])
## object-factory.h (module 'core'): ns3::Ptr<ns3::AttributeValue> ns3::ObjectFactoryValue::Copy() const [member function]
cls.add_method('Copy',
'ns3::Ptr< ns3::AttributeValue >',
[],
is_const=True, is_virtual=True)
## object-factory.h (module 'core'): bool ns3::ObjectFactoryValue::DeserializeFromString(std::string value, ns3::Ptr<ns3::AttributeChecker const> checker) [member function]
cls.add_method('DeserializeFromString',
'bool',
[param('std::string', 'value'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
is_virtual=True)
## object-factory.h (module 'core'): ns3::ObjectFactory ns3::ObjectFactoryValue::Get() const [member function]
cls.add_method('Get',
'ns3::ObjectFactory',
[],
is_const=True)
## object-factory.h (module 'core'): std::string ns3::ObjectFactoryValue::SerializeToString(ns3::Ptr<ns3::AttributeChecker const> checker) const [member function]
cls.add_method('SerializeToString',
'std::string',
[param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
is_const=True, is_virtual=True)
## object-factory.h (module 'core'): void ns3::ObjectFactoryValue::Set(ns3::ObjectFactory const & value) [member function]
cls.add_method('Set',
'void',
[param('ns3::ObjectFactory const &', 'value')])
return
def register_Ns3Packet_methods(root_module, cls):
cls.add_output_stream_operator()
## packet.h (module 'network'): ns3::Packet::Packet() [constructor]
cls.add_constructor([])
## packet.h (module 'network'): ns3::Packet::Packet(ns3::Packet const & o) [copy constructor]
cls.add_constructor([param('ns3::Packet const &', 'o')])
## packet.h (module 'network'): ns3::Packet::Packet(uint32_t size) [constructor]
cls.add_constructor([param('uint32_t', 'size')])
## packet.h (module 'network'): ns3::Packet::Packet(uint8_t const * buffer, uint32_t size, bool magic) [constructor]
cls.add_constructor([param('uint8_t const *', 'buffer'), param('uint32_t', 'size'), param('bool', 'magic')])
## packet.h (module 'network'): ns3::Packet::Packet(uint8_t const * buffer, uint32_t size) [constructor]
cls.add_constructor([param('uint8_t const *', 'buffer'), param('uint32_t', 'size')])
## packet.h (module 'network'): void ns3::Packet::AddAtEnd(ns3::Ptr<const ns3::Packet> packet) [member function]
cls.add_method('AddAtEnd',
'void',
[param('ns3::Ptr< ns3::Packet const >', 'packet')])
## packet.h (module 'network'): void ns3::Packet::AddByteTag(ns3::Tag const & tag) const [member function]
cls.add_method('AddByteTag',
'void',
[param('ns3::Tag const &', 'tag')],
is_const=True)
## packet.h (module 'network'): void ns3::Packet::AddHeader(ns3::Header const & header) [member function]
cls.add_method('AddHeader',
'void',
[param('ns3::Header const &', 'header')])
## packet.h (module 'network'): void ns3::Packet::AddPacketTag(ns3::Tag const & tag) const [member function]
cls.add_method('AddPacketTag',
'void',
[param('ns3::Tag const &', 'tag')],
is_const=True)
## packet.h (module 'network'): void ns3::Packet::AddPaddingAtEnd(uint32_t size) [member function]
cls.add_method('AddPaddingAtEnd',
'void',
[param('uint32_t', 'size')])
## packet.h (module 'network'): void ns3::Packet::AddTrailer(ns3::Trailer const & trailer) [member function]
cls.add_method('AddTrailer',
'void',
[param('ns3::Trailer const &', 'trailer')])
## packet.h (module 'network'): ns3::PacketMetadata::ItemIterator ns3::Packet::BeginItem() const [member function]
cls.add_method('BeginItem',
'ns3::PacketMetadata::ItemIterator',
[],
is_const=True)
## packet.h (module 'network'): ns3::Ptr<ns3::Packet> ns3::Packet::Copy() const [member function]
cls.add_method('Copy',
'ns3::Ptr< ns3::Packet >',
[],
is_const=True)
## packet.h (module 'network'): uint32_t ns3::Packet::CopyData(uint8_t * buffer, uint32_t size) const [member function]
cls.add_method('CopyData',
'uint32_t',
[param('uint8_t *', 'buffer'), param('uint32_t', 'size')],
is_const=True)
## packet.h (module 'network'): void ns3::Packet::CopyData(std::ostream * os, uint32_t size) const [member function]
cls.add_method('CopyData',
'void',
[param('std::ostream *', 'os'), param('uint32_t', 'size')],
is_const=True)
## packet.h (module 'network'): ns3::Ptr<ns3::Packet> ns3::Packet::CreateFragment(uint32_t start, uint32_t length) const [member function]
cls.add_method('CreateFragment',
'ns3::Ptr< ns3::Packet >',
[param('uint32_t', 'start'), param('uint32_t', 'length')],
is_const=True)
## packet.h (module 'network'): static void ns3::Packet::EnableChecking() [member function]
cls.add_method('EnableChecking',
'void',
[],
is_static=True)
## packet.h (module 'network'): static void ns3::Packet::EnablePrinting() [member function]
cls.add_method('EnablePrinting',
'void',
[],
is_static=True)
## packet.h (module 'network'): bool ns3::Packet::FindFirstMatchingByteTag(ns3::Tag & tag) const [member function]
cls.add_method('FindFirstMatchingByteTag',
'bool',
[param('ns3::Tag &', 'tag')],
is_const=True)
## packet.h (module 'network'): ns3::ByteTagIterator ns3::Packet::GetByteTagIterator() const [member function]
cls.add_method('GetByteTagIterator',
'ns3::ByteTagIterator',
[],
is_const=True)
## packet.h (module 'network'): ns3::Ptr<ns3::NixVector> ns3::Packet::GetNixVector() const [member function]
cls.add_method('GetNixVector',
'ns3::Ptr< ns3::NixVector >',
[],
is_const=True)
## packet.h (module 'network'): ns3::PacketTagIterator ns3::Packet::GetPacketTagIterator() const [member function]
cls.add_method('GetPacketTagIterator',
'ns3::PacketTagIterator',
[],
is_const=True)
## packet.h (module 'network'): uint32_t ns3::Packet::GetSerializedSize() const [member function]
cls.add_method('GetSerializedSize',
'uint32_t',
[],
is_const=True)
## packet.h (module 'network'): uint32_t ns3::Packet::GetSize() const [member function]
cls.add_method('GetSize',
'uint32_t',
[],
is_const=True)
## packet.h (module 'network'): uint64_t ns3::Packet::GetUid() const [member function]
cls.add_method('GetUid',
'uint64_t',
[],
is_const=True)
## packet.h (module 'network'): uint8_t const * ns3::Packet::PeekData() const [member function]
cls.add_method('PeekData',
'uint8_t const *',
[],
deprecated=True, is_const=True)
## packet.h (module 'network'): uint32_t ns3::Packet::PeekHeader(ns3::Header & header) const [member function]
cls.add_method('PeekHeader',
'uint32_t',
[param('ns3::Header &', 'header')],
is_const=True)
## packet.h (module 'network'): bool ns3::Packet::PeekPacketTag(ns3::Tag & tag) const [member function]
cls.add_method('PeekPacketTag',
'bool',
[param('ns3::Tag &', 'tag')],
is_const=True)
## packet.h (module 'network'): uint32_t ns3::Packet::PeekTrailer(ns3::Trailer & trailer) [member function]
cls.add_method('PeekTrailer',
'uint32_t',
[param('ns3::Trailer &', 'trailer')])
## packet.h (module 'network'): void ns3::Packet::Print(std::ostream & os) const [member function]
cls.add_method('Print',
'void',
[param('std::ostream &', 'os')],
is_const=True)
## packet.h (module 'network'): void ns3::Packet::PrintByteTags(std::ostream & os) const [member function]
cls.add_method('PrintByteTags',
'void',
[param('std::ostream &', 'os')],
is_const=True)
## packet.h (module 'network'): void ns3::Packet::PrintPacketTags(std::ostream & os) const [member function]
cls.add_method('PrintPacketTags',
'void',
[param('std::ostream &', 'os')],
is_const=True)
## packet.h (module 'network'): void ns3::Packet::RemoveAllByteTags() [member function]
cls.add_method('RemoveAllByteTags',
'void',
[])
## packet.h (module 'network'): void ns3::Packet::RemoveAllPacketTags() [member function]
cls.add_method('RemoveAllPacketTags',
'void',
[])
## packet.h (module 'network'): void ns3::Packet::RemoveAtEnd(uint32_t size) [member function]
cls.add_method('RemoveAtEnd',
'void',
[param('uint32_t', 'size')])
## packet.h (module 'network'): void ns3::Packet::RemoveAtStart(uint32_t size) [member function]
cls.add_method('RemoveAtStart',
'void',
[param('uint32_t', 'size')])
## packet.h (module 'network'): uint32_t ns3::Packet::RemoveHeader(ns3::Header & header) [member function]
cls.add_method('RemoveHeader',
'uint32_t',
[param('ns3::Header &', 'header')])
## packet.h (module 'network'): bool ns3::Packet::RemovePacketTag(ns3::Tag & tag) [member function]
cls.add_method('RemovePacketTag',
'bool',
[param('ns3::Tag &', 'tag')])
## packet.h (module 'network'): uint32_t ns3::Packet::RemoveTrailer(ns3::Trailer & trailer) [member function]
cls.add_method('RemoveTrailer',
'uint32_t',
[param('ns3::Trailer &', 'trailer')])
## packet.h (module 'network'): uint32_t ns3::Packet::Serialize(uint8_t * buffer, uint32_t maxSize) const [member function]
cls.add_method('Serialize',
'uint32_t',
[param('uint8_t *', 'buffer'), param('uint32_t', 'maxSize')],
is_const=True)
## packet.h (module 'network'): void ns3::Packet::SetNixVector(ns3::Ptr<ns3::NixVector> arg0) [member function]
cls.add_method('SetNixVector',
'void',
[param('ns3::Ptr< ns3::NixVector >', 'arg0')])
return
def register_Ns3TimeChecker_methods(root_module, cls):
## nstime.h (module 'core'): ns3::TimeChecker::TimeChecker() [constructor]
cls.add_constructor([])
## nstime.h (module 'core'): ns3::TimeChecker::TimeChecker(ns3::TimeChecker const & arg0) [copy constructor]
cls.add_constructor([param('ns3::TimeChecker const &', 'arg0')])
return
def register_Ns3TimeValue_methods(root_module, cls):
## nstime.h (module 'core'): ns3::TimeValue::TimeValue() [constructor]
cls.add_constructor([])
## nstime.h (module 'core'): ns3::TimeValue::TimeValue(ns3::TimeValue const & arg0) [copy constructor]
cls.add_constructor([param('ns3::TimeValue const &', 'arg0')])
## nstime.h (module 'core'): ns3::TimeValue::TimeValue(ns3::Time const & value) [constructor]
cls.add_constructor([param('ns3::Time const &', 'value')])
## nstime.h (module 'core'): ns3::Ptr<ns3::AttributeValue> ns3::TimeValue::Copy() const [member function]
cls.add_method('Copy',
'ns3::Ptr< ns3::AttributeValue >',
[],
is_const=True, is_virtual=True)
## nstime.h (module 'core'): bool ns3::TimeValue::DeserializeFromString(std::string value, ns3::Ptr<ns3::AttributeChecker const> checker) [member function]
cls.add_method('DeserializeFromString',
'bool',
[param('std::string', 'value'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
is_virtual=True)
## nstime.h (module 'core'): ns3::Time ns3::TimeValue::Get() const [member function]
cls.add_method('Get',
'ns3::Time',
[],
is_const=True)
## nstime.h (module 'core'): std::string ns3::TimeValue::SerializeToString(ns3::Ptr<ns3::AttributeChecker const> checker) const [member function]
cls.add_method('SerializeToString',
'std::string',
[param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
is_const=True, is_virtual=True)
## nstime.h (module 'core'): void ns3::TimeValue::Set(ns3::Time const & value) [member function]
cls.add_method('Set',
'void',
[param('ns3::Time const &', 'value')])
return
def register_Ns3TypeIdChecker_methods(root_module, cls):
## type-id.h (module 'core'): ns3::TypeIdChecker::TypeIdChecker() [constructor]
cls.add_constructor([])
## type-id.h (module 'core'): ns3::TypeIdChecker::TypeIdChecker(ns3::TypeIdChecker const & arg0) [copy constructor]
cls.add_constructor([param('ns3::TypeIdChecker const &', 'arg0')])
return
def register_Ns3TypeIdValue_methods(root_module, cls):
## type-id.h (module 'core'): ns3::TypeIdValue::TypeIdValue() [constructor]
cls.add_constructor([])
## type-id.h (module 'core'): ns3::TypeIdValue::TypeIdValue(ns3::TypeIdValue const & arg0) [copy constructor]
cls.add_constructor([param('ns3::TypeIdValue const &', 'arg0')])
## type-id.h (module 'core'): ns3::TypeIdValue::TypeIdValue(ns3::TypeId const & value) [constructor]
cls.add_constructor([param('ns3::TypeId const &', 'value')])
## type-id.h (module 'core'): ns3::Ptr<ns3::AttributeValue> ns3::TypeIdValue::Copy() const [member function]
cls.add_method('Copy',
'ns3::Ptr< ns3::AttributeValue >',
[],
is_const=True, is_virtual=True)
## type-id.h (module 'core'): bool ns3::TypeIdValue::DeserializeFromString(std::string value, ns3::Ptr<ns3::AttributeChecker const> checker) [member function]
cls.add_method('DeserializeFromString',
'bool',
[param('std::string', 'value'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
is_virtual=True)
## type-id.h (module 'core'): ns3::TypeId ns3::TypeIdValue::Get() const [member function]
cls.add_method('Get',
'ns3::TypeId',
[],
is_const=True)
## type-id.h (module 'core'): std::string ns3::TypeIdValue::SerializeToString(ns3::Ptr<ns3::AttributeChecker const> checker) const [member function]
cls.add_method('SerializeToString',
'std::string',
[param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
is_const=True, is_virtual=True)
## type-id.h (module 'core'): void ns3::TypeIdValue::Set(ns3::TypeId const & value) [member function]
cls.add_method('Set',
'void',
[param('ns3::TypeId const &', 'value')])
return
def register_functions(root_module):
module = root_module
register_functions_ns3_FatalImpl(module.get_submodule('FatalImpl'), root_module)
return
def register_functions_ns3_FatalImpl(module, root_module):
return
def main():
out = FileCodeSink(sys.stdout)
root_module = module_init()
register_types(root_module)
register_methods(root_module)
register_functions(root_module)
root_module.generate(out)
if __name__ == '__main__':
main()
| gpl-2.0 |
SnakeJenny/TensorFlow | tensorflow/contrib/keras/python/keras/engine/__init__.py | 58 | 1415 | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""The Keras Engine: graph topology and training loop functionality.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.contrib.keras.python.keras.engine.topology import get_source_inputs
from tensorflow.contrib.keras.python.keras.engine.topology import Input
from tensorflow.contrib.keras.python.keras.engine.topology import InputLayer
from tensorflow.contrib.keras.python.keras.engine.topology import InputSpec
from tensorflow.contrib.keras.python.keras.engine.topology import Layer
from tensorflow.contrib.keras.python.keras.engine.training import Model
# Note: topology.Node is an internal class,
# it isn't meant to be used by Keras users.
| apache-2.0 |
falkolab/titanium_mobile | support/common/simplejson/tests/test_unicode.py | 123 | 2327 | from unittest import TestCase
import simplejson as json
class TestUnicode(TestCase):
def test_encoding1(self):
encoder = json.JSONEncoder(encoding='utf-8')
u = u'\N{GREEK SMALL LETTER ALPHA}\N{GREEK CAPITAL LETTER OMEGA}'
s = u.encode('utf-8')
ju = encoder.encode(u)
js = encoder.encode(s)
self.assertEquals(ju, js)
def test_encoding2(self):
u = u'\N{GREEK SMALL LETTER ALPHA}\N{GREEK CAPITAL LETTER OMEGA}'
s = u.encode('utf-8')
ju = json.dumps(u, encoding='utf-8')
js = json.dumps(s, encoding='utf-8')
self.assertEquals(ju, js)
def test_encoding3(self):
u = u'\N{GREEK SMALL LETTER ALPHA}\N{GREEK CAPITAL LETTER OMEGA}'
j = json.dumps(u)
self.assertEquals(j, '"\\u03b1\\u03a9"')
def test_encoding4(self):
u = u'\N{GREEK SMALL LETTER ALPHA}\N{GREEK CAPITAL LETTER OMEGA}'
j = json.dumps([u])
self.assertEquals(j, '["\\u03b1\\u03a9"]')
def test_encoding5(self):
u = u'\N{GREEK SMALL LETTER ALPHA}\N{GREEK CAPITAL LETTER OMEGA}'
j = json.dumps(u, ensure_ascii=False)
self.assertEquals(j, u'"%s"' % (u,))
def test_encoding6(self):
u = u'\N{GREEK SMALL LETTER ALPHA}\N{GREEK CAPITAL LETTER OMEGA}'
j = json.dumps([u], ensure_ascii=False)
self.assertEquals(j, u'["%s"]' % (u,))
def test_big_unicode_encode(self):
u = u'\U0001d120'
self.assertEquals(json.dumps(u), '"\\ud834\\udd20"')
self.assertEquals(json.dumps(u, ensure_ascii=False), u'"\U0001d120"')
def test_big_unicode_decode(self):
u = u'z\U0001d120x'
self.assertEquals(json.loads('"' + u + '"'), u)
self.assertEquals(json.loads('"z\\ud834\\udd20x"'), u)
def test_unicode_decode(self):
for i in range(0, 0xd7ff):
u = unichr(i)
s = '"\\u%04x"' % (i,)
self.assertEquals(json.loads(s), u)
def test_default_encoding(self):
self.assertEquals(json.loads(u'{"a": "\xe9"}'.encode('utf-8')),
{'a': u'\xe9'})
def test_unicode_preservation(self):
self.assertEquals(type(json.loads(u'""')), unicode)
self.assertEquals(type(json.loads(u'"a"')), unicode)
self.assertEquals(type(json.loads(u'["a"]')[0]), unicode) | apache-2.0 |
ggchappell/GraphR | genramsey.py | 1 | 11135 | #!/usr/bin/env python3
# genramsey.py
# Glenn G. Chappell
# Date: 26 Aug 2016
# Requires Python 3.
"""Functions for finding generalized Ramsey numbers & related extremal
graphs.
CONVENTIONS
See isograph.py for our graph representation. A set is represented as a
sorted list or tuple of its elements.
*Predicates* in this file are functions taking a graph and a set of
vertices of that graph, and returning bool. Given a predicate f, and a
graph g, we say a set s of vertices of g is an *f-set* in g if f(g, s)
is True.
An *induced-hereditary* predicate is a predicate f such that, if h is an
induced subgraph of a graph g, and s is a set of vertices of h, then s
is an f-set in h iff s is an f-set in g.
Given predicates f1 & f2 and nonnegative integers b1 & b2, a
*counterexample* graph is a graph that contains no f1-set of order b1
and no f2-set of order b2. In other words, it is a counterexample to the
statement that every graph contains either an f1-set of order b1 or an
f2-set of order b2.
An *extremal* graph is a counterexample graph of maximum order.
Predicates:
is_independent(g, s)
Return bool: True if s is an independent set in graph g.
is_clique(g, s)
Return bool: True if s induces a complete subgraph of graph g.
Checking for f-Sets:
has_fset(f, b, g)
Return bool: True if graph g contains an f-set of order b.
has_fset_with_last(f, b, g)
Return bool: True if graph g contains an f-set of order b that
contains vertex n-1 of g.
Finding Extremal Graphs:
extremals(f1, f2, b1, b2, printflag=None)
f1, f2 are induced-hereditary predicates. Return (n, gs), where n is
the least order for which no counterexample graphs exist (and so n-1
is the order of all extremal graphs), and gs is a list of all
extremal graphs (exactly one from each isomorphism class). If
printflag is True, prints, one on each line, pairs of the form u v,
where u is an integer from 0 to n, and v is the number of
counterexample graphs of order u.
"""
import isograph # for graphs, isomorphic, powerset, unique_iso
import itertools # for combinations, count
import sys # for argv, exit
# ----------------------------------------------------------------------
# Predicates
# ----------------------------------------------------------------------
def is_independent(g, s):
"""Predicate. Return True if s is an independent set in g.
s is a list or tuple whose items are vertices of g. We determine
whether s, considered as a set of vertices, is an independent set
in g. We return True if so.
This function is an induced-hereditary predicate.
Arguments:
g -- graph
s -- list or tuple of vertices of g; represents set of vertices
See isograph.py for our graph representation.
>>> g = [ [2,3,4], [2,3,4], [0,1], [0,1], [0,1] ]
>>> s = [0,1]
>>> is_independent(g, s)
True
>>> s = [2,3,4]
>>> is_independent(g, s)
True
>>> s = [1,3]
>>> is_independent(g, s)
False
"""
for v in s:
for x in g[v]:
if x in s:
return False
return True
def is_clique(g, s):
"""Predicate. Return True if s is a clique in g.
s is a list or tuple whose items are vertices of g. We determine
whether s, considered as a set of vertices, induces a complete
subgraph of g. We return True if so.
This function is an induced-hereditary predicate.
Arguments:
g -- graph
s -- list or tuple of vertices of g; represents set of vertices
See isograph.py for our graph representation.
>>> g = [ [1], [0,2,3], [1,3], [1,2] ]
>>> s = [0,1]
>>> is_clique(g, s)
True
>>> s = [1,2,3]
>>> is_clique(g, s)
True
>>> s = [0,2]
>>> is_clique(g, s)
False
"""
for v in s:
for x in s:
if x != v and x not in g[v]:
return False
return True
# ----------------------------------------------------------------------
# Checking for f-Sets
# ----------------------------------------------------------------------
def has_fset(f, b, g):
"""Return True if g contains an f-set of order b.
Arguments:
f -- predicate
Given a graph and a subset of its vertex set, returns bool.
b -- nonnegative int
We search for an order-b f-set.
g -- graph
See isograph.py for our graph representation.
>>> g = [ [3], [3], [3], [0,1,2] ]
>>> has_fset(is_independent, -1, g)
False
>>> has_fset(is_independent, 0, g)
True
>>> has_fset(is_independent, 1, g)
True
>>> has_fset(is_independent, 2, g)
True
>>> has_fset(is_independent, 3, g)
True
>>> has_fset(is_independent, 4, g)
False
>>> has_fset(is_clique, 1, g)
True
>>> has_fset(is_clique, 2, g)
True
>>> has_fset(is_clique, 3, g)
False
"""
if b < 0:
return False
n = len(g)
if b > n:
return False
for s in itertools.combinations(range(n), b):
if f(g, s):
return True
return False
def has_fset_with_last(f, b, g):
"""Return True if g contains f-set of order b containing vertex n-1.
Arguments:
f -- predicate
Given a graph and a subset of its vertex set, returns bool.
g -- graph
b -- nonnegative int
We search for an order-b f-set.
See isograph.py for our graph representation.
>>> g = [ [3], [3], [3], [0,1,2] ]
>>> has_fset_with_last(is_independent, -1, g)
False
>>> has_fset_with_last(is_independent, 0, g)
False
>>> has_fset_with_last(is_independent, 1, g)
True
>>> has_fset_with_last(is_independent, 2, g)
False
>>> has_fset_with_last(is_independent, 3, g)
False
>>> has_fset_with_last(is_independent, 4, g)
False
>>> has_fset_with_last(is_clique, 1, g)
True
>>> has_fset_with_last(is_clique, 2, g)
True
>>> has_fset_with_last(is_clique, 3, g)
False
"""
if b < 1:
return False
n = len(g)
if b > n:
return False
for ss in itertools.combinations(range(n-1), b-1):
s = ss + (n-1,)
if f(g, s):
return True
return False
# ----------------------------------------------------------------------
# Finding Extremal Graphs
# ----------------------------------------------------------------------
def _counterexamples_zero(f1, f2, b1, b2):
"""Yield all counterexample graphs of order zero.
Arguments:
f1 -- predicate
f2 -- predicate
b1 -- nonnegative int
b2 -- nonnegative int
See isograph.py for our graph representation.
"""
gs = list(isograph.graphs(0)) # list of all graphs of order 0
for g in gs:
if not has_fset(f1, b1, g) and not has_fset(f2, b2, g):
yield g
def _counterexamples_up(f1, f2, b1, b2, n, old):
"""Yield counterexample n-graphs, given list for n-1.
Given induced-hereditary predicates f1, f2, and nonnegative integers
b1, b2, yield one graph from each isomorphism class of n-vertex
counterexample graphs, given an iterable (old) yielding all
counterexample graphs of order n-1.
Arguments:
f1 -- induced-hereditary predicate
f2 -- induced-hereditary predicate
b1 -- nonnegative int
b2 -- nonnegative int
n -- positive int
Order of graphs to yield.
old -- iterable yielding graphs of order n-1
Graphs yielded should be all counterexample graphs of order n-1.
See isograph.py for our graph representation.
>>> f1 = is_independent
>>> f2 = is_clique
>>> ce4_0_3_3 = [[[2], [3], [0], [1]], [[1,2], [0,3], [0], [1]],
... [[1,2], [0,3], [0,3], [1,2]]] # order-4 ctrexamples for R_0(3,3)
>>> ce5_0_3_3 = list(_counterexamples_up(f1,f2,3,3,5,ce4_0_3_3))
>>> len(ce5_0_3_3)
1
>>> c5 = [[1,4], [0,2], [1,3], [2,4], [0,3]]
>>> isograph.isomorphic(ce5_0_3_3[0], c5)
True
>>> list(_counterexamples_up(f1,f2,3,3,6,ce5_0_3_3))
[]
"""
# Helper function counterexamples_up_big_list: generates every
# counterexample graph of order n whose subgraph induced by vertices
# 0 .. n-2 is an item in old.
def counterexamples_up_big_list(f1, f2, n, b1, b2, old):
for oldg in old:
for vset in isograph.powerset(range(n-1)):
g = oldg + [list(vset)]
for v in vset:
g[v] = g[v]+[n-1]
# NOT g[v] += ... or g[v].append(...),
# to avoid changing items in oldg
# Now g is candidate graph.
# Yield it if no order-b1 f1-set & no order-b2 f2-set
if (not has_fset_with_last(f1, b1, g) and
not has_fset_with_last(f2, b2, g)):
yield g
return isograph.unique_iso(
counterexamples_up_big_list(f1, f2, n, b1, b2, old))
def extremals(f1, f2, b1, b2, printflag=None):
"""Return 1 + order of extremal graphs, list of extremal graphs.
Return (n, gs), where n is 1 + order of an extremal graph, or 0 if
no counterexample graph exists, and gs is a list of all extremal
graphs.
If printflag is True, prints, one on each line, pairs of the form
u v, where u is an integer from 0 to n, and v is the number of
counterexample graphs of order u.
Arguments:
f1 -- induced-hereditary predicate
f2 -- induced-hereditary predicate
b1 -- nonnegative int
b2 -- nonnegative int
printflag -- optional bool: whether to print ongoing messages
Default is False.
See isograph.py for our graph representation.
>>> f1 = is_independent
>>> f2 = is_clique
>>> extremals(f1, f2, 3, 3)
(6, [[[2, 3], [3, 4], [0, 4], [0, 1], [1, 2]]])
>>> extremals(f1, f2, 3, 3, True)
Order & number of counterexample graphs:
0 1
1 1
2 2
3 2
4 3
5 1
6 0
(6, [[[2, 3], [3, 4], [0, 4], [0, 1], [1, 2]]])
"""
if printflag:
print("Order & number of counterexample graphs:")
gs = list(_counterexamples_zero(f1, f2, b1, b2))
howmany = len(gs)
if printflag:
print(0, howmany)
if howmany == 0:
return (0, [])
for n in itertools.count(1):
oldgs = gs
gs = list(_counterexamples_up(f1, f2, b1, b2, n, oldgs))
howmany = len(gs)
if printflag:
print(n, howmany)
if howmany == 0:
return (n, oldgs)
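# Note (added): when the loop above first finds no counterexample graphs of
# order n, that n is the generalized Ramsey number for (f1, b1, f2, b2) and
# oldgs holds the extremal graphs of order n-1.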
# ----------------------------------------------------------------------
# Main program
# ----------------------------------------------------------------------
def main(argv=None):
"""Run doctests; verbose mode if argv[1] is "--Test".
If argv is not given, then sys.argv is used.
"""
if argv is None:
argv = sys.argv
import doctest
verbose = (len(argv) >= 2 and argv[1] == "--Test")
if verbose:
print("Running doctests (verbose mode)")
else:
print("Running doctests")
doctest.testmod(verbose=verbose)
return 0
# Execute main() if running as program, not if imported as module
if __name__ == "__main__":
sys.exit(main())
| mit |
Taranys/Sick-Beard | lib/hachoir_parser/network/tcpdump.py | 90 | 16398 | """
Tcpdump parser
Source:
* libpcap source code (file savefile.c)
* RFC 791 (IPv4)
* RFC 792 (ICMP)
* RFC 793 (TCP)
* RFC 1122 (Requirements for Internet Hosts)
Author: Victor Stinner
Creation: 23 march 2006
"""
from lib.hachoir_parser import Parser
from lib.hachoir_core.field import (FieldSet, ParserError,
Enum, Bytes, NullBytes, RawBytes,
UInt8, UInt16, UInt32, Int32, TimestampUnix32,
Bit, Bits, NullBits)
from lib.hachoir_core.endian import NETWORK_ENDIAN, LITTLE_ENDIAN
from lib.hachoir_core.tools import humanDuration
from lib.hachoir_core.text_handler import textHandler, hexadecimal
from lib.hachoir_core.tools import createDict
from lib.hachoir_parser.network.common import MAC48_Address, IPv4_Address, IPv6_Address
def diff(field):
return humanDuration(field.value*1000)
class Layer(FieldSet):
endian = NETWORK_ENDIAN
def parseNext(self, parent):
return None
class ARP(Layer):
opcode_name = {
1: "request",
2: "reply"
}
endian = NETWORK_ENDIAN
def createFields(self):
yield UInt16(self, "hw_type")
yield UInt16(self, "proto_type")
yield UInt8(self, "hw_size")
yield UInt8(self, "proto_size")
yield Enum(UInt16(self, "opcode"), ARP.opcode_name)
yield MAC48_Address(self, "src_mac")
yield IPv4_Address(self, "src_ip")
yield MAC48_Address(self, "dst_mac")
yield IPv4_Address(self, "dst_ip")
def createDescription(self):
desc = "ARP: %s" % self["opcode"].display
opcode = self["opcode"].value
src_ip = self["src_ip"].display
dst_ip = self["dst_ip"].display
if opcode == 1:
desc += ", %s ask %s" % (dst_ip, src_ip)
elif opcode == 2:
desc += " from %s" % src_ip
return desc
class TCP_Option(FieldSet):
NOP = 1
MAX_SEGMENT = 2
WINDOW_SCALE = 3
SACK = 4
TIMESTAMP = 8
code_name = {
NOP: "NOP",
MAX_SEGMENT: "Max segment size",
WINDOW_SCALE: "Window scale",
SACK: "SACK permitted",
TIMESTAMP: "Timestamp"
}
def __init__(self, *args):
FieldSet.__init__(self, *args)
if self["code"].value != self.NOP:
self._size = self["length"].value * 8
else:
self._size = 8
def createFields(self):
yield Enum(UInt8(self, "code", "Code"), self.code_name)
code = self["code"].value
if code == self.NOP:
return
yield UInt8(self, "length", "Option size in bytes")
if code == self.MAX_SEGMENT:
yield UInt16(self, "max_seg", "Maximum segment size")
elif code == self.WINDOW_SCALE:
yield UInt8(self, "win_scale", "Window scale")
elif code == self.TIMESTAMP:
yield UInt32(self, "ts_val", "Timestamp value")
yield UInt32(self, "ts_ecr", "Timestamp echo reply")
else:
size = (self.size - self.current_size) // 8
if size:
yield RawBytes(self, "data", size)
def createDescription(self):
return "TCP option: %s" % self["code"].display
class TCP(Layer):
port_name = {
13: "daytime",
20: "ftp data",
21: "ftp",
23: "telnet",
25: "smtp",
53: "dns",
63: "dhcp/bootp",
80: "HTTP",
110: "pop3",
119: "nntp",
123: "ntp",
139: "netbios session service",
1863: "MSNMS",
6667: "IRC"
}
def createFields(self):
yield Enum(UInt16(self, "src"), self.port_name)
yield Enum(UInt16(self, "dst"), self.port_name)
yield UInt32(self, "seq_num")
yield UInt32(self, "ack_num")
yield Bits(self, "hdrlen", 6, "Header lenght")
yield NullBits(self, "reserved", 2, "Reserved")
yield Bit(self, "cgst", "Congestion Window Reduced")
yield Bit(self, "ecn-echo", "ECN-echo")
yield Bit(self, "urg", "Urgent")
yield Bit(self, "ack", "Acknowledge")
yield Bit(self, "psh", "Push mmode")
yield Bit(self, "rst", "Reset connection")
yield Bit(self, "syn", "Synchronize")
yield Bit(self, "fin", "Stop the connection")
yield UInt16(self, "winsize", "Windows size")
yield textHandler(UInt16(self, "checksum"), hexadecimal)
yield UInt16(self, "urgent")
size = self["hdrlen"].value*8 - self.current_size
while 0 < size:
option = TCP_Option(self, "option[]")
yield option
size -= option.size
def parseNext(self, parent):
return None
def createDescription(self):
src = self["src"].value
dst = self["dst"].value
if src < 32768:
src = self["src"].display
else:
src = None
if dst < 32768:
dst = self["dst"].display
else:
dst = None
desc = "TCP"
        if src is not None and dst is not None:
            desc += " (%s->%s)" % (src, dst)
        elif src is not None:
            desc += " (%s->)" % (src)
        elif dst is not None:
            desc += " (->%s)" % (dst)
# Get flags
flags = []
if self["syn"].value:
flags.append("SYN")
if self["ack"].value:
flags.append("ACK")
if self["fin"].value:
flags.append("FIN")
if self["rst"].value:
flags.append("RST")
if flags:
desc += " [%s]" % (",".join(flags))
return desc
class UDP(Layer):
port_name = {
12: "daytime",
22: "ssh",
53: "DNS",
67: "dhcp/bootp",
80: "http",
110: "pop3",
123: "ntp",
137: "netbios name service",
138: "netbios datagram service"
}
def createFields(self):
yield Enum(UInt16(self, "src"), UDP.port_name)
yield Enum(UInt16(self, "dst"), UDP.port_name)
yield UInt16(self, "length")
yield textHandler(UInt16(self, "checksum"), hexadecimal)
def createDescription(self):
return "UDP (%s->%s)" % (self["src"].display, self["dst"].display)
class ICMP(Layer):
REJECT = 3
PONG = 0
PING = 8
type_desc = {
PONG: "Pong",
REJECT: "Reject",
PING: "Ping"
}
reject_reason = {
0: "net unreachable",
1: "host unreachable",
2: "protocol unreachable",
3: "port unreachable",
4: "fragmentation needed and DF set",
5: "source route failed",
6: "Destination network unknown error",
7: "Destination host unknown error",
8: "Source host isolated error",
9: "Destination network administratively prohibited",
10: "Destination host administratively prohibited",
11: "Unreachable network for Type Of Service",
12: "Unreachable host for Type Of Service.",
13: "Communication administratively prohibited",
14: "Host precedence violation",
15: "Precedence cutoff in effect"
}
def createFields(self):
# Type
yield Enum(UInt8(self, "type"), self.type_desc)
type = self["type"].value
# Code
field = UInt8(self, "code")
if type == 3:
field = Enum(field, self.reject_reason)
yield field
# Options
yield textHandler(UInt16(self, "checksum"), hexadecimal)
if type in (self.PING, self.PONG): # and self["code"].value == 0:
yield UInt16(self, "id")
yield UInt16(self, "seq_num")
# follow: ping data
elif type == self.REJECT:
yield NullBytes(self, "empty", 2)
yield UInt16(self, "hop_mtu", "Next-Hop MTU")
def createDescription(self):
type = self["type"].value
if type in (self.PING, self.PONG):
return "%s (num=%s)" % (self["type"].display, self["seq_num"].value)
else:
return "ICMP (%s)" % self["type"].display
def parseNext(self, parent):
if self["type"].value == self.REJECT:
return IPv4(parent, "rejected_ipv4")
else:
return None
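# Note (added): RFC 792 specifies that a Destination Unreachable (REJECT)
# message embeds the IP header of the offending datagram, which is why
# parseNext() above chains a nested IPv4 field set after a reject.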
class ICMPv6(Layer):
ECHO_REQUEST = 128
ECHO_REPLY = 129
TYPE_DESC = {
128: "Echo request",
129: "Echo reply",
}
def createFields(self):
yield Enum(UInt8(self, "type"), self.TYPE_DESC)
yield UInt8(self, "code")
yield textHandler(UInt16(self, "checksum"), hexadecimal)
if self['type'].value in (self.ECHO_REQUEST, self.ECHO_REPLY):
yield UInt16(self, "id")
yield UInt16(self, "sequence")
def createDescription(self):
if self['type'].value in (self.ECHO_REQUEST, self.ECHO_REPLY):
return "%s (num=%s)" % (self["type"].display, self["sequence"].value)
else:
return "ICMPv6 (%s)" % self["type"].display
class IP(Layer):
PROTOCOL_INFO = {
1: ("icmp", ICMP, "ICMP"),
6: ("tcp", TCP, "TCP"),
17: ("udp", UDP, "UDP"),
58: ("icmpv6", ICMPv6, "ICMPv6"),
60: ("ipv6_opts", None, "IPv6 destination option"),
}
PROTOCOL_NAME = createDict(PROTOCOL_INFO, 2)
def parseNext(self, parent):
proto = self["protocol"].value
if proto not in self.PROTOCOL_INFO:
return None
name, parser, desc = self.PROTOCOL_INFO[proto]
if not parser:
return None
return parser(parent, name)
class IPv4(IP):
precedence_name = {
7: "Network Control",
6: "Internetwork Control",
5: "CRITIC/ECP",
4: "Flash Override",
3: "Flash",
2: "Immediate",
1: "Priority",
0: "Routine",
}
def __init__(self, *args):
FieldSet.__init__(self, *args)
self._size = self["hdr_size"].value * 32
def createFields(self):
yield Bits(self, "version", 4, "Version")
yield Bits(self, "hdr_size", 4, "Header size divided by 5")
# Type of service
yield Enum(Bits(self, "precedence", 3, "Precedence"), self.precedence_name)
yield Bit(self, "low_delay", "If set, low delay, else normal delay")
yield Bit(self, "high_throu", "If set, high throughput, else normal throughput")
yield Bit(self, "high_rel", "If set, high relibility, else normal")
yield NullBits(self, "reserved[]", 2, "(reserved for future use)")
yield UInt16(self, "length")
yield UInt16(self, "id")
yield NullBits(self, "reserved[]", 1)
yield Bit(self, "df", "Don't fragment")
yield Bit(self, "more_frag", "There are more fragments? if not set, it's the last one")
yield Bits(self, "frag_ofst_lo", 5)
yield UInt8(self, "frag_ofst_hi")
yield UInt8(self, "ttl", "Type to live")
yield Enum(UInt8(self, "protocol"), self.PROTOCOL_NAME)
yield textHandler(UInt16(self, "checksum"), hexadecimal)
yield IPv4_Address(self, "src")
yield IPv4_Address(self, "dst")
size = (self.size - self.current_size) // 8
if size:
yield RawBytes(self, "options", size)
def createDescription(self):
return "IPv4 (%s>%s)" % (self["src"].display, self["dst"].display)
class IPv6(IP):
static_size = 40 * 8
endian = NETWORK_ENDIAN
def createFields(self):
yield Bits(self, "version", 4, "Version (6)")
yield Bits(self, "traffic", 8, "Traffic class")
yield Bits(self, "flow", 20, "Flow label")
yield Bits(self, "length", 16, "Payload length")
yield Enum(Bits(self, "protocol", 8, "Next header"), self.PROTOCOL_NAME)
yield Bits(self, "hop_limit", 8, "Hop limit")
yield IPv6_Address(self, "src")
yield IPv6_Address(self, "dst")
def createDescription(self):
return "IPv6 (%s>%s)" % (self["src"].display, self["dst"].display)
class Layer2(Layer):
PROTO_INFO = {
0x0800: ("ipv4", IPv4, "IPv4"),
0x0806: ("arp", ARP, "ARP"),
0x86dd: ("ipv6", IPv6, "IPv6"),
}
PROTO_DESC = createDict(PROTO_INFO, 2)
def parseNext(self, parent):
try:
            name, parser, desc = self.PROTO_INFO[self["protocol"].value]
return parser(parent, name)
except KeyError:
return None
class Unicast(Layer2):
packet_type_name = {
0: "Unicast to us"
}
def createFields(self):
yield Enum(UInt16(self, "packet_type"), self.packet_type_name)
yield UInt16(self, "addr_type", "Link-layer address type")
yield UInt16(self, "addr_length", "Link-layer address length")
length = self["addr_length"].value
length = 8 # FIXME: Should we use addr_length or not?
if length:
yield RawBytes(self, "source", length)
yield Enum(UInt16(self, "protocol"), self.PROTO_DESC)
class Ethernet(Layer2):
static_size = 14*8
def createFields(self):
yield MAC48_Address(self, "dst")
yield MAC48_Address(self, "src")
yield Enum(UInt16(self, "protocol"), self.PROTO_DESC)
def createDescription(self):
return "Ethernet: %s>%s (%s)" % \
(self["src"].display, self["dst"].display, self["protocol"].display)
class Packet(FieldSet):
endian = LITTLE_ENDIAN
def __init__(self, parent, name, parser, first_name):
FieldSet.__init__(self, parent, name)
self._size = (16 + self["caplen"].value) * 8
self._first_parser = parser
self._first_name = first_name
def createFields(self):
yield TimestampUnix32(self, "ts_epoch", "Timestamp (Epoch)")
yield UInt32(self, "ts_nanosec", "Timestamp (nano second)")
yield UInt32(self, "caplen", "length of portion present")
yield UInt32(self, "len", "length this packet (off wire)")
# Read different layers
field = self._first_parser(self, self._first_name)
while field:
yield field
field = field.parseNext(self)
# Read data if any
size = (self.size - self.current_size) // 8
if size:
yield RawBytes(self, "data", size)
def getTimestamp(self):
nano_sec = float(self["ts_nanosec"].value) / 100
from datetime import timedelta
return self["ts_epoch"].value + timedelta(microseconds=nano_sec)
def createDescription(self):
t0 = self["/packet[0]"].getTimestamp()
# ts = max(self.getTimestamp() - t0, t0)
ts = self.getTimestamp() - t0
#text = ["%1.6f: " % ts]
text = ["%s: " % ts]
if "icmp" in self:
text.append(self["icmp"].description)
elif "tcp" in self:
text.append(self["tcp"].description)
elif "udp" in self:
text.append(self["udp"].description)
elif "arp" in self:
text.append(self["arp"].description)
else:
text.append("Packet")
return "".join(text)
class TcpdumpFile(Parser):
PARSER_TAGS = {
"id": "tcpdump",
"category": "misc",
"min_size": 24*8,
"description": "Tcpdump file (network)",
"magic": (("\xd4\xc3\xb2\xa1", 0),),
}
endian = LITTLE_ENDIAN
LINK_TYPE = {
1: ("ethernet", Ethernet),
113: ("unicast", Unicast),
}
LINK_TYPE_DESC = createDict(LINK_TYPE, 0)
def validate(self):
if self["id"].value != "\xd4\xc3\xb2\xa1":
return "Wrong file signature"
if self["link_type"].value not in self.LINK_TYPE:
return "Unknown link type"
return True
def createFields(self):
yield Bytes(self, "id", 4, "Tcpdump identifier")
yield UInt16(self, "maj_ver", "Major version")
yield UInt16(self, "min_ver", "Minor version")
yield Int32(self, "this_zone", "GMT to local time zone correction")
yield Int32(self, "sigfigs", "accuracy of timestamps")
yield UInt32(self, "snap_len", "max length saved portion of each pkt")
yield Enum(UInt32(self, "link_type", "data link type"), self.LINK_TYPE_DESC)
link = self["link_type"].value
if link not in self.LINK_TYPE:
raise ParserError("Unknown link type: %s" % link)
name, parser = self.LINK_TYPE[link]
while self.current_size < self.size:
yield Packet(self, "packet[]", parser, name)
| gpl-3.0 |
leighpauls/k2cro4 | tools/deep_memory_profiler/PRESUBMIT.py | 116 | 1425 | # Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Top-level presubmit script for deep_memory_profiler.
See http://dev.chromium.org/developers/how-tos/depottools/presubmit-scripts for
details on the presubmit API built into gcl.
"""
def CommonChecks(input_api, output_api):
import sys
def join(*args):
return input_api.os_path.join(input_api.PresubmitLocalPath(), *args)
output = []
sys_path_backup = sys.path
try:
sys.path = [
join('..', 'find_runtime_symbols'),
] + sys.path
output.extend(input_api.canned_checks.RunPylint(input_api, output_api))
finally:
sys.path = sys_path_backup
output.extend(
input_api.canned_checks.RunUnitTestsInDirectory(
input_api, output_api,
input_api.os_path.join(input_api.PresubmitLocalPath(), 'tests'),
whitelist=[r'.+_test\.py$']))
if input_api.is_committing:
output.extend(input_api.canned_checks.PanProjectChecks(input_api,
output_api,
owners_check=False))
return output
def CheckChangeOnUpload(input_api, output_api):
return CommonChecks(input_api, output_api)
def CheckChangeOnCommit(input_api, output_api):
return CommonChecks(input_api, output_api)
| bsd-3-clause |
acehanks/projects | tripadvisor_scrapy/tripad/settings.py | 1 | 3128 | # -*- coding: utf-8 -*-
# Scrapy settings for tripad project
#
# For simplicity, this file contains only settings considered important or
# commonly used. You can find more settings consulting the documentation:
#
# http://doc.scrapy.org/en/latest/topics/settings.html
# http://scrapy.readthedocs.org/en/latest/topics/downloader-middleware.html
# http://scrapy.readthedocs.org/en/latest/topics/spider-middleware.html
BOT_NAME = 'tripad'
SPIDER_MODULES = ['tripad.spiders']
NEWSPIDER_MODULE = 'tripad.spiders'
# Crawl responsibly by identifying yourself (and your website) on the user-agent
#USER_AGENT = 'tripad (+http://www.yourdomain.com)'
# Obey robots.txt rules
ROBOTSTXT_OBEY = True
# Configure maximum concurrent requests performed by Scrapy (default: 16)
#CONCURRENT_REQUESTS = 32
# Configure a delay for requests for the same website (default: 0)
# See http://scrapy.readthedocs.org/en/latest/topics/settings.html#download-delay
# See also autothrottle settings and docs
#DOWNLOAD_DELAY = 3
# The download delay setting will honor only one of:
#CONCURRENT_REQUESTS_PER_DOMAIN = 16
#CONCURRENT_REQUESTS_PER_IP = 16
# Disable cookies (enabled by default)
#COOKIES_ENABLED = False
# Disable Telnet Console (enabled by default)
#TELNETCONSOLE_ENABLED = False
# Override the default request headers:
#DEFAULT_REQUEST_HEADERS = {
# 'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
# 'Accept-Language': 'en',
#}
# Enable or disable spider middlewares
# See http://scrapy.readthedocs.org/en/latest/topics/spider-middleware.html
#SPIDER_MIDDLEWARES = {
# 'tripad.middlewares.MyCustomSpiderMiddleware': 543,
#}
# Enable or disable downloader middlewares
# See http://scrapy.readthedocs.org/en/latest/topics/downloader-middleware.html
#DOWNLOADER_MIDDLEWARES = {
# 'tripad.middlewares.MyCustomDownloaderMiddleware': 543,
#}
# Enable or disable extensions
# See http://scrapy.readthedocs.org/en/latest/topics/extensions.html
#EXTENSIONS = {
# 'scrapy.extensions.telnet.TelnetConsole': None,
#}
# Configure item pipelines
# See http://scrapy.readthedocs.org/en/latest/topics/item-pipeline.html
#ITEM_PIPELINES = {
# 'tripad.pipelines.SomePipeline': 300,
#}
# Enable and configure the AutoThrottle extension (disabled by default)
# See http://doc.scrapy.org/en/latest/topics/autothrottle.html
#AUTOTHROTTLE_ENABLED = True
# The initial download delay
#AUTOTHROTTLE_START_DELAY = 5
# The maximum download delay to be set in case of high latencies
#AUTOTHROTTLE_MAX_DELAY = 60
# The average number of requests Scrapy should be sending in parallel to
# each remote server
#AUTOTHROTTLE_TARGET_CONCURRENCY = 1.0
# Enable showing throttling stats for every response received:
#AUTOTHROTTLE_DEBUG = False
# Enable and configure HTTP caching (disabled by default)
# See http://scrapy.readthedocs.org/en/latest/topics/downloader-middleware.html#httpcache-middleware-settings
#HTTPCACHE_ENABLED = True
#HTTPCACHE_EXPIRATION_SECS = 0
#HTTPCACHE_DIR = 'httpcache'
#HTTPCACHE_IGNORE_HTTP_CODES = []
#HTTPCACHE_STORAGE = 'scrapy.extensions.httpcache.FilesystemCacheStorage'
| mit |
zakuro9715/lettuce | tests/integration/lib/Django-1.3/django/contrib/gis/gdal/envelope.py | 321 | 7044 | """
The GDAL/OGR library uses an Envelope structure to hold the bounding
box information for a geometry. The envelope (bounding box) contains
two pairs of coordinates, one for the lower left coordinate and one
for the upper right coordinate:
+----------o Upper right; (max_x, max_y)
| |
| |
| |
Lower left (min_x, min_y) o----------+
"""
from ctypes import Structure, c_double
from django.contrib.gis.gdal.error import OGRException
# The OGR definition of an Envelope is a C structure containing four doubles.
# See the 'ogr_core.h' source file for more information:
# http://www.gdal.org/ogr/ogr__core_8h-source.html
class OGREnvelope(Structure):
"Represents the OGREnvelope C Structure."
_fields_ = [("MinX", c_double),
("MaxX", c_double),
("MinY", c_double),
("MaxY", c_double),
]
class Envelope(object):
"""
The Envelope object is a C structure that contains the minimum and
maximum X, Y coordinates for a rectangle bounding box. The naming
of the variables is compatible with the OGR Envelope structure.
"""
def __init__(self, *args):
"""
The initialization function may take an OGREnvelope structure, 4-element
tuple or list, or 4 individual arguments.
"""
if len(args) == 1:
if isinstance(args[0], OGREnvelope):
# OGREnvelope (a ctypes Structure) was passed in.
self._envelope = args[0]
elif isinstance(args[0], (tuple, list)):
# A tuple was passed in.
if len(args[0]) != 4:
raise OGRException('Incorrect number of tuple elements (%d).' % len(args[0]))
else:
self._from_sequence(args[0])
else:
raise TypeError('Incorrect type of argument: %s' % str(type(args[0])))
elif len(args) == 4:
            # Individual parameters passed in.
# Thanks to ww for the help
self._from_sequence(map(float, args))
else:
raise OGRException('Incorrect number (%d) of arguments.' % len(args))
# Checking the x,y coordinates
if self.min_x > self.max_x:
raise OGRException('Envelope minimum X > maximum X.')
if self.min_y > self.max_y:
raise OGRException('Envelope minimum Y > maximum Y.')
def __eq__(self, other):
"""
Returns True if the envelopes are equivalent; can compare against
other Envelopes and 4-tuples.
"""
if isinstance(other, Envelope):
return (self.min_x == other.min_x) and (self.min_y == other.min_y) and \
(self.max_x == other.max_x) and (self.max_y == other.max_y)
elif isinstance(other, tuple) and len(other) == 4:
return (self.min_x == other[0]) and (self.min_y == other[1]) and \
(self.max_x == other[2]) and (self.max_y == other[3])
else:
raise OGRException('Equivalence testing only works with other Envelopes.')
def __str__(self):
"Returns a string representation of the tuple."
return str(self.tuple)
def _from_sequence(self, seq):
"Initializes the C OGR Envelope structure from the given sequence."
self._envelope = OGREnvelope()
self._envelope.MinX = seq[0]
self._envelope.MinY = seq[1]
self._envelope.MaxX = seq[2]
self._envelope.MaxY = seq[3]
def expand_to_include(self, *args):
"""
Modifies the envelope to expand to include the boundaries of
the passed-in 2-tuple (a point), 4-tuple (an extent) or
envelope.
"""
# We provide a number of different signatures for this method,
# and the logic here is all about converting them into a
# 4-tuple single parameter which does the actual work of
# expanding the envelope.
if len(args) == 1:
if isinstance(args[0], Envelope):
return self.expand_to_include(args[0].tuple)
elif hasattr(args[0], 'x') and hasattr(args[0], 'y'):
return self.expand_to_include(args[0].x, args[0].y, args[0].x, args[0].y)
elif isinstance(args[0], (tuple, list)):
# A tuple was passed in.
if len(args[0]) == 2:
return self.expand_to_include((args[0][0], args[0][1], args[0][0], args[0][1]))
elif len(args[0]) == 4:
(minx, miny, maxx, maxy) = args[0]
if minx < self._envelope.MinX:
self._envelope.MinX = minx
if miny < self._envelope.MinY:
self._envelope.MinY = miny
if maxx > self._envelope.MaxX:
self._envelope.MaxX = maxx
if maxy > self._envelope.MaxY:
self._envelope.MaxY = maxy
else:
raise OGRException('Incorrect number of tuple elements (%d).' % len(args[0]))
else:
raise TypeError('Incorrect type of argument: %s' % str(type(args[0])))
elif len(args) == 2:
# An x and an y parameter were passed in
return self.expand_to_include((args[0], args[1], args[0], args[1]))
elif len(args) == 4:
            # Individual parameters passed in.
return self.expand_to_include(args)
else:
            raise OGRException('Incorrect number (%d) of arguments.' % len(args))
@property
def min_x(self):
"Returns the value of the minimum X coordinate."
return self._envelope.MinX
@property
def min_y(self):
"Returns the value of the minimum Y coordinate."
return self._envelope.MinY
@property
def max_x(self):
"Returns the value of the maximum X coordinate."
return self._envelope.MaxX
@property
def max_y(self):
"Returns the value of the maximum Y coordinate."
return self._envelope.MaxY
@property
def ur(self):
"Returns the upper-right coordinate."
return (self.max_x, self.max_y)
@property
def ll(self):
"Returns the lower-left coordinate."
return (self.min_x, self.min_y)
@property
def tuple(self):
"Returns a tuple representing the envelope."
return (self.min_x, self.min_y, self.max_x, self.max_y)
@property
def wkt(self):
"Returns WKT representing a Polygon for this envelope."
# TODO: Fix significant figures.
return 'POLYGON((%s %s,%s %s,%s %s,%s %s,%s %s))' % \
(self.min_x, self.min_y, self.min_x, self.max_y,
self.max_x, self.max_y, self.max_x, self.min_y,
self.min_x, self.min_y)
| gpl-3.0 |
bfirsh/django-old | django/contrib/localflavor/uy/forms.py | 310 | 2083 | # -*- coding: utf-8 -*-
"""
UY-specific form helpers.
"""
import re
from django.core.validators import EMPTY_VALUES
from django.forms.fields import Select, RegexField
from django.forms import ValidationError
from django.utils.translation import ugettext_lazy as _
from django.contrib.localflavor.uy.util import get_validation_digit
class UYDepartamentSelect(Select):
"""
    A Select widget that uses a list of Uruguayan departments as its choices.
"""
def __init__(self, attrs=None):
from uy_departaments import DEPARTAMENT_CHOICES
super(UYDepartamentSelect, self).__init__(attrs, choices=DEPARTAMENT_CHOICES)
class UYCIField(RegexField):
"""
A field that validates Uruguayan 'Cedula de identidad' (CI) numbers.
"""
default_error_messages = {
'invalid': _("Enter a valid CI number in X.XXX.XXX-X,"
"XXXXXXX-X or XXXXXXXX format."),
'invalid_validation_digit': _("Enter a valid CI number."),
}
def __init__(self, *args, **kwargs):
super(UYCIField, self).__init__(r'(?P<num>(\d{6,7}|(\d\.)?\d{3}\.\d{3}))-?(?P<val>\d)',
*args, **kwargs)
def clean(self, value):
"""
Validates format and validation digit.
The official format is [X.]XXX.XXX-X but usually dots and/or slash are
omitted so, when validating, those characters are ignored if found in
the correct place. The three typically used formats are supported:
[X]XXXXXXX, [X]XXXXXX-X and [X.]XXX.XXX-X.
"""
value = super(UYCIField, self).clean(value)
if value in EMPTY_VALUES:
return u''
match = self.regex.match(value)
if not match:
raise ValidationError(self.error_messages['invalid'])
number = int(match.group('num').replace('.', ''))
validation_digit = int(match.group('val'))
if not validation_digit == get_validation_digit(number):
raise ValidationError(self.error_messages['invalid_validation_digit'])
return value
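# Illustrative note (added; the expected digit comes from
# get_validation_digit): cleaning u"1.234.567-4" strips the dots and dash,
# then accepts the value only if 4 equals the validation digit computed
# from 1234567.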
| bsd-3-clause |
mandre/kolla | tools/version-check.py | 2 | 5041 | #!/usr/bin/env python
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import collections
import logging
import os
import re
import sys
import bs4
from oslo_config import cfg
import pkg_resources
import prettytable
import requests
PROJECT_ROOT = os.path.abspath(os.path.join(
os.path.dirname(os.path.realpath(__file__)), '..'))
# NOTE(SamYaple): Update the search patch to prefer PROJECT_ROOT as the source
# of packages to import if we are using local tools/build.py
# instead of pip installed kolla-build tool
if PROJECT_ROOT not in sys.path:
sys.path.insert(0, PROJECT_ROOT)
from kolla.common import config as common_config
logging.basicConfig(format="%(message)s")
LOG = logging.getLogger('version-check')
# Filter list for non-projects
NOT_PROJECTS = [
'nova-novncproxy',
'nova-spicehtml5proxy',
'openstack-base',
'profiles'
]
TARBALLS_BASE_URL = 'http://tarballs.openstack.org'
VERSIONS = {'local': dict()}
def retrieve_upstream_versions():
upstream_versions = dict()
for project in VERSIONS['local']:
winner = None
series = VERSIONS['local'][project].split('.')[0]
base = '{}/{}'.format(TARBALLS_BASE_URL, project)
LOG.debug("Getting latest version for project %s from %s",
project, base)
r = requests.get(base)
s = bs4.BeautifulSoup(r.text, 'html.parser')
for link in s.find_all('a'):
version = link.get('href')
if (version.endswith('.tar.gz') and
version.startswith('{}-{}'.format(project, series))):
split = '{}-|.tar.gz'.format(project)
candidate = re.split(split, version)[1]
# Ignore 2014, 2015 versions as they are older
if candidate.startswith('201'):
continue
if not winner or more_recent(candidate, winner):
winner = candidate
if not winner:
LOG.warning("Could not find a version for %s", project)
continue
if '-' in winner:
winner = winner.split('-')[1]
upstream_versions[project] = winner
LOG.debug("Found latest version %s for project %s", winner, project)
VERSIONS['upstream'] = collections.OrderedDict(
sorted(upstream_versions.items()))
def retrieve_local_versions(conf):
for section in common_config.SOURCES:
if section in NOT_PROJECTS:
continue
project = section.split('-')[0]
if section not in conf.list_all_sections():
LOG.debug("Project %s not found in configuration file, using "
"default from kolla.common.config", project)
raw_version = common_config.SOURCES[section]['location']
else:
raw_version = getattr(conf, section).location
version = raw_version.split('/')[-1].split('.tar.gz')[0]
if '-' in version:
version = version.split('-')[1]
LOG.debug("Use local version %s for project %s", version, project)
VERSIONS['local'][project] = version
def more_recent(candidate, reference):
return pkg_resources.parse_version(candidate) > \
pkg_resources.parse_version(reference)
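# For example, more_recent('10.0.0', '9.2.1') is True: parse_version compares
# release segments numerically rather than lexicographically.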
def diff_link(project, old_ref, new_ref):
return "https://github.com/openstack/{}/compare/{}...{}".format(
project, old_ref, new_ref)
def compare_versions():
up_to_date = True
result = prettytable.PrettyTable(["Project", "Current version",
"Latest version", "Comparing changes"])
result.align = "l"
for project in VERSIONS['upstream']:
if project not in VERSIONS['local']:
continue
upstream_version = VERSIONS['upstream'][project]
local_version = VERSIONS['local'][project]
if more_recent(upstream_version, local_version):
result.add_row([
project,
VERSIONS['local'][project],
VERSIONS['upstream'][project],
diff_link(project, local_version, upstream_version)
])
up_to_date = False
if up_to_date:
result = "Everything is up to date"
print(result)
def main():
conf = cfg.ConfigOpts()
common_config.parse(conf, sys.argv[1:], prog='version-check')
if conf.debug:
LOG.setLevel(logging.DEBUG)
retrieve_local_versions(conf)
retrieve_upstream_versions()
compare_versions()
if __name__ == '__main__':
main()
| apache-2.0 |
wzbozon/statsmodels | statsmodels/sandbox/tests/maketests_mlabwrap.py | 37 | 9022 | '''generate py modules with test cases and results from mlabwrap
currently matlab: princomp, garchar, garchma
'''
import numpy as np
from numpy.testing import assert_array_almost_equal
from numpy import array
xo = array([[ -419, -731, -1306, -1294],
[ 6, 529, -200, -437],
[ -27, -833, -6, -564],
[ -304, -273, -502, -739],
[ 1377, -912, 927, 280],
[ -375, -517, -514, 49],
[ 247, -504, 123, -259],
[ 712, 534, -773, 286],
[ 195, -1080, 3256, -178],
[ -854, 75, -706, -1084],
[-1219, -612, -15, -203],
[ 550, -628, -483, -2686],
[ -365, 1376, -1266, 317],
[ -489, 544, -195, 431],
[ -656, 854, 840, -723],
[ 16, -1385, -880, -460],
[ 258, -2252, 96, 54],
[ 2049, -750, -1115, 381],
[ -65, 280, -777, 416],
[ 755, 82, -806, 1027],
[ -39, -170, -2134, 743],
[ -859, 780, 746, -133],
[ 762, 252, -450, -459],
[ -941, -202, 49, -202],
[ -54, 115, 455, 388],
[-1348, 1246, 1430, -480],
[ 229, -535, -1831, 1524],
[ -651, -167, 2116, 483],
[-1249, -1373, 888, -1092],
[ -75, -2162, 486, -496],
[ 2436, -1627, -1069, 162],
[ -63, 560, -601, 587],
[ -60, 1051, -277, 1323],
[ 1329, -1294, 68, 5],
[ 1532, -633, -923, 696],
[ 669, 895, -1762, -375],
[ 1129, -548, 2064, 609],
[ 1320, 573, 2119, 270],
[ -213, -412, -2517, 1685],
[ 73, -979, 1312, -1220],
[-1360, -2107, -237, 1522],
[ -645, 205, -543, -169],
[ -212, 1072, 543, -128],
[ -352, -129, -605, -904],
[ 511, 85, 167, -1914],
[ 1515, 1862, 942, 1622],
[ -465, 623, -495, -89],
[-1396, -979, 1758, 128],
[ -255, -47, 980, 501],
[-1282, -58, -49, -610],
[ -889, -1177, -492, 494],
[ 1415, 1146, 696, -722],
[ 1237, -224, -1609, -64],
[ -528, -1625, 231, 883],
[ -327, 1636, -476, -361],
[ -781, 793, 1882, 234],
[ -506, -561, 1988, -810],
[-1233, 1467, -261, 2164],
[ 53, 1069, 824, 2123],
[-1200, -441, -321, 339],
[ 1606, 298, -995, 1292],
[-1740, -672, -1628, -129],
[-1450, -354, 224, -657],
[-2556, 1006, -706, -1453],
[ -717, -463, 345, -1821],
[ 1056, -38, -420, -455],
[ -523, 565, 425, 1138],
[-1030, -187, 683, 78],
[ -214, -312, -1171, -528],
[ 819, 736, -265, 423],
[ 1339, 351, 1142, 579],
[ -387, -126, -1573, 2346],
[ 969, 2, 327, -134],
[ 163, 227, 90, 2021],
[ 1022, -1076, 174, 304],
[ 1042, 1317, 311, 880],
[ 2018, -840, 295, 2651],
[ -277, 566, 1147, -189],
[ 20, 467, 1262, 263],
[ -663, 1061, -1552, -1159],
[ 1830, 391, 2534, -199],
[ -487, 752, -1061, 351],
[-2138, -556, -367, -457],
[ -868, -411, -559, 726],
[ 1770, 819, -892, -363],
[ 553, -736, -169, -490],
[ 388, -503, 809, -821],
[ -516, -1452, -192, 483],
[ 493, 2904, 1318, 2591],
[ 175, 584, -1001, 1675],
[ 1316, -1596, -460, 1500],
[ 1212, 214, -644, -696],
[ -501, 338, 1197, -841],
[ -587, -469, -1101, 24],
[-1205, 1910, 659, 1232],
[ -150, 398, 594, 394],
[ 34, -663, 235, -334],
[-1580, 647, 239, -351],
[-2177, -345, 1215, -1494],
[ 1923, 329, -152, 1128]])
x = xo/1000.
class HoldIt(object):
def __init__(self, name):
self.name = name
def save(self, what=None, filename=None, header=True, useinstant=True,
comment=None):
if what is None:
what = (i for i in self.__dict__ if i[0] != '_')
if header:
txt = ['import numpy as np\nfrom numpy import array\n\n']
if useinstant:
txt.append('class Holder(object):\n pass\n\n')
else:
txt = []
if useinstant:
txt.append('%s = Holder()' % self.name)
prefix = '%s.' % self.name
else:
prefix = ''
        if comment is not None:
txt.append("%scomment = '%s'" % (prefix, comment))
for x in what:
txt.append('%s%s = %s' % (prefix, x, repr(getattr(self,x))))
txt.extend(['','']) #add empty lines at end
        if filename is not None:
file(filename, 'a+').write('\n'.join(txt))
return txt
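# Usage sketch (illustrative, not part of the original script): HoldIt collects
# attributes and serializes them as an importable Python module.
#
#   h = HoldIt('example')
#   h.values = np.array([1.0, 2.0])
#   print('\n'.join(h.save(header=True)))  # pass filename=... to append to a file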
def generate_princomp(xo, filen='testsave.py'):
# import mlabwrap only when run as script
import mlabwrap
from mlabwrap import mlab
np.set_printoptions(precision=14, linewidth=100)
data = HoldIt('data')
data.xo = xo
    data.save(filename=filen, comment='generated data, divide by 1000')
res_princomp = HoldIt('princomp1')
res_princomp.coef, res_princomp.factors, res_princomp.values = \
mlab.princomp(x, nout=3)
res_princomp.save(filename=filen, header=False,
comment='mlab.princomp(x, nout=3)')
res_princomp = HoldIt('princomp2')
res_princomp.coef, res_princomp.factors, res_princomp.values = \
mlab.princomp(x[:20,], nout=3)
np.set_printoptions(precision=14, linewidth=100)
res_princomp.save(filename=filen, header=False,
comment='mlab.princomp(x[:20,], nout=3)')
res_princomp = HoldIt('princomp3')
res_princomp.coef, res_princomp.factors, res_princomp.values = \
mlab.princomp(x[:20,]-x[:20,].mean(0), nout=3)
np.set_printoptions(precision=14, linewidth=100)
res_princomp.save(filename=filen, header=False,
comment='mlab.princomp(x[:20,]-x[:20,].mean(0), nout=3)')
def generate_armarep(filen='testsave.py'):
# import mlabwrap only when run as script
import mlabwrap
from mlabwrap import mlab
res_armarep = HoldIt('armarep')
res_armarep.ar = np.array([1., -0.5, +0.8])
res_armarep.ma = np.array([1., -0.6, 0.08])
res_armarep.marep = mlab.garchma(-res_armarep.ar[1:], res_armarep.ma[1:], 20)
res_armarep.arrep = mlab.garchar(-res_armarep.ar[1:], res_armarep.ma[1:], 20)
res_armarep.save(filename=filen, header=False,
comment=("''mlab.garchma(-res_armarep.ar[1:], res_armarep.ma[1:], 20)\n" +
"mlab.garchar(-res_armarep.ar[1:], res_armarep.ma[1:], 20)''"))
def exampletest():
from statsmodels.sandbox import tsa
arrep = tsa.arma_impulse_response(res_armarep.ma, res_armarep.ar, nobs=21)[1:]
marep = tsa.arma_impulse_response(res_armarep.ar, res_armarep.ma, nobs=21)[1:]
assert_array_almost_equal(res_armarep.marep.ravel(), marep, 14)
#difference in sign convention to matlab for AR term
assert_array_almost_equal(-res_armarep.arrep.ravel(), arrep, 14)
if __name__ == '__main__':
import mlabwrap
from mlabwrap import mlab
import savedrvs
xo = savedrvs.rvsdata.xar2
x100 = xo[-100:]/1000.
x1000 = xo/1000.
filen = 'testsavetls.py'
res_pacf = HoldIt('mlpacf')
res_pacf.comment = 'mlab.parcorr(x, [], 2, nout=3)'
res_pacf.pacf100, res_pacf.lags100, res_pacf.bounds100 = \
mlab.parcorr(x100, [], 2, nout=3)
res_pacf.pacf1000, res_pacf.lags1000, res_pacf.bounds1000 = \
mlab.parcorr(x1000, [], 2, nout=3)
res_pacf.save(filename=filen, header=True)
res_acf = HoldIt('mlacf')
res_acf.comment = 'mlab.autocorr(x, [], 2, nout=3)'
res_acf.acf100, res_acf.lags100, res_acf.bounds100 = \
mlab.autocorr(x100, [], 2, nout=3)
res_acf.acf1000, res_acf.lags1000, res_acf.bounds1000 = \
mlab.autocorr(x1000, [], 2, nout=3)
res_acf.save(filename=filen, header=False)
res_ccf = HoldIt('mlccf')
res_ccf.comment = 'mlab.crosscorr(x[4:], x[:-4], [], 2, nout=3)'
res_ccf.ccf100, res_ccf.lags100, res_ccf.bounds100 = \
mlab.crosscorr(x100[4:], x100[:-4], [], 2, nout=3)
res_ccf.ccf1000, res_ccf.lags1000, res_ccf.bounds1000 = \
mlab.crosscorr(x1000[4:], x1000[:-4], [], 2, nout=3)
res_ccf.save(filename=filen, header=False)
res_ywar = HoldIt('mlywar')
res_ywar.comment = "mlab.ar(x100-x100.mean(), 10, 'yw').a.ravel()"
mbaryw = mlab.ar(x100-x100.mean(), 10, 'yw')
res_ywar.arcoef100 = np.array(mbaryw.a.ravel())
mbaryw = mlab.ar(x1000-x1000.mean(), 20, 'yw')
res_ywar.arcoef1000 = np.array(mbaryw.a.ravel())
res_ywar.save(filename=filen, header=False)
| bsd-3-clause |
zestrada/nova-cs498cc | smoketests/test_sysadmin.py | 13 | 13989 | # vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import os
import random
import shutil
import sys
import tempfile
import time
# If ../nova/__init__.py exists, add ../ to Python search path, so that
# it will override what happens to be installed in /usr/(local/)lib/python...
possible_topdir = os.path.normpath(os.path.join(os.path.abspath(sys.argv[0]),
os.pardir,
os.pardir))
if os.path.exists(os.path.join(possible_topdir, 'nova', '__init__.py')):
sys.path.insert(0, possible_topdir)
from smoketests import base
from smoketests import flags
FLAGS = flags.FLAGS
flags.DEFINE_string('bundle_kernel', 'random.kernel',
'Local kernel file to use for bundling tests')
flags.DEFINE_string('bundle_image', 'random.image',
'Local image file to use for bundling tests')
TEST_PREFIX = 'test%s' % int(random.random() * 1000000)
TEST_BUCKET = '%s_bucket' % TEST_PREFIX
TEST_KEY = '%s_key' % TEST_PREFIX
TEST_GROUP = '%s_group' % TEST_PREFIX
class ImageTests(base.UserSmokeTestCase):
def test_001_can_bundle_image(self):
self.data['tempdir'] = tempfile.mkdtemp()
self.assertTrue(self.bundle_image(FLAGS.bundle_image,
self.data['tempdir']))
def test_002_can_upload_image(self):
try:
self.assertTrue(self.upload_image(TEST_BUCKET,
FLAGS.bundle_image,
self.data['tempdir']))
finally:
if os.path.exists(self.data['tempdir']):
shutil.rmtree(self.data['tempdir'])
def test_003_can_register_image(self):
image_id = self.conn.register_image('%s/%s.manifest.xml' %
(TEST_BUCKET, FLAGS.bundle_image))
self.assert_(image_id is not None)
self.data['image_id'] = image_id
def test_004_can_bundle_kernel(self):
self.assertTrue(self.bundle_image(FLAGS.bundle_kernel, kernel=True))
def test_005_can_upload_kernel(self):
self.assertTrue(self.upload_image(TEST_BUCKET, FLAGS.bundle_kernel))
def test_006_can_register_kernel(self):
kernel_id = self.conn.register_image('%s/%s.manifest.xml' %
(TEST_BUCKET, FLAGS.bundle_kernel))
self.assert_(kernel_id is not None)
self.data['kernel_id'] = kernel_id
def test_007_images_are_available_within_10_seconds(self):
for i in xrange(10):
image = self.conn.get_image(self.data['image_id'])
if image and image.state == 'available':
break
time.sleep(1)
else:
self.assert_(False) # wasn't available within 10 seconds
self.assert_(image.type == 'machine')
for i in xrange(10):
kernel = self.conn.get_image(self.data['kernel_id'])
if kernel and kernel.state == 'available':
break
time.sleep(1)
else:
self.assert_(False) # wasn't available within 10 seconds
self.assert_(kernel.type == 'kernel')
def test_008_can_describe_image_attribute(self):
attrs = self.conn.get_image_attribute(self.data['image_id'],
'launchPermission')
self.assert_(attrs.name, 'launch_permission')
def test_009_can_add_image_launch_permission(self):
image = self.conn.get_image(self.data['image_id'])
self.assertEqual(image.id, self.data['image_id'])
self.assertEqual(image.is_public, False)
self.conn.modify_image_attribute(image_id=self.data['image_id'],
operation='add',
attribute='launchPermission',
groups='all')
image = self.conn.get_image(self.data['image_id'])
self.assertEqual(image.id, self.data['image_id'])
self.assertEqual(image.is_public, True)
def test_010_can_see_launch_permission(self):
attrs = self.conn.get_image_attribute(self.data['image_id'],
'launchPermission')
self.assertEqual(attrs.name, 'launch_permission')
self.assertEqual(attrs.attrs['groups'][0], 'all')
def test_011_can_remove_image_launch_permission(self):
image = self.conn.get_image(self.data['image_id'])
self.assertEqual(image.id, self.data['image_id'])
self.assertEqual(image.is_public, True)
self.conn.modify_image_attribute(image_id=self.data['image_id'],
operation='remove',
attribute='launchPermission',
groups='all')
image = self.conn.get_image(self.data['image_id'])
self.assertEqual(image.id, self.data['image_id'])
self.assertEqual(image.is_public, False)
def test_012_private_image_shows_in_list(self):
images = self.conn.get_all_images()
image_ids = [image.id for image in images]
self.assertTrue(self.data['image_id'] in image_ids)
def test_013_user_can_deregister_kernel(self):
self.assertTrue(self.conn.deregister_image(self.data['kernel_id']))
def test_014_can_deregister_image(self):
self.assertTrue(self.conn.deregister_image(self.data['image_id']))
def test_015_can_delete_bundle(self):
self.assertTrue(self.delete_bundle_bucket(TEST_BUCKET))
class InstanceTests(base.UserSmokeTestCase):
def test_001_can_create_keypair(self):
key = self.create_key_pair(self.conn, TEST_KEY)
self.assertEqual(key.name, TEST_KEY)
def test_002_can_create_instance_with_keypair(self):
reservation = self.conn.run_instances(FLAGS.test_image,
key_name=TEST_KEY,
instance_type='m1.tiny')
self.assertEqual(len(reservation.instances), 1)
self.data['instance'] = reservation.instances[0]
def test_003_instance_runs_within_60_seconds(self):
instance = self.data['instance']
# allow 60 seconds to exit pending with IP
if not self.wait_for_running(self.data['instance']):
self.fail('instance failed to start')
self.data['instance'].update()
ip = self.data['instance'].private_ip_address
self.failIf(ip == '0.0.0.0')
if FLAGS.use_ipv6:
ipv6 = self.data['instance'].dns_name_v6
self.failIf(ipv6 is None)
def test_004_can_ping_private_ip(self):
if not self.wait_for_ping(self.data['instance'].private_ip_address):
self.fail('could not ping instance')
if FLAGS.use_ipv6:
if not self.wait_for_ping(self.data['instance'].dns_name_v6,
"ping6"):
self.fail('could not ping instance v6')
def test_005_can_ssh_to_private_ip(self):
if not self.wait_for_ssh(self.data['instance'].private_ip_address,
TEST_KEY):
self.fail('could not ssh to instance')
if FLAGS.use_ipv6:
if not self.wait_for_ssh(self.data['instance'].dns_name_v6,
TEST_KEY):
self.fail('could not ssh to instance v6')
def test_999_tearDown(self):
self.delete_key_pair(self.conn, TEST_KEY)
self.conn.terminate_instances([self.data['instance'].id])
class VolumeTests(base.UserSmokeTestCase):
def setUp(self):
super(VolumeTests, self).setUp()
self.device = '/dev/vdb'
def test_000_setUp(self):
self.create_key_pair(self.conn, TEST_KEY)
reservation = self.conn.run_instances(FLAGS.test_image,
instance_type='m1.tiny',
key_name=TEST_KEY)
self.data['instance'] = reservation.instances[0]
if not self.wait_for_running(self.data['instance']):
self.fail('instance failed to start')
self.data['instance'].update()
if not self.wait_for_ping(self.data['instance'].private_ip_address):
self.fail('could not ping instance')
if not self.wait_for_ssh(self.data['instance'].private_ip_address,
TEST_KEY):
self.fail('could not ssh to instance')
def test_001_can_create_volume(self):
volume = self.conn.create_volume(1, 'nova')
self.assertEqual(volume.size, 1)
self.data['volume'] = volume
# Give network time to find volume.
time.sleep(5)
def test_002_can_attach_volume(self):
volume = self.data['volume']
for x in xrange(10):
volume.update()
if volume.status.startswith('available'):
break
time.sleep(1)
else:
self.fail('cannot attach volume with state %s' % volume.status)
# Give volume some time to be ready.
time.sleep(5)
volume.attach(self.data['instance'].id, self.device)
# wait
for x in xrange(10):
volume.update()
if volume.status.startswith('in-use'):
break
time.sleep(1)
else:
self.fail('volume never got to in use')
self.assertTrue(volume.status.startswith('in-use'))
def test_003_can_mount_volume(self):
ip = self.data['instance'].private_ip_address
conn = self.connect_ssh(ip, TEST_KEY)
# NOTE(dprince): give some time for volume to show up in partitions
stdin, stdout, stderr = conn.exec_command(
'COUNT="0";'
'until cat /proc/partitions | grep "%s\\$"; do '
'[ "$COUNT" -eq "60" ] && exit 1;'
'COUNT=$(( $COUNT + 1 ));'
'sleep 1; '
'done'
% self.device.rpartition('/')[2])
out = stdout.read()
if not out.strip().endswith(self.device.rpartition('/')[2]):
self.fail('Timeout waiting for volume partition in instance. %s %s'
% (out, stderr.read()))
# NOTE(vish): this will create a dev for images that don't have
# udev rules
stdin, stdout, stderr = conn.exec_command(
'grep %s /proc/partitions | '
'`awk \'{print "mknod /dev/"\\$4" b "\\$1" "\\$2}\'`'
% self.device.rpartition('/')[2])
exec_list = []
exec_list.append('mkdir -p /mnt/vol')
exec_list.append('/sbin/mke2fs %s' % self.device)
exec_list.append('mount %s /mnt/vol' % self.device)
exec_list.append('echo success')
stdin, stdout, stderr = conn.exec_command(' && '.join(exec_list))
out = stdout.read()
conn.close()
if not out.strip().endswith('success'):
self.fail('Unable to mount: %s %s' % (out, stderr.read()))
def test_004_can_write_to_volume(self):
ip = self.data['instance'].private_ip_address
conn = self.connect_ssh(ip, TEST_KEY)
# FIXME(devcamcar): This doesn't fail if the volume hasn't been mounted
stdin, stdout, stderr = conn.exec_command(
'echo hello > /mnt/vol/test.txt')
err = stderr.read()
conn.close()
if len(err) > 0:
self.fail('Unable to write to mount: %s' % (err))
def test_005_volume_is_correct_size(self):
ip = self.data['instance'].private_ip_address
conn = self.connect_ssh(ip, TEST_KEY)
stdin, stdout, stderr = conn.exec_command(
"cat /sys/class/block/%s/size" % self.device.rpartition('/')[2])
out = stdout.read().strip()
conn.close()
# NOTE(vish): 1G bytes / 512 bytes per block
expected_size = 1024 * 1024 * 1024 / 512
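        # i.e. expected_size == 2097152 blocks for a 1 GiB volume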
self.assertEquals('%s' % (expected_size,), out,
'Volume is not the right size: %s %s. Expected: %s' %
(out, stderr.read(), expected_size))
def test_006_me_can_umount_volume(self):
ip = self.data['instance'].private_ip_address
conn = self.connect_ssh(ip, TEST_KEY)
stdin, stdout, stderr = conn.exec_command('umount /mnt/vol')
err = stderr.read()
conn.close()
if len(err) > 0:
self.fail('Unable to unmount: %s' % (err))
def test_007_me_can_detach_volume(self):
result = self.conn.detach_volume(volume_id=self.data['volume'].id)
self.assertTrue(result)
volume = self.data['volume']
for x in xrange(10):
volume.update()
if volume.status.startswith('available'):
break
time.sleep(1)
else:
self.fail('cannot detach volume. state %s' % volume.status)
def test_008_me_can_delete_volume(self):
result = self.conn.delete_volume(self.data['volume'].id)
self.assertTrue(result)
def test_999_tearDown(self):
self.conn.terminate_instances([self.data['instance'].id])
self.conn.delete_key_pair(TEST_KEY)
| apache-2.0 |
tedelhourani/ansible | lib/ansible/plugins/inventory/virtualbox.py | 2 | 7488 | # Copyright (c) 2017 Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
DOCUMENTATION = '''
name: virtualbox
plugin_type: inventory
short_description: virtualbox inventory source
description:
- Get inventory hosts from the local virtualbox installation.
- Uses a <name>.vbox.yaml (or .vbox.yml) YAML configuration file.
- The inventory_hostname is always the 'Name' of the virtualbox instance.
options:
running_only:
description: toggles showing all vms vs only those currently running
type: boolean
default: False
settings_password_file:
description: provide a file containing the settings password (equivalent to --settingspwfile)
network_info_path:
description: property path to query for network information (ansible_host)
default: "/VirtualBox/GuestInfo/Net/0/V4/IP"
query:
description: create vars from virtualbox properties
type: dictionary
default: {}
compose:
description: create vars from jinja2 expressions, these are created AFTER the query block
type: dictionary
default: {}
groups:
description: add hosts to group based on Jinja2 conditionals, these also run after query block
type: dictionary
default: {}
'''
EXAMPLES = '''
# file must be named vbox.yaml or vbox.yml
simple_config_file:
plugin: virtualbox
  settings_password_file: /etc/virtualbox/secrets
query:
logged_in_users: /VirtualBox/GuestInfo/OS/LoggedInUsersList
compose:
ansible_connection: ('indows' in vbox_Guest_OS)|ternary('winrm', 'ssh')
'''
import os
from collections import MutableMapping
from subprocess import Popen, PIPE
from ansible.errors import AnsibleParserError
from ansible.module_utils._text import to_bytes, to_native, to_text
from ansible.plugins.inventory import BaseInventoryPlugin
class InventoryModule(BaseInventoryPlugin):
''' Host inventory parser for ansible using local virtualbox. '''
NAME = 'virtualbox'
VBOX = b"VBoxManage"
def _query_vbox_data(self, host, property_path):
ret = None
try:
cmd = [self.VBOX, b'guestproperty', b'get', to_bytes(host, errors='surrogate_or_strict'), to_bytes(property_path, errors='surrogate_or_strict')]
x = Popen(cmd, stdout=PIPE)
ipinfo = to_text(x.stdout.read(), errors='surrogate_or_strict')
if 'Value' in ipinfo:
a, ip = ipinfo.split(':', 1)
ret = ip.strip()
except:
pass
return ret
def _set_variables(self, hostvars, data):
# set vars in inventory from hostvars
for host in hostvars:
# create vars from vbox properties
if data.get('query') and isinstance(data['query'], MutableMapping):
for varname in data['query']:
hostvars[host][varname] = self._query_vbox_data(host, data['query'][varname])
# create composite vars
self._set_composite_vars(data.get('compose'), hostvars, host)
# actually update inventory
for key in hostvars[host]:
self.inventory.set_variable(host, key, hostvars[host][key])
# constructed groups based on conditionals
self._add_host_to_composed_groups(data.get('groups'), hostvars, host)
def _populate_from_source(self, source_data, config_data):
hostvars = {}
prevkey = pref_k = ''
current_host = None
# needed to possibly set ansible_host
netinfo = config_data.get('network_info_path', "/VirtualBox/GuestInfo/Net/0/V4/IP")
for line in source_data:
try:
k, v = line.split(':', 1)
except:
                # skip lines that cannot be split
continue
if k.strip() == '':
# skip empty
continue
v = v.strip()
# found host
if k.startswith('Name') and ',' not in v: # some setting strings appear in Name
current_host = v
if current_host not in hostvars:
hostvars[current_host] = {}
self.inventory.add_host(current_host)
# try to get network info
netdata = self._query_vbox_data(current_host, netinfo)
if netdata:
self.inventory.set_variable(current_host, 'ansible_host', netdata)
# found groups
elif k == 'Groups':
for group in v.split('/'):
if group:
self.inventory.add_group(group)
self.inventory.add_child(group, current_host)
continue
else:
# found vars, accumulate in hostvars for clean inventory set
pref_k = 'vbox_' + k.strip().replace(' ', '_')
if k.startswith(' '):
if prevkey not in hostvars[current_host]:
hostvars[current_host][prevkey] = {}
hostvars[current_host][prevkey][pref_k] = v
else:
if v != '':
hostvars[current_host][pref_k] = v
prevkey = pref_k
self._set_variables(hostvars, config_data)
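    # Illustrative fragment of the `VBoxManage list -l` output this parser walks
    # (values are made up); lines starting with whitespace are folded under the
    # previous vbox_ variable:
    #
    #   Name:            testvm
    #   Groups:          /linux/dev
    #   Guest OS:        Ubuntu (64-bit)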
def verify_file(self, path):
valid = False
if super(InventoryModule, self).verify_file(path):
if path.endswith(('.vbox.yaml', '.vbox.yml')):
valid = True
return valid
def parse(self, inventory, loader, path, cache=True):
super(InventoryModule, self).parse(inventory, loader, path)
cache_key = self.get_cache_prefix(path)
# file is config file
try:
config_data = self.loader.load_from_file(path)
except Exception as e:
raise AnsibleParserError(to_native(e))
if not config_data or config_data.get('plugin') != self.NAME:
# this is not my config file
raise AnsibleParserError("Incorrect plugin name in file: %s" % config_data.get('plugin', 'none found'))
source_data = None
if cache and cache_key in self._cache:
try:
source_data = self._cache[cache_key]
except KeyError:
pass
if not source_data:
b_pwfile = to_bytes(config_data.get('settings_password_file'), errors='surrogate_or_strict')
running = config_data.get('running_only', False)
# start getting data
cmd = [self.VBOX, b'list', b'-l']
if running:
cmd.append(b'runningvms')
else:
cmd.append(b'vms')
if b_pwfile and os.path.exists(b_pwfile):
cmd.append(b'--settingspwfile')
cmd.append(b_pwfile)
try:
p = Popen(cmd, stdout=PIPE)
except Exception as e:
                raise AnsibleParserError(to_native(e))
source_data = p.stdout.read()
self._cache[cache_key] = to_text(source_data, errors='surrogate_or_strict')
self._populate_from_source(source_data.splitlines(), config_data)
| gpl-3.0 |
hurricup/intellij-community | python/testData/debug/test_dataframe.py | 23 | 1309 | import pandas as pd
import numpy as np
df1 = pd.DataFrame({'row': [0, 1, 2],
'One_X': [1.1, 1.1, 1.1],
'One_Y': [1.2, 1.2, 1.2],
'Two_X': [1.11, 1.11, 1.11],
'Two_Y': [1.22, 1.22, 1.22]})
print(df1) ###line 8
df2 = pd.DataFrame({'row': [0, 1, 2],
'One_X': [1.1, 1.1, 1.1],
'One_Y': [1.2, 1.2, 1.2],
'Two_X': [1.11, 1.11, 1.11],
'Two_Y': [1.22, 1.22, 1.22],
'LABELS': ['A', 'B', 'C']})
print(df2) ##line 16
df3 = pd.DataFrame(data={'Province' : ['ON','QC','BC','AL','AL','MN','ON'],
'City' : ['Toronto','Montreal','Vancouver','Calgary','Edmonton','Winnipeg','Windsor'],
'Sales' : [13,6,16,8,4,3,1]})
table = pd.pivot_table(df3,values=['Sales'],index=['Province'],columns=['City'],aggfunc=np.sum,margins=True)
table.stack('City')
print(df3)
df4 = pd.DataFrame({'row': np.random.random(10000),
'One_X': np.random.random(10000),
'One_Y': np.random.random(10000),
'Two_X': np.random.random(10000),
'Two_Y': np.random.random(10000),
'LABELS': ['A'] * 10000})
print(df4) ##line 31
| apache-2.0 |
tuffery/Frog2 | frowns/Smarts/Expressions.py | 3 | 3374 | """A set of objects that can create logical expressions via
combining logical operators and Smarts Primitives.
"""
class NotMatch:
def __init__(self, child):
self.child = child
def __eq__(self, obj):
return not self.child == obj
def __str__(self):
return "not (%s)" % (self.child,)
class AndMatch:
def __init__(self, left, right):
self.left = left
self.right = right
def __eq__(self, obj):
return self.left == obj and self.right == obj
def __str__(self):
return "AND(%s, %s)" % (self.left, self.right)
class OrMatch:
def __init__(self, left, right):
self.left = left
self.right = right
def __eq__(self, obj):
return (self.left == obj) or (self.right == obj)
def __str__(self):
return "OR(%s, %s)" % (self.left, self.right)
bool_unary_not = 76
bool_strong_and = 77
bool_or = 78
bool_weak_and = 79
binary_operators = [bool_strong_and, bool_or, bool_weak_and]
boolean_operators = binary_operators + [bool_unary_not]
text_to_bool = {
"&": bool_strong_and,
",": bool_or,
";": bool_weak_and,
"!": bool_unary_not,
}
class ExpressionList:
def __init__(self):
self.matchers = []
def __nonzero__(self):
return len(self.matchers) != 0
def add_matcher(self, obj):
# see if the matcher is not in the boolean list
if self.matchers:
target = self.matchers[-1]
for _matcher in boolean_operators:
if target is _matcher:
break
else:
self.matchers.append(bool_strong_and)
#if self.matchers and self.matchers[-1] not in boolean_operators:
# self.matchers.append(bool_strong_and)
self.matchers.append(obj)
def add_operator(self, op):
assert op in binary_operators or op == bool_unary_not
if __debug__:
if self.matchers:
if op in binary_operators:
s = self.matchers[-1]
for _matcher in binary_operators:
assert s is not _matcher
else:
for _matcher in binary_operators:
assert op is not _matcher
self.matchers.append(op)
def make_matcher(self):
matchers = self.matchers[:]
i = 0
while i < len(matchers):
if matchers[i] is bool_unary_not:
matchers[i:i+2] = [NotMatch(matchers[i+1])]
else:
i = i + 1
i = 1
while i < len(matchers):
if matchers[i] is bool_strong_and:
matchers[i-1:i+2] = [AndMatch(matchers[i-1], matchers[i+1])]
else:
i = i + 1
i = 1
while i < len(matchers):
if matchers[i] is bool_or:
matchers[i-1:i+2] = [OrMatch(matchers[i-1], matchers[i+1])]
else:
i = i + 1
i = 1
while i < len(matchers):
if matchers[i] is bool_weak_and:
matchers[i-1:i+2] = [AndMatch(matchers[i-1], matchers[i+1])]
else:
i = i + 1
assert len(matchers) == 1, matchers
return matchers[0]
class AtomExpression(ExpressionList):
pass
class BondExpression(ExpressionList):
pass
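# Usage sketch (illustrative, not in the original module): operators bind in the
# SMARTS order handled by make_matcher -- '!' first, then '&', then ',', then ';'.
# primitive_a and primitive_b are assumed objects implementing __eq__ against atoms.
#
#   expr = AtomExpression()
#   expr.add_matcher(primitive_a)
#   expr.add_operator(text_to_bool[','])
#   expr.add_matcher(primitive_b)
#   matcher = expr.make_matcher()   # OR(primitive_a, primitive_b)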
| gpl-3.0 |
slohse/ansible | lib/ansible/modules/cloud/google/gcp_spanner_database.py | 10 | 9062 | #!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright (C) 2017 Google
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
# ----------------------------------------------------------------------------
#
# *** AUTO GENERATED CODE *** AUTO GENERATED CODE ***
#
# ----------------------------------------------------------------------------
#
# This file is automatically generated by Magic Modules and manual
# changes will be clobbered when the file is regenerated.
#
# Please read more about how to change this file at
# https://www.github.com/GoogleCloudPlatform/magic-modules
#
# ----------------------------------------------------------------------------
from __future__ import absolute_import, division, print_function
__metaclass__ = type
################################################################################
# Documentation
################################################################################
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ["preview"],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: gcp_spanner_database
description:
- A Cloud Spanner Database which is hosted on a Spanner instance.
short_description: Creates a GCP Database
version_added: 2.7
author: Google Inc. (@googlecloudplatform)
requirements:
- python >= 2.6
- requests >= 2.18.4
- google-auth >= 1.3.0
options:
state:
description:
- Whether the given object should exist in GCP
choices: ['present', 'absent']
default: 'present'
name:
description:
- A unique identifier for the database, which cannot be changed after the instance
is created. Values are of the form projects/<project>/instances/[a-z][-a-z0-9]*[a-z0-9].
The final segment of the name must be between 6 and 30 characters in length.
required: false
extra_statements:
description:
- 'An optional list of DDL statements to run inside the newly created database. Statements
can create tables, indexes, etc. These statements execute atomically with the creation
of the database: if there is an error in any statement, the database is not created.'
required: false
instance:
description:
- The instance to create the database on.
required: true
extends_documentation_fragment: gcp
'''
EXAMPLES = '''
- name: create a instance
gcp_spanner_instance:
name: "instance-database"
display_name: My Spanner Instance
node_count: 2
labels:
cost_center: ti-1700004
config: regional-us-central1
project: "{{ gcp_project }}"
auth_kind: "{{ gcp_cred_kind }}"
service_account_file: "{{ gcp_cred_file }}"
state: present
register: instance
- name: create a database
gcp_spanner_database:
name: webstore
instance: "{{ instance }}"
project: "test_project"
auth_kind: "service_account"
service_account_file: "/tmp/auth.pem"
state: present
'''
RETURN = '''
name:
description:
- A unique identifier for the database, which cannot be changed after the instance
is created. Values are of the form projects/<project>/instances/[a-z][-a-z0-9]*[a-z0-9].
The final segment of the name must be between 6 and 30 characters in length.
returned: success
type: str
extra_statements:
description:
- 'An optional list of DDL statements to run inside the newly created database. Statements
can create tables, indexes, etc. These statements execute atomically with the creation
of the database: if there is an error in any statement, the database is not created.'
returned: success
type: list
instance:
description:
- The instance to create the database on.
returned: success
type: dict
'''
################################################################################
# Imports
################################################################################
from ansible.module_utils.gcp_utils import navigate_hash, GcpSession, GcpModule, GcpRequest, replace_resource_dict
import json
################################################################################
# Main
################################################################################
def main():
"""Main function"""
module = GcpModule(
argument_spec=dict(
state=dict(default='present', choices=['present', 'absent'], type='str'),
name=dict(type='str'),
extra_statements=dict(type='list', elements='str'),
instance=dict(required=True, type='dict')
)
)
if not module.params['scopes']:
module.params['scopes'] = ['https://www.googleapis.com/auth/spanner.admin']
state = module.params['state']
fetch = fetch_resource(module, self_link(module))
changed = False
if fetch:
if state == 'present':
if is_different(module, fetch):
fetch = update(module, self_link(module))
changed = True
else:
delete(module, self_link(module))
fetch = {}
changed = True
else:
if state == 'present':
fetch = create(module, collection(module))
changed = True
else:
fetch = {}
fetch.update({'changed': changed})
module.exit_json(**fetch)
def create(module, link):
auth = GcpSession(module, 'spanner')
return return_if_object(module, auth.post(link, resource_to_request(module)))
def update(module, link):
auth = GcpSession(module, 'spanner')
return return_if_object(module, auth.put(link, resource_to_request(module)))
def delete(module, link):
auth = GcpSession(module, 'spanner')
return return_if_object(module, auth.delete(link))
def resource_to_request(module):
request = {
u'name': module.params.get('name'),
u'extraStatements': module.params.get('extra_statements')
}
request = encode_request(request, module)
return_vals = {}
for k, v in request.items():
if v:
return_vals[k] = v
return return_vals
def fetch_resource(module, link):
auth = GcpSession(module, 'spanner')
return return_if_object(module, auth.get(link))
def self_link(module):
res = {
'project': module.params['project'],
'instance': replace_resource_dict(module.params['instance'], 'name'),
'name': module.params['name']
}
return "https://spanner.googleapis.com/v1/projects/{project}/instances/{instance}/databases/{name}".format(**res)
def collection(module):
res = {
'project': module.params['project'],
'instance': replace_resource_dict(module.params['instance'], 'name')
}
return "https://spanner.googleapis.com/v1/projects/{project}/instances/{instance}/databases".format(**res)
def return_if_object(module, response):
# If not found, return nothing.
if response.status_code == 404:
return None
# If no content, return nothing.
if response.status_code == 204:
return None
try:
module.raise_for_status(response)
result = response.json()
except getattr(json.decoder, 'JSONDecodeError', ValueError) as inst:
module.fail_json(msg="Invalid JSON response with error: %s" % inst)
result = decode_response(result, module)
if navigate_hash(result, ['error', 'errors']):
module.fail_json(msg=navigate_hash(result, ['error', 'errors']))
return result
def is_different(module, response):
request = resource_to_request(module)
response = response_to_hash(module, response)
request = decode_response(request, module)
# Remove all output-only from response.
response_vals = {}
for k, v in response.items():
if k in request:
response_vals[k] = v
request_vals = {}
for k, v in request.items():
if k in response:
request_vals[k] = v
return GcpRequest(request_vals) != GcpRequest(response_vals)
# Remove unnecessary properties from the response.
# This is for doing comparisons with Ansible's current parameters.
def response_to_hash(module, response):
return {
u'name': response.get(u'name'),
u'extraStatements': module.params.get('extra_statements')
}
def decode_response(response, module):
if not response:
return response
if 'name' not in response:
return response
if '/operations/' in response['name']:
return response
response['name'] = response['name'].split('/')[-1]
return response
def encode_request(request, module):
request['create_statement'] = "CREATE DATABASE `{0}`".format(module.params['name'])
del request['name']
return request
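# e.g. for module.params['name'] == 'webstore' the request gains
# create_statement "CREATE DATABASE `webstore`" and drops the 'name' key.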
if __name__ == '__main__':
main()
| gpl-3.0 |
surhudm/scipy | scipy/integrate/__init__.py | 35 | 2256 | """
=============================================
Integration and ODEs (:mod:`scipy.integrate`)
=============================================
.. currentmodule:: scipy.integrate
Integrating functions, given function object
============================================
.. autosummary::
:toctree: generated/
quad -- General purpose integration
dblquad -- General purpose double integration
tplquad -- General purpose triple integration
nquad -- General purpose n-dimensional integration
fixed_quad -- Integrate func(x) using Gaussian quadrature of order n
quadrature -- Integrate with given tolerance using Gaussian quadrature
romberg -- Integrate func using Romberg integration
quad_explain -- Print information for use of quad
newton_cotes -- Weights and error coefficient for Newton-Cotes integration
IntegrationWarning -- Warning on issues during integration
Integrating functions, given fixed samples
==========================================
.. autosummary::
:toctree: generated/
trapz -- Use trapezoidal rule to compute integral.
cumtrapz -- Use trapezoidal rule to cumulatively compute integral.
simps -- Use Simpson's rule to compute integral from samples.
romb -- Use Romberg Integration to compute integral from
-- (2**k + 1) evenly-spaced samples.
.. seealso::
:mod:`scipy.special` for orthogonal polynomials (special) for Gaussian
quadrature roots and weights for other weighting factors and regions.
Integrators of ODE systems
==========================
.. autosummary::
:toctree: generated/
odeint -- General integration of ordinary differential equations.
ode -- Integrate ODE using VODE and ZVODE routines.
complex_ode -- Convert a complex-valued ODE to real-valued and integrate.
solve_bvp -- Solve a boundary value problem for a system of ODEs.
"""
from __future__ import division, print_function, absolute_import
from .quadrature import *
from .odepack import *
from .quadpack import *
from ._ode import *
from ._bvp import solve_bvp
__all__ = [s for s in dir() if not s.startswith('_')]
from numpy.testing import Tester
test = Tester().test
| bsd-3-clause |
willhardy/django | tests/model_meta/models.py | 69 | 5124 | from django.contrib.contenttypes.fields import (
GenericForeignKey, GenericRelation,
)
from django.contrib.contenttypes.models import ContentType
from django.db import models
class Relation(models.Model):
pass
class AbstractPerson(models.Model):
# DATA fields
data_abstract = models.CharField(max_length=10)
fk_abstract = models.ForeignKey(Relation, models.CASCADE, related_name='fk_abstract_rel')
# M2M fields
m2m_abstract = models.ManyToManyField(Relation, related_name='m2m_abstract_rel')
friends_abstract = models.ManyToManyField('self', related_name='friends_abstract', symmetrical=True)
following_abstract = models.ManyToManyField('self', related_name='followers_abstract', symmetrical=False)
# VIRTUAL fields
data_not_concrete_abstract = models.ForeignObject(
Relation,
on_delete=models.CASCADE,
from_fields=['abstract_non_concrete_id'],
to_fields=['id'],
related_name='fo_abstract_rel',
)
# GFK fields
content_type_abstract = models.ForeignKey(ContentType, models.CASCADE, related_name='+')
object_id_abstract = models.PositiveIntegerField()
content_object_abstract = GenericForeignKey('content_type_abstract', 'object_id_abstract')
# GR fields
generic_relation_abstract = GenericRelation(Relation)
class Meta:
abstract = True
class BasePerson(AbstractPerson):
# DATA fields
data_base = models.CharField(max_length=10)
fk_base = models.ForeignKey(Relation, models.CASCADE, related_name='fk_base_rel')
# M2M fields
m2m_base = models.ManyToManyField(Relation, related_name='m2m_base_rel')
friends_base = models.ManyToManyField('self', related_name='friends_base', symmetrical=True)
following_base = models.ManyToManyField('self', related_name='followers_base', symmetrical=False)
# VIRTUAL fields
data_not_concrete_base = models.ForeignObject(
Relation,
on_delete=models.CASCADE,
from_fields=['base_non_concrete_id'],
to_fields=['id'],
related_name='fo_base_rel',
)
# GFK fields
content_type_base = models.ForeignKey(ContentType, models.CASCADE, related_name='+')
object_id_base = models.PositiveIntegerField()
content_object_base = GenericForeignKey('content_type_base', 'object_id_base')
# GR fields
generic_relation_base = GenericRelation(Relation)
class Person(BasePerson):
# DATA fields
data_inherited = models.CharField(max_length=10)
fk_inherited = models.ForeignKey(Relation, models.CASCADE, related_name='fk_concrete_rel')
# M2M Fields
m2m_inherited = models.ManyToManyField(Relation, related_name='m2m_concrete_rel')
friends_inherited = models.ManyToManyField('self', related_name='friends_concrete', symmetrical=True)
following_inherited = models.ManyToManyField('self', related_name='followers_concrete', symmetrical=False)
# VIRTUAL fields
data_not_concrete_inherited = models.ForeignObject(
Relation,
on_delete=models.CASCADE,
from_fields=['model_non_concrete_id'],
to_fields=['id'],
related_name='fo_concrete_rel',
)
# GFK fields
content_type_concrete = models.ForeignKey(ContentType, models.CASCADE, related_name='+')
object_id_concrete = models.PositiveIntegerField()
content_object_concrete = GenericForeignKey('content_type_concrete', 'object_id_concrete')
# GR fields
generic_relation_concrete = GenericRelation(Relation)
class ProxyPerson(Person):
class Meta:
proxy = True
class PersonThroughProxySubclass(ProxyPerson):
pass
class Relating(models.Model):
# ForeignKey to BasePerson
baseperson = models.ForeignKey(BasePerson, models.CASCADE, related_name='relating_baseperson')
baseperson_hidden = models.ForeignKey(BasePerson, models.CASCADE, related_name='+')
# ForeignKey to Person
person = models.ForeignKey(Person, models.CASCADE, related_name='relating_person')
person_hidden = models.ForeignKey(Person, models.CASCADE, related_name='+')
# ForeignKey to ProxyPerson
proxyperson = models.ForeignKey(ProxyPerson, models.CASCADE, related_name='relating_proxyperson')
proxyperson_hidden = models.ForeignKey(ProxyPerson, models.CASCADE, related_name='relating_proxyperson_hidden+')
# ManyToManyField to BasePerson
basepeople = models.ManyToManyField(BasePerson, related_name='relating_basepeople')
basepeople_hidden = models.ManyToManyField(BasePerson, related_name='+')
# ManyToManyField to Person
people = models.ManyToManyField(Person, related_name='relating_people')
people_hidden = models.ManyToManyField(Person, related_name='+')
# ParentListTests models
class CommonAncestor(models.Model):
pass
class FirstParent(CommonAncestor):
first_ancestor = models.OneToOneField(CommonAncestor, models.SET_NULL, primary_key=True, parent_link=True)
class SecondParent(CommonAncestor):
second_ancestor = models.OneToOneField(CommonAncestor, models.SET_NULL, primary_key=True, parent_link=True)
class Child(FirstParent, SecondParent):
pass
| bsd-3-clause |
eHealthAfrica/ureport | ureport/tests.py | 2 | 12917 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from dash.api import API
from django.conf import settings
from django.core.urlresolvers import reverse
from django.utils import timezone
from smartmin.tests import SmartminTest
from django.contrib.auth.models import User
from dash.orgs.middleware import SetOrgMiddleware
from mock import Mock, patch
from dash.orgs.models import Org
from django.http.request import HttpRequest
from ureport.jobs.models import JobSource
from ureport.polls.models import Poll
from ureport.public.views import IndexView
from temba_client.v1 import TembaClient
from temba_client.v1.types import Result, Flow, Group, Boundary as TembaBoundary, Field as TembaContactField
from temba_client.v1.types import Contact as TembaContact, Group as TembaGroup
from temba_client.v1.types import Geometry as TembaGeometry
class MockAPI(API): # pragma: no cover
def get_group(self, name):
return dict(group=8, name=name, size=120)
def get_country_geojson(self):
return dict(
type="FeatureCollection",
features=[
dict(
type='Feature',
properties=dict(
id="R3713501",
level=1,
name="Abia"
),
geometry=dict(
type="MultiPolygon",
coordinates=[
[
[
[7, 5]
]
]
]
)
)
]
)
def get_geojson_by_parent_id(self, parent_id):
return dict(type="FeatureCollection",
features=[dict(type='Feature',
properties=dict(id="R3713502",
level=2,
name="Aba North"),
geometry=dict(type="MultiPolygon",
coordinates=[[[[8, 4]]]]
)
)
]
)
def get_ruleset_results(self, ruleset_id, segment=None):
return [dict(open_ended=False,
set=3462,
unset=3694,
categories=[dict(count=2210,
label='Yes'
),
dict(count=1252,
label='No'
)
],
label='All')
]
def get_contact_field_results(self, contact_field_label, segment=None):
return [
dict(
open_ended=False,
set=3462,
unset=3694,
categories=[
dict(
count=2210,
label='Yes'
),
dict(
count=1252,
label='No'
)
],
label='All'
)
]
def get_flows(self, filter=None):
return [
dict(
runs=300,
completed_runs=120,
name='Flow 1',
flow_uuid='uuid-25',
participants=300,
rulesets=[
dict(node='386fc244-cc98-476a-b05e-f8a431a4dd41',
id=8435,
label='Does your community have power'
)
]
)
]
class MockTembaClient(TembaClient):
def get_boundaries(self, pager=None):
geometry = TembaGeometry.create(type='MultiPolygon', coordinates=['COORDINATES'])
return [TembaBoundary.create(boundary='R12345', name='Nigeria', parent=None, level=0, geometry=geometry),
TembaBoundary.create(boundary='R23456', name='Lagos', parent="R12345", level=1, geometry=geometry)]
def get_fields(self, pager=None):
return [TembaContactField.create(key='occupation', label='Activité', value_type='T')]
def get_contacts(self, uuids=None, urns=None, groups=None, after=None, before=None, pager=None):
return [TembaContact.create(
uuid='000-001', name="Ann", urns=['tel:1234'], groups=['000-002'],
fields=dict(state="Lagos", lga="Oyo", gender='Female', born="1990"),
language='eng', modified_on=timezone.now())]
def get_groups(self, uuids=None, name=None, pager=None):
return Group.deserialize_list([dict(uuid="uuid-8", name=name, size=120)])
def get_results(self, ruleset_id=None, contact_field=None, segment=None):
return Result.deserialize_list([dict(open_ended=False,
set=3462,
unset=3694,
categories=[dict(count=2210, label='Yes'),
dict(count=1252, label='No')],
label='All')])
def get_flows(self, uuids=None, archived=None, labels=None, before=None, after=None, pager=None):
return Flow.deserialize_list([dict(runs=300,
completed_runs=120,
name='Flow 1',
uuid='uuid-25',
participants=300,
labels="",
archived=False,
created_on="2015-04-08T12:48:44.320Z",
rulesets=[dict(node='uuid-8435', id="8435", response_type="C",
label='Does your community have power')]
)])
def get_flow(self, uuid):
return Flow.deserialize(dict(runs=300,
completed_runs=120,
name='Flow 1',
uuid='uuid-25',
participants=300,
labels="",
archived=False,
created_on="2015-04-08T12:48:44.320Z",
rulesets=[dict(node='uuid-8435', id="8435", response_type="C",
label='Does your community have power')]
))
class DashTest(SmartminTest):
def setUp(self):
self.superuser = User.objects.create_superuser(username="super", email="super@user.com", password="super")
self.admin = self.create_user("Administrator")
def create_org(self, subdomain, user):
email = subdomain + "@user.com"
first_name = subdomain + "_First"
last_name = subdomain + "_Last"
name = subdomain
orgs = Org.objects.filter(subdomain=subdomain)
if orgs:
            org = orgs[0]
org.name = name
org.save()
else:
org = Org.objects.create(subdomain=subdomain, name=name, created_by=user, modified_by=user)
org.administrators.add(user)
self.assertEquals(Org.objects.filter(subdomain=subdomain).count(), 1)
return Org.objects.get(subdomain=subdomain)
def create_poll(self, org, title, flow_uuid, category, user, featured=False):
now = timezone.now()
poll = Poll.objects.create(flow_uuid=flow_uuid,
title=title,
category=category,
is_featured=featured,
org=org,
poll_date=now,
created_by=user,
modified_by=user)
return poll
class UreportJobsTest(DashTest):
FB_SOURCE = 'http://www.facebook.com/%s'
TW_SOURCE = 'http://twitter.com/%s'
RSS_SOURCE = 'http://dummy.rss.com/%s.xml'
def create_fb_job_source(self, org, fb_identifier):
return JobSource.objects.create(source_url=UreportJobsTest.FB_SOURCE % fb_identifier,
source_type=JobSource.FACEBOOK,
org=org,
created_by=self.admin,
modified_by=self.admin)
def create_tw_job_source(self, org, tw_identifier):
return JobSource.objects.create(source_url=UreportJobsTest.TW_SOURCE % tw_identifier,
widget_id='WIDGETID_%s' % tw_identifier,
source_type=JobSource.TWITTER,
org=org,
created_by=self.admin,
modified_by=self.admin)
def create_rss_job_source(self, org, rss_identifier):
return JobSource.objects.create(source_url=UreportJobsTest.RSS_SOURCE % rss_identifier,
source_type=JobSource.RSS,
org=org,
created_by=self.admin,
modified_by=self.admin)
class SetOrgMiddlewareTest(DashTest):
def setUp(self):
super(SetOrgMiddlewareTest, self).setUp()
self.middleware = SetOrgMiddleware()
self.request = Mock(spec=HttpRequest)
self.request.user = User.objects.get(pk=-1)
self.request.path = '/'
self.request.get_host.return_value="ureport.io"
self.request.META = dict(HTTP_HOST=None)
def test_process_request_without_org(self):
response = self.middleware.process_request(self.request)
self.assertEqual(response, None)
self.assertEqual(self.request.org, None)
def test_process_request_with_org(self):
ug_org = self.create_org('uganda', self.admin)
ug_dash_url = ug_org.subdomain + ".ureport.io"
self.request.get_host.return_value = ug_dash_url
response = self.middleware.process_request(self.request)
self.assertEqual(response, None)
self.assertEqual(self.request.org, ug_org)
self.request.user = self.admin
response = self.middleware.process_request(self.request)
self.assertEqual(response, None)
self.assertEqual(self.request.org, ug_org)
self.assertEquals(self.request.user.get_org(), ug_org)
# test invalid subdomain
wrong_subdomain_url = "blabla.ureport.io"
        self.request.get_host.return_value = wrong_subdomain_url
response = self.middleware.process_request(self.request)
self.assertEqual(response, None)
self.assertEqual(self.request.org, None)
def test_process_view(self):
with patch('django.core.urlresolvers.ResolverMatch') as resolver_mock:
resolver_mock.url_name.return_value = "public.index"
self.request.resolver_match = resolver_mock
ug_org = self.create_org('uganda', self.admin)
ug_dash_url = ug_org.subdomain + ".ureport.io"
            self.request.get_host.return_value = ug_dash_url
# test invalid subdomain
wrong_subdomain_url = "blabla.ureport.io"
            self.request.get_host.return_value = wrong_subdomain_url
self.request.org = None
response = self.middleware.process_view(self.request, IndexView.as_view(), [], dict())
self.assertEquals(response.status_code, 302)
self.assertEquals(response.url, reverse(settings.SITE_CHOOSER_URL_NAME))
self.assertEqual(self.request.org, None)
self.assertEquals(self.request.user.get_org(), None)
rw_org = self.create_org('rwanda', self.admin)
wrong_subdomain_url = "blabla.ureport.io"
            self.request.get_host.return_value = wrong_subdomain_url
response = self.middleware.process_view(self.request, IndexView.as_view(), [], dict())
self.assertEquals(response.status_code, 302)
self.assertEquals(response.url, reverse(settings.SITE_CHOOSER_URL_NAME))
| agpl-3.0 |
mrquim/repository.mrquim | repo/plugin.video.poseidon/resources/lib/modules/metacache.py | 4 | 3872 | # -*- coding: utf-8 -*-
'''
Poseidon Add-on
Copyright (C) 2016 Poseidon
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
'''
import time,hashlib
try: from sqlite3 import dbapi2 as database
except: from pysqlite2 import dbapi2 as database
from resources.lib.modules import control
def fetch(items, lang, user):
try:
t2 = int(time.time())
dbcon = database.connect(control.metacacheFile)
dbcur = dbcon.cursor()
except:
return items
for i in range(0, len(items)):
try:
dbcur.execute("SELECT * FROM meta WHERE (imdb = '%s' and lang = '%s' and user = '%s' and not imdb = '0') or (tvdb = '%s' and lang = '%s' and user = '%s' and not tvdb = '0')" % (items[i]['imdb'], lang, user, items[i]['tvdb'], lang, user))
match = dbcur.fetchone()
t1 = int(match[5])
update = (abs(t2 - t1) / 3600) >= 720
if update == True: raise Exception()
item = eval(match[4].encode('utf-8'))
            item = dict((k, v) for k, v in item.iteritems() if v != '0')
items[i].update(item)
items[i].update({'metacache': True})
except:
pass
return items
def insert(meta):
try:
control.makeFile(control.dataPath)
dbcon = database.connect(control.metacacheFile)
dbcur = dbcon.cursor()
dbcur.execute("CREATE TABLE IF NOT EXISTS meta (""imdb TEXT, ""tvdb TEXT, ""lang TEXT, ""user TEXT, ""item TEXT, ""time TEXT, ""UNIQUE(imdb, tvdb, lang, user)"");")
t = int(time.time())
for m in meta:
try:
i = repr(m['item'])
try: dbcur.execute("DELETE * FROM meta WHERE (imdb = '%s' and lang = '%s' and user = '%s' and not imdb = '0') or (tvdb = '%s' and lang = '%s' and user = '%s' and not tvdb = '0')" % (m['imdb'], m['lang'], m['user'], m['tvdb'], m['lang'], m['user']))
except: pass
dbcur.execute("INSERT INTO meta Values (?, ?, ?, ?, ?, ?)", (m['imdb'], m['tvdb'], m['lang'], m['user'], i, t))
except:
pass
dbcon.commit()
except:
return
def local(items, link, poster, fanart):
try:
dbcon = database.connect(control.metaFile())
dbcur = dbcon.cursor()
args = [i['imdb'] for i in items]
dbcur.execute('SELECT * FROM mv WHERE imdb IN (%s)' % ', '.join(list(map(lambda arg: "'%s'" % arg, args))))
data = dbcur.fetchall()
except:
return items
for i in range(0, len(items)):
try:
item = items[i]
match = [x for x in data if x[1] == item['imdb']][0]
try:
if poster in item and not item[poster] == '0': raise Exception()
if match[2] == '0': raise Exception()
items[i].update({poster: link % ('300', '/%s.jpg' % match[2])})
except:
pass
try:
if fanart in item and not item[fanart] == '0': raise Exception()
if match[3] == '0': raise Exception()
items[i].update({fanart: link % ('1280', '/%s.jpg' % match[3])})
except:
pass
except:
pass
return items
| gpl-2.0 |
GustaCoin/GustaCoin | contrib/bitrpc/bitrpc.py | 2348 | 7835 | from jsonrpc import ServiceProxy
import sys
import string
# ===== BEGIN USER SETTINGS =====
# if you do not set these you will be prompted for a password for every command
rpcuser = ""
rpcpass = ""
# ====== END USER SETTINGS ======
if rpcpass == "":
access = ServiceProxy("http://127.0.0.1:8332")
else:
access = ServiceProxy("http://"+rpcuser+":"+rpcpass+"@127.0.0.1:8332")
cmd = sys.argv[1].lower()
if cmd == "backupwallet":
try:
path = raw_input("Enter destination path/filename: ")
print access.backupwallet(path)
except:
print "\n---An error occurred---\n"
elif cmd == "getaccount":
try:
addr = raw_input("Enter a Bitcoin address: ")
print access.getaccount(addr)
except:
print "\n---An error occurred---\n"
elif cmd == "getaccountaddress":
try:
acct = raw_input("Enter an account name: ")
print access.getaccountaddress(acct)
except:
print "\n---An error occurred---\n"
elif cmd == "getaddressesbyaccount":
try:
acct = raw_input("Enter an account name: ")
print access.getaddressesbyaccount(acct)
except:
print "\n---An error occurred---\n"
elif cmd == "getbalance":
try:
acct = raw_input("Enter an account (optional): ")
mc = raw_input("Minimum confirmations (optional): ")
try:
print access.getbalance(acct, mc)
except:
print access.getbalance()
except:
print "\n---An error occurred---\n"
elif cmd == "getblockbycount":
try:
height = raw_input("Height: ")
print access.getblockbycount(height)
except:
print "\n---An error occurred---\n"
elif cmd == "getblockcount":
try:
print access.getblockcount()
except:
print "\n---An error occurred---\n"
elif cmd == "getblocknumber":
try:
print access.getblocknumber()
except:
print "\n---An error occurred---\n"
elif cmd == "getconnectioncount":
try:
print access.getconnectioncount()
except:
print "\n---An error occurred---\n"
elif cmd == "getdifficulty":
try:
print access.getdifficulty()
except:
print "\n---An error occurred---\n"
elif cmd == "getgenerate":
try:
print access.getgenerate()
except:
print "\n---An error occurred---\n"
elif cmd == "gethashespersec":
try:
print access.gethashespersec()
except:
print "\n---An error occurred---\n"
elif cmd == "getinfo":
try:
print access.getinfo()
except:
print "\n---An error occurred---\n"
elif cmd == "getnewaddress":
try:
acct = raw_input("Enter an account name: ")
try:
print access.getnewaddress(acct)
except:
print access.getnewaddress()
except:
print "\n---An error occurred---\n"
elif cmd == "getreceivedbyaccount":
try:
acct = raw_input("Enter an account (optional): ")
mc = raw_input("Minimum confirmations (optional): ")
try:
print access.getreceivedbyaccount(acct, mc)
except:
print access.getreceivedbyaccount()
except:
print "\n---An error occurred---\n"
elif cmd == "getreceivedbyaddress":
try:
addr = raw_input("Enter a Bitcoin address (optional): ")
mc = raw_input("Minimum confirmations (optional): ")
try:
print access.getreceivedbyaddress(addr, mc)
except:
print access.getreceivedbyaddress()
except:
print "\n---An error occurred---\n"
elif cmd == "gettransaction":
try:
txid = raw_input("Enter a transaction ID: ")
print access.gettransaction(txid)
except:
print "\n---An error occurred---\n"
elif cmd == "getwork":
try:
data = raw_input("Data (optional): ")
try:
			print access.getwork(data)
		except:
			print access.getwork()
except:
print "\n---An error occurred---\n"
elif cmd == "help":
try:
cmd = raw_input("Command (optional): ")
try:
print access.help(cmd)
except:
print access.help()
except:
print "\n---An error occurred---\n"
elif cmd == "listaccounts":
try:
mc = raw_input("Minimum confirmations (optional): ")
try:
print access.listaccounts(mc)
except:
print access.listaccounts()
except:
print "\n---An error occurred---\n"
elif cmd == "listreceivedbyaccount":
try:
mc = raw_input("Minimum confirmations (optional): ")
incemp = raw_input("Include empty? (true/false, optional): ")
try:
print access.listreceivedbyaccount(mc, incemp)
except:
print access.listreceivedbyaccount()
except:
print "\n---An error occurred---\n"
elif cmd == "listreceivedbyaddress":
try:
mc = raw_input("Minimum confirmations (optional): ")
incemp = raw_input("Include empty? (true/false, optional): ")
try:
print access.listreceivedbyaddress(mc, incemp)
except:
print access.listreceivedbyaddress()
except:
print "\n---An error occurred---\n"
elif cmd == "listtransactions":
try:
acct = raw_input("Account (optional): ")
count = raw_input("Number of transactions (optional): ")
frm = raw_input("Skip (optional):")
try:
print access.listtransactions(acct, count, frm)
except:
print access.listtransactions()
except:
print "\n---An error occurred---\n"
elif cmd == "move":
try:
frm = raw_input("From: ")
to = raw_input("To: ")
amt = raw_input("Amount:")
mc = raw_input("Minimum confirmations (optional): ")
comment = raw_input("Comment (optional): ")
try:
print access.move(frm, to, amt, mc, comment)
except:
print access.move(frm, to, amt)
except:
print "\n---An error occurred---\n"
elif cmd == "sendfrom":
try:
frm = raw_input("From: ")
to = raw_input("To: ")
amt = raw_input("Amount:")
mc = raw_input("Minimum confirmations (optional): ")
comment = raw_input("Comment (optional): ")
commentto = raw_input("Comment-to (optional): ")
try:
print access.sendfrom(frm, to, amt, mc, comment, commentto)
except:
print access.sendfrom(frm, to, amt)
except:
print "\n---An error occurred---\n"
elif cmd == "sendmany":
try:
frm = raw_input("From: ")
to = raw_input("To (in format address1:amount1,address2:amount2,...): ")
mc = raw_input("Minimum confirmations (optional): ")
comment = raw_input("Comment (optional): ")
try:
print access.sendmany(frm,to,mc,comment)
except:
print access.sendmany(frm,to)
except:
print "\n---An error occurred---\n"
elif cmd == "sendtoaddress":
try:
to = raw_input("To (in format address1:amount1,address2:amount2,...): ")
amt = raw_input("Amount:")
comment = raw_input("Comment (optional): ")
commentto = raw_input("Comment-to (optional): ")
try:
print access.sendtoaddress(to,amt,comment,commentto)
except:
print access.sendtoaddress(to,amt)
except:
print "\n---An error occurred---\n"
elif cmd == "setaccount":
try:
addr = raw_input("Address: ")
acct = raw_input("Account:")
print access.setaccount(addr,acct)
except:
print "\n---An error occurred---\n"
elif cmd == "setgenerate":
try:
gen= raw_input("Generate? (true/false): ")
cpus = raw_input("Max processors/cores (-1 for unlimited, optional):")
try:
print access.setgenerate(gen, cpus)
except:
print access.setgenerate(gen)
except:
print "\n---An error occurred---\n"
elif cmd == "settxfee":
try:
amt = raw_input("Amount:")
print access.settxfee(amt)
except:
print "\n---An error occurred---\n"
elif cmd == "stop":
try:
print access.stop()
except:
print "\n---An error occurred---\n"
elif cmd == "validateaddress":
try:
addr = raw_input("Address: ")
print access.validateaddress(addr)
except:
print "\n---An error occurred---\n"
elif cmd == "walletpassphrase":
try:
pwd = raw_input("Enter wallet passphrase: ")
access.walletpassphrase(pwd, 60)
print "\n---Wallet unlocked---\n"
except:
print "\n---An error occurred---\n"
elif cmd == "walletpassphrasechange":
try:
pwd = raw_input("Enter old wallet passphrase: ")
pwd2 = raw_input("Enter new wallet passphrase: ")
access.walletpassphrasechange(pwd, pwd2)
print
print "\n---Passphrase changed---\n"
except:
print
print "\n---An error occurred---\n"
print
else:
print "Command not found or not supported" | mit |
hortonworks/hortonworks-sandbox | desktop/core/ext-py/django-extensions-0.5/django_extensions/management/commands/compile_pyc.py | 17 | 1482 | from django.core.management.base import NoArgsCommand
from django_extensions.management.utils import get_project_root
from random import choice
from optparse import make_option
from os.path import join as _j
import py_compile
import os
class Command(NoArgsCommand):
option_list = NoArgsCommand.option_list + (
make_option('--path', '-p', action='store', dest='path',
help='Specify path to recurse into'),
)
help = "Compile python bytecode files for the project."
requires_model_validation = False
def handle_noargs(self, **options):
project_root = options.get("path", None)
if not project_root:
project_root = get_project_root()
verbose = int(options.get("verbosity", 1))>1
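        # walk the project tree and byte-compile every .py file found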
for root, dirs, files in os.walk(project_root):
for file in files:
ext = os.path.splitext(file)[1]
if ext==".py":
full_path = _j(root, file)
if verbose:
print "%sc" % full_path
py_compile.compile(full_path)
# Backwards compatibility for Django r9110
if not [opt for opt in Command.option_list if opt.dest=='verbosity']:
Command.option_list += (
make_option('--verbosity', '-v', action="store", dest="verbosity",
default='1', type='choice', choices=['0', '1', '2'],
help="Verbosity level; 0=minimal output, 1=normal output, 2=all output"),
)
| apache-2.0 |
neoscoin/neos-electrum-server | src/processor.py | 9 | 7837 | import json
import Queue as queue
import socket
import threading
import time
import sys
from utils import random_string, timestr, print_log
from utils import logger
class Shared:
def __init__(self, config):
self.lock = threading.Lock()
self._stopped = False
self.config = config
self._paused = True
def paused(self):
with self.lock:
return self._paused
def pause(self):
with self.lock:
self._paused = True
def unpause(self):
with self.lock:
self._paused = False
def stop(self):
print_log("Stopping Stratum")
with self.lock:
self._stopped = True
def stopped(self):
with self.lock:
return self._stopped
class Processor(threading.Thread):
def __init__(self):
threading.Thread.__init__(self)
self.daemon = True
self.dispatcher = None
self.queue = queue.Queue()
def process(self, request):
pass
def add_request(self, session, request):
self.queue.put((session, request))
def push_response(self, session, response):
#print "response", response
self.dispatcher.request_dispatcher.push_response(session, response)
def close(self):
pass
def run(self):
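        # worker loop: pull (session, request) pairs off the queue until shutdown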
while not self.shared.stopped():
try:
session, request = self.queue.get(True, timeout=1)
msg_id = request.get('id')
except:
continue
try:
result = self.process(request)
self.push_response(session, {'id': msg_id, 'result': result})
except BaseException, e:
self.push_response(session, {'id': msg_id, 'error':str(e)})
except:
logger.error("process error", exc_info=True)
self.push_response(session, {'id': msg_id, 'error':'unknown error'})
self.close()
class Dispatcher:
def __init__(self, config):
self.shared = Shared(config)
self.request_dispatcher = RequestDispatcher(self.shared)
self.request_dispatcher.start()
self.response_dispatcher = \
ResponseDispatcher(self.shared, self.request_dispatcher)
self.response_dispatcher.start()
def register(self, prefix, processor):
processor.dispatcher = self
processor.shared = self.shared
processor.start()
self.request_dispatcher.processors[prefix] = processor
class RequestDispatcher(threading.Thread):
def __init__(self, shared):
self.shared = shared
threading.Thread.__init__(self)
self.daemon = True
self.request_queue = queue.Queue()
self.response_queue = queue.Queue()
self.lock = threading.Lock()
self.idlock = threading.Lock()
self.sessions = {}
self.processors = {}
self.lastgc = 0
def push_response(self, session, item):
self.response_queue.put((session, item))
def pop_response(self):
return self.response_queue.get()
def push_request(self, session, item):
self.request_queue.put((session, item))
def pop_request(self):
return self.request_queue.get()
def get_session_by_address(self, address):
for x in self.sessions.values():
if x.address == address:
return x
def run(self):
if self.shared is None:
raise TypeError("self.shared not set in Processor")
while not self.shared.stopped():
session, request = self.pop_request()
try:
self.do_dispatch(session, request)
except:
logger.error('dispatch',exc_info=True)
self.collect_garbage()
self.stop()
def stop(self):
pass
def do_dispatch(self, session, request):
""" dispatch request to the relevant processor """
method = request['method']
params = request.get('params', [])
suffix = method.split('.')[-1]
if session is not None:
if suffix == 'subscribe':
if not session.subscribe_to_service(method, params):
return
prefix = request['method'].split('.')[0]
try:
p = self.processors[prefix]
except:
print_log("error: no processor for", prefix)
return
p.add_request(session, request)
if method in ['server.version']:
try:
session.version = params[0]
session.protocol_version = float(params[1])
except:
pass
def get_sessions(self):
with self.lock:
r = self.sessions.values()
return r
def add_session(self, session):
key = session.key()
with self.lock:
self.sessions[key] = session
def remove_session(self, session):
key = session.key()
with self.lock:
self.sessions.pop(key)
def collect_garbage(self):
# only for HTTP sessions.
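        # run at most once a minute; stop HTTP sessions idle past their timeout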
now = time.time()
        if now - self.lastgc < 60.0:
return
self.lastgc = now
for session in self.sessions.values():
if session.name == "HTTP" and (now - session.time) > session.timeout:
session.stop()
class Session:
def __init__(self, dispatcher):
self.dispatcher = dispatcher
self.bp = self.dispatcher.processors['blockchain']
self._stopped = False
self.lock = threading.Lock()
self.subscriptions = []
self.address = ''
self.name = ''
self.version = 'unknown'
self.protocol_version = 0.
self.time = time.time()
self.max_subscriptions = dispatcher.shared.config.getint('server', 'max_subscriptions')
threading.Timer(2, self.info).start()
def key(self):
return self.address
# Debugging method. Doesn't need to be threadsafe.
def info(self):
if self.subscriptions:
print_log("%4s" % self.name,
"%21s" % self.address,
"%4d" % len(self.subscriptions),
self.version)
def stop(self):
with self.lock:
if self._stopped:
return
self._stopped = True
self.shutdown()
self.dispatcher.remove_session(self)
self.stop_subscriptions()
def shutdown(self):
pass
def stopped(self):
with self.lock:
return self._stopped
def subscribe_to_service(self, method, params):
if self.stopped():
return False
if len(self.subscriptions) > self.max_subscriptions:
print_log("max subscriptions reached", self.address)
return False
# append to self.subscriptions only if this does not raise
self.bp.do_subscribe(method, params, self)
with self.lock:
if (method, params) not in self.subscriptions:
self.subscriptions.append((method,params))
return True
def stop_subscriptions(self):
with self.lock:
s = self.subscriptions[:]
for method, params in s:
self.bp.do_unsubscribe(method, params, self)
with self.lock:
self.subscriptions = []
class ResponseDispatcher(threading.Thread):
def __init__(self, shared, request_dispatcher):
self.shared = shared
self.request_dispatcher = request_dispatcher
threading.Thread.__init__(self)
self.daemon = True
def run(self):
while not self.shared.stopped():
session, response = self.request_dispatcher.pop_response()
session.send_response(response)
| agpl-3.0 |
aklepner/iksh | sites/all/modules/contrib/proj4js/lib/proj4js/tools/pjjs.py | 250 | 3061 | #!/usr/bin/env python
#
# TODO explain
#
# -- Copyright 2007 IGN France / Geoportail project --
#
import sys
import os
import re
SUFFIX_JAVASCRIPT = ".js"
def _pjcat2js_remove(rezDirectory,catName,targetDirectory):
pjCatFilename = os.path.join(rezDirectory, catName)
pjCat = open(pjCatFilename,'r')
comment_re = re.compile("^#")
srsdef_re = re.compile("^<([^>]*)>.* <>$")
l = pjCat.readline()
while len(l) != 0:
if comment_re.search(l) is None:
			srsdef_mo = srsdef_re.match(l)
			# skip blank or malformed catalog lines
			if srsdef_mo is not None:
				srsdef_fn = os.path.join(targetDirectory, catName+srsdef_mo.group(1)+".js")
				if os.path.exists(srsdef_fn):
					os.remove(srsdef_fn)
l = pjCat.readline()
pjCat.close()
def _pjcat2js_make(rezDirectory,catName,targetDirectory):
pjCatFilename = os.path.join(rezDirectory, catName)
pjCat = open(pjCatFilename,'r')
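	# catalog lines look like: <4326> +proj=longlat +ellps=WGS84 +datum=WGS84 +no_defs <>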
comment_re = re.compile("^#")
srsdef_re = re.compile("^<([^>]*)> *(.*) <>$")
l = pjCat.readline()
while len(l) != 0:
if comment_re.search(l) is None:
			srsdef_mo = srsdef_re.match(l)
			# skip blank or malformed catalog lines
			if srsdef_mo is not None:
				srsdef_fn = os.path.join(targetDirectory, catName+srsdef_mo.group(1)+".js")
				srsdef = 'Proj4js.defs["'+catName+':'+srsdef_mo.group(1)+'"]="'+srsdef_mo.group(2)+'";'
				file(srsdef_fn,'w').write(srsdef)
l = pjCat.readline()
pjCat.close()
def pjcat2js_clean(rezDirectory,targetDirectory):
if not os.path.isdir(rezDirectory):
return
if not os.path.isdir(targetDirectory):
return
if os.path.abspath(rezDirectory) == '/':
return
if os.path.abspath(targetDirectory) == '/':
return
rezDirectory_name_len = len(rezDirectory)
for root, dirs, filenames in os.walk(rezDirectory):
if 'CVS' in dirs:
dirs.remove('CVS')
if '.svn' in dirs:
dirs.remove('.svn')
for filename in filenames:
if not filename.endswith(SUFFIX_JAVASCRIPT) and not filename.startswith("."):
filepath = os.path.join(root, filename)[rezDirectory_name_len+1:]
filepath = filepath.replace("\\", "/")
_pjcat2js_remove(rezDirectory,filepath,targetDirectory)
def pjcat2js_run(rezDirectory,targetDirectory):
if not os.path.isdir(rezDirectory):
return
if not os.path.isdir(targetDirectory):
return
if os.path.abspath(rezDirectory) == '/':
return
if os.path.abspath(targetDirectory) == '/':
return
rezDirectory_name_len = len(rezDirectory)
for root, dirs, filenames in os.walk(rezDirectory):
if 'CVS' in dirs:
dirs.remove('CVS')
if '.svn' in dirs:
dirs.remove('.svn')
for filename in filenames:
if not filename.endswith(SUFFIX_JAVASCRIPT) and not filename.startswith("."):
filepath = os.path.join(root, filename)[rezDirectory_name_len+1:]
filepath = filepath.replace("\\", "/")
_pjcat2js_make(rezDirectory,filepath,targetDirectory)
| gpl-2.0 |
cedriclaunay/gaffer | python/GafferSceneUI/SceneWriterUI.py | 1 | 2400 | ##########################################################################
#
# Copyright (c) 2013-2014, Image Engine Design inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above
# copyright notice, this list of conditions and the following
# disclaimer.
#
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided with
# the distribution.
#
# * Neither the name of John Haddon nor the names of
# any other contributors to this software may be used to endorse or
# promote products derived from this software without specific prior
# written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
##########################################################################
import Gaffer
import GafferUI
import GafferScene
GafferUI.PlugValueWidget.registerCreator(
GafferScene.SceneWriter,
"fileName",
lambda plug : GafferUI.PathPlugValueWidget( plug,
path = Gaffer.FileSystemPath( "/", filter = Gaffer.FileSystemPath.createStandardFilter() ),
pathChooserDialogueKeywords = {
"bookmarks" : GafferUI.Bookmarks.acquire( plug, category = "sceneCache" ),
"leaf" : True,
},
),
)
GafferUI.Nodule.registerNodule( GafferScene.SceneWriter, "fileName", lambda plug: None )
GafferUI.Nodule.registerNodule( GafferScene.SceneWriter, "out", lambda plug : None )
| bsd-3-clause |
munyirik/python | cpython/Lib/unittest/test/test_assertions.py | 82 | 16470 | import datetime
import warnings
import weakref
import unittest
from itertools import product
class Test_Assertions(unittest.TestCase):
def test_AlmostEqual(self):
self.assertAlmostEqual(1.00000001, 1.0)
self.assertNotAlmostEqual(1.0000001, 1.0)
self.assertRaises(self.failureException,
self.assertAlmostEqual, 1.0000001, 1.0)
self.assertRaises(self.failureException,
self.assertNotAlmostEqual, 1.00000001, 1.0)
self.assertAlmostEqual(1.1, 1.0, places=0)
self.assertRaises(self.failureException,
self.assertAlmostEqual, 1.1, 1.0, places=1)
self.assertAlmostEqual(0, .1+.1j, places=0)
self.assertNotAlmostEqual(0, .1+.1j, places=1)
self.assertRaises(self.failureException,
self.assertAlmostEqual, 0, .1+.1j, places=1)
self.assertRaises(self.failureException,
self.assertNotAlmostEqual, 0, .1+.1j, places=0)
self.assertAlmostEqual(float('inf'), float('inf'))
self.assertRaises(self.failureException, self.assertNotAlmostEqual,
float('inf'), float('inf'))
    def test_AlmostEqualWithDelta(self):
self.assertAlmostEqual(1.1, 1.0, delta=0.5)
self.assertAlmostEqual(1.0, 1.1, delta=0.5)
self.assertNotAlmostEqual(1.1, 1.0, delta=0.05)
self.assertNotAlmostEqual(1.0, 1.1, delta=0.05)
self.assertAlmostEqual(1.0, 1.0, delta=0.5)
self.assertRaises(self.failureException, self.assertNotAlmostEqual,
1.0, 1.0, delta=0.5)
self.assertRaises(self.failureException, self.assertAlmostEqual,
1.1, 1.0, delta=0.05)
self.assertRaises(self.failureException, self.assertNotAlmostEqual,
1.1, 1.0, delta=0.5)
self.assertRaises(TypeError, self.assertAlmostEqual,
1.1, 1.0, places=2, delta=2)
self.assertRaises(TypeError, self.assertNotAlmostEqual,
1.1, 1.0, places=2, delta=2)
first = datetime.datetime.now()
second = first + datetime.timedelta(seconds=10)
self.assertAlmostEqual(first, second,
delta=datetime.timedelta(seconds=20))
self.assertNotAlmostEqual(first, second,
delta=datetime.timedelta(seconds=5))
def test_assertRaises(self):
def _raise(e):
raise e
self.assertRaises(KeyError, _raise, KeyError)
self.assertRaises(KeyError, _raise, KeyError("key"))
try:
self.assertRaises(KeyError, lambda: None)
except self.failureException as e:
self.assertIn("KeyError not raised", str(e))
else:
self.fail("assertRaises() didn't fail")
try:
self.assertRaises(KeyError, _raise, ValueError)
except ValueError:
pass
else:
self.fail("assertRaises() didn't let exception pass through")
with self.assertRaises(KeyError) as cm:
try:
raise KeyError
except Exception as e:
exc = e
raise
self.assertIs(cm.exception, exc)
with self.assertRaises(KeyError):
raise KeyError("key")
try:
with self.assertRaises(KeyError):
pass
except self.failureException as e:
self.assertIn("KeyError not raised", str(e))
else:
self.fail("assertRaises() didn't fail")
try:
with self.assertRaises(KeyError):
raise ValueError
except ValueError:
pass
else:
self.fail("assertRaises() didn't let exception pass through")
def test_assertRaises_frames_survival(self):
# Issue #9815: assertRaises should avoid keeping local variables
# in a traceback alive.
class A:
pass
wr = None
class Foo(unittest.TestCase):
def foo(self):
nonlocal wr
a = A()
wr = weakref.ref(a)
try:
raise IOError
except IOError:
raise ValueError
def test_functional(self):
self.assertRaises(ValueError, self.foo)
def test_with(self):
with self.assertRaises(ValueError):
self.foo()
Foo("test_functional").run()
self.assertIsNone(wr())
Foo("test_with").run()
self.assertIsNone(wr())
def testAssertNotRegex(self):
self.assertNotRegex('Ala ma kota', r'r+')
try:
self.assertNotRegex('Ala ma kota', r'k.t', 'Message')
except self.failureException as e:
self.assertIn("'kot'", e.args[0])
self.assertIn('Message', e.args[0])
else:
self.fail('assertNotRegex should have failed.')
class TestLongMessage(unittest.TestCase):
"""Test that the individual asserts honour longMessage.
This actually tests all the message behaviour for
asserts that use longMessage."""
def setUp(self):
class TestableTestFalse(unittest.TestCase):
longMessage = False
failureException = self.failureException
def testTest(self):
pass
class TestableTestTrue(unittest.TestCase):
longMessage = True
failureException = self.failureException
def testTest(self):
pass
self.testableTrue = TestableTestTrue('testTest')
self.testableFalse = TestableTestFalse('testTest')
def testDefault(self):
self.assertTrue(unittest.TestCase.longMessage)
def test_formatMsg(self):
self.assertEqual(self.testableFalse._formatMessage(None, "foo"), "foo")
self.assertEqual(self.testableFalse._formatMessage("foo", "bar"), "foo")
self.assertEqual(self.testableTrue._formatMessage(None, "foo"), "foo")
self.assertEqual(self.testableTrue._formatMessage("foo", "bar"), "bar : foo")
# This blows up if _formatMessage uses string concatenation
self.testableTrue._formatMessage(object(), 'foo')
def test_formatMessage_unicode_error(self):
one = ''.join(chr(i) for i in range(255))
# this used to cause a UnicodeDecodeError constructing msg
self.testableTrue._formatMessage(one, '\uFFFD')
def assertMessages(self, methodName, args, errors):
"""
Check that methodName(*args) raises the correct error messages.
errors should be a list of 4 regex that match the error when:
1) longMessage = False and no msg passed;
2) longMessage = False and msg passed;
3) longMessage = True and no msg passed;
4) longMessage = True and msg passed;
"""
def getMethod(i):
useTestableFalse = i < 2
if useTestableFalse:
test = self.testableFalse
else:
test = self.testableTrue
return getattr(test, methodName)
for i, expected_regex in enumerate(errors):
testMethod = getMethod(i)
kwargs = {}
withMsg = i % 2
if withMsg:
kwargs = {"msg": "oops"}
with self.assertRaisesRegex(self.failureException,
expected_regex=expected_regex):
testMethod(*args, **kwargs)
def testAssertTrue(self):
self.assertMessages('assertTrue', (False,),
["^False is not true$", "^oops$", "^False is not true$",
"^False is not true : oops$"])
def testAssertFalse(self):
self.assertMessages('assertFalse', (True,),
["^True is not false$", "^oops$", "^True is not false$",
"^True is not false : oops$"])
def testNotEqual(self):
self.assertMessages('assertNotEqual', (1, 1),
["^1 == 1$", "^oops$", "^1 == 1$",
"^1 == 1 : oops$"])
def testAlmostEqual(self):
self.assertMessages('assertAlmostEqual', (1, 2),
["^1 != 2 within 7 places$", "^oops$",
"^1 != 2 within 7 places$", "^1 != 2 within 7 places : oops$"])
def testNotAlmostEqual(self):
self.assertMessages('assertNotAlmostEqual', (1, 1),
["^1 == 1 within 7 places$", "^oops$",
"^1 == 1 within 7 places$", "^1 == 1 within 7 places : oops$"])
def test_baseAssertEqual(self):
self.assertMessages('_baseAssertEqual', (1, 2),
["^1 != 2$", "^oops$", "^1 != 2$", "^1 != 2 : oops$"])
def testAssertSequenceEqual(self):
# Error messages are multiline so not testing on full message
# assertTupleEqual and assertListEqual delegate to this method
self.assertMessages('assertSequenceEqual', ([], [None]),
["\+ \[None\]$", "^oops$", r"\+ \[None\]$",
r"\+ \[None\] : oops$"])
def testAssertSetEqual(self):
self.assertMessages('assertSetEqual', (set(), set([None])),
["None$", "^oops$", "None$",
"None : oops$"])
def testAssertIn(self):
self.assertMessages('assertIn', (None, []),
['^None not found in \[\]$', "^oops$",
'^None not found in \[\]$',
'^None not found in \[\] : oops$'])
def testAssertNotIn(self):
self.assertMessages('assertNotIn', (None, [None]),
['^None unexpectedly found in \[None\]$', "^oops$",
'^None unexpectedly found in \[None\]$',
'^None unexpectedly found in \[None\] : oops$'])
def testAssertDictEqual(self):
self.assertMessages('assertDictEqual', ({}, {'key': 'value'}),
[r"\+ \{'key': 'value'\}$", "^oops$",
"\+ \{'key': 'value'\}$",
"\+ \{'key': 'value'\} : oops$"])
def testAssertDictContainsSubset(self):
with warnings.catch_warnings():
warnings.simplefilter("ignore", DeprecationWarning)
self.assertMessages('assertDictContainsSubset', ({'key': 'value'}, {}),
["^Missing: 'key'$", "^oops$",
"^Missing: 'key'$",
"^Missing: 'key' : oops$"])
def testAssertMultiLineEqual(self):
self.assertMessages('assertMultiLineEqual', ("", "foo"),
[r"\+ foo$", "^oops$",
r"\+ foo$",
r"\+ foo : oops$"])
def testAssertLess(self):
self.assertMessages('assertLess', (2, 1),
["^2 not less than 1$", "^oops$",
"^2 not less than 1$", "^2 not less than 1 : oops$"])
def testAssertLessEqual(self):
self.assertMessages('assertLessEqual', (2, 1),
["^2 not less than or equal to 1$", "^oops$",
"^2 not less than or equal to 1$",
"^2 not less than or equal to 1 : oops$"])
def testAssertGreater(self):
self.assertMessages('assertGreater', (1, 2),
["^1 not greater than 2$", "^oops$",
"^1 not greater than 2$",
"^1 not greater than 2 : oops$"])
def testAssertGreaterEqual(self):
self.assertMessages('assertGreaterEqual', (1, 2),
["^1 not greater than or equal to 2$", "^oops$",
"^1 not greater than or equal to 2$",
"^1 not greater than or equal to 2 : oops$"])
def testAssertIsNone(self):
self.assertMessages('assertIsNone', ('not None',),
["^'not None' is not None$", "^oops$",
"^'not None' is not None$",
"^'not None' is not None : oops$"])
def testAssertIsNotNone(self):
self.assertMessages('assertIsNotNone', (None,),
["^unexpectedly None$", "^oops$",
"^unexpectedly None$",
"^unexpectedly None : oops$"])
def testAssertIs(self):
self.assertMessages('assertIs', (None, 'foo'),
["^None is not 'foo'$", "^oops$",
"^None is not 'foo'$",
"^None is not 'foo' : oops$"])
def testAssertIsNot(self):
self.assertMessages('assertIsNot', (None, None),
["^unexpectedly identical: None$", "^oops$",
"^unexpectedly identical: None$",
"^unexpectedly identical: None : oops$"])
def assertMessagesCM(self, methodName, args, func, errors):
"""
Check that the correct error messages are raised while executing:
with method(*args):
func()
*errors* should be a list of 4 regex that match the error when:
1) longMessage = False and no msg passed;
2) longMessage = False and msg passed;
3) longMessage = True and no msg passed;
4) longMessage = True and msg passed;
"""
p = product((self.testableFalse, self.testableTrue),
({}, {"msg": "oops"}))
for (cls, kwargs), err in zip(p, errors):
method = getattr(cls, methodName)
with self.assertRaisesRegex(cls.failureException, err):
with method(*args, **kwargs) as cm:
func()
def testAssertRaises(self):
self.assertMessagesCM('assertRaises', (TypeError,), lambda: None,
['^TypeError not raised$', '^oops$',
'^TypeError not raised$',
'^TypeError not raised : oops$'])
def testAssertRaisesRegex(self):
# test error not raised
self.assertMessagesCM('assertRaisesRegex', (TypeError, 'unused regex'),
lambda: None,
['^TypeError not raised$', '^oops$',
'^TypeError not raised$',
'^TypeError not raised : oops$'])
# test error raised but with wrong message
def raise_wrong_message():
raise TypeError('foo')
self.assertMessagesCM('assertRaisesRegex', (TypeError, 'regex'),
raise_wrong_message,
['^"regex" does not match "foo"$', '^oops$',
'^"regex" does not match "foo"$',
'^"regex" does not match "foo" : oops$'])
def testAssertWarns(self):
self.assertMessagesCM('assertWarns', (UserWarning,), lambda: None,
['^UserWarning not triggered$', '^oops$',
'^UserWarning not triggered$',
'^UserWarning not triggered : oops$'])
def testAssertWarnsRegex(self):
# test error not raised
self.assertMessagesCM('assertWarnsRegex', (UserWarning, 'unused regex'),
lambda: None,
['^UserWarning not triggered$', '^oops$',
'^UserWarning not triggered$',
'^UserWarning not triggered : oops$'])
# test warning raised but with wrong message
def raise_wrong_message():
warnings.warn('foo')
self.assertMessagesCM('assertWarnsRegex', (UserWarning, 'regex'),
raise_wrong_message,
['^"regex" does not match "foo"$', '^oops$',
'^"regex" does not match "foo"$',
'^"regex" does not match "foo" : oops$'])
if __name__ == "__main__":
unittest.main()
| bsd-3-clause |
Evervolv/android_kernel_htc_msm8660 | tools/perf/scripts/python/syscall-counts-by-pid.py | 11180 | 1927 | # system call counts, by pid
# (c) 2010, Tom Zanussi <tzanussi@gmail.com>
# Licensed under the terms of the GNU GPL License version 2
#
# Displays system-wide system call totals, broken down by syscall.
# If a [comm] arg is specified, only syscalls called by [comm] are displayed.
import os, sys
sys.path.append(os.environ['PERF_EXEC_PATH'] + \
'/scripts/python/Perf-Trace-Util/lib/Perf/Trace')
from perf_trace_context import *
from Core import *
from Util import syscall_name
usage = "perf script -s syscall-counts-by-pid.py [comm]\n";
for_comm = None
for_pid = None
if len(sys.argv) > 2:
sys.exit(usage)
if len(sys.argv) > 1:
try:
for_pid = int(sys.argv[1])
except:
for_comm = sys.argv[1]
syscalls = autodict()
def trace_begin():
print "Press control+C to stop and show the summary"
def trace_end():
print_syscall_totals()
def raw_syscalls__sys_enter(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
id, args):
if (for_comm and common_comm != for_comm) or \
(for_pid and common_pid != for_pid ):
return
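	# autodict creates nested dict levels on first access; the first increment of a
	# missing leaf raises TypeError, so seed the counter with 1 in that case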
try:
syscalls[common_comm][common_pid][id] += 1
except TypeError:
syscalls[common_comm][common_pid][id] = 1
def print_syscall_totals():
if for_comm is not None:
print "\nsyscall events for %s:\n\n" % (for_comm),
else:
print "\nsyscall events by comm/pid:\n\n",
print "%-40s %10s\n" % ("comm [pid]/syscalls", "count"),
print "%-40s %10s\n" % ("----------------------------------------", \
"----------"),
comm_keys = syscalls.keys()
for comm in comm_keys:
pid_keys = syscalls[comm].keys()
for pid in pid_keys:
print "\n%s [%d]\n" % (comm, pid),
id_keys = syscalls[comm][pid].keys()
for id, val in sorted(syscalls[comm][pid].iteritems(), \
key = lambda(k, v): (v, k), reverse = True):
print " %-38s %10d\n" % (syscall_name(id), val),
| gpl-2.0 |
dipeshbh/nodebbdb | node_modules/sitemap/env/src/node-v0.12.7-linux-x64/lib/node_modules/npm/node_modules/node-gyp/gyp/gyptest.py | 1752 | 8019 | #!/usr/bin/env python
# Copyright (c) 2012 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
__doc__ = """
gyptest.py -- test runner for GYP tests.
"""
import os
import optparse
import shlex
import subprocess
import sys
class CommandRunner(object):
"""
Executor class for commands, including "commands" implemented by
Python functions.
"""
verbose = True
active = True
def __init__(self, dictionary={}):
self.subst_dictionary(dictionary)
def subst_dictionary(self, dictionary):
self._subst_dictionary = dictionary
def subst(self, string, dictionary=None):
"""
Substitutes (via the format operator) the values in the specified
dictionary into the specified command.
The command can be an (action, string) tuple. In all cases, we
perform substitution on strings and don't worry if something isn't
a string. (It's probably a Python function to be executed.)
"""
if dictionary is None:
dictionary = self._subst_dictionary
if dictionary:
try:
string = string % dictionary
except TypeError:
pass
return string
def display(self, command, stdout=None, stderr=None):
if not self.verbose:
return
if type(command) == type(()):
func = command[0]
args = command[1:]
s = '%s(%s)' % (func.__name__, ', '.join(map(repr, args)))
    elif type(command) == type([]):
# TODO: quote arguments containing spaces
# TODO: handle meta characters?
s = ' '.join(command)
else:
s = self.subst(command)
if not s.endswith('\n'):
s += '\n'
sys.stdout.write(s)
sys.stdout.flush()
def execute(self, command, stdout=None, stderr=None):
"""
Executes a single command.
"""
if not self.active:
return 0
if type(command) == type(''):
command = self.subst(command)
cmdargs = shlex.split(command)
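      # a bare 'cd' can't run in a subprocess; turn it into an os.chdir() call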
if cmdargs[0] == 'cd':
command = (os.chdir,) + tuple(cmdargs[1:])
if type(command) == type(()):
func = command[0]
args = command[1:]
return func(*args)
else:
if stdout is sys.stdout:
# Same as passing sys.stdout, except python2.4 doesn't fail on it.
subout = None
else:
# Open pipe for anything else so Popen works on python2.4.
subout = subprocess.PIPE
if stderr is sys.stderr:
# Same as passing sys.stderr, except python2.4 doesn't fail on it.
suberr = None
elif stderr is None:
# Merge with stdout if stderr isn't specified.
suberr = subprocess.STDOUT
else:
# Open pipe for anything else so Popen works on python2.4.
suberr = subprocess.PIPE
p = subprocess.Popen(command,
shell=(sys.platform == 'win32'),
stdout=subout,
stderr=suberr)
p.wait()
if stdout is None:
self.stdout = p.stdout.read()
elif stdout is not sys.stdout:
stdout.write(p.stdout.read())
if stderr not in (None, sys.stderr):
stderr.write(p.stderr.read())
return p.returncode
def run(self, command, display=None, stdout=None, stderr=None):
"""
Runs a single command, displaying it first.
"""
if display is None:
display = command
self.display(display)
return self.execute(command, stdout, stderr)
class Unbuffered(object):
def __init__(self, fp):
self.fp = fp
def write(self, arg):
self.fp.write(arg)
self.fp.flush()
def __getattr__(self, attr):
return getattr(self.fp, attr)
sys.stdout = Unbuffered(sys.stdout)
sys.stderr = Unbuffered(sys.stderr)
def is_test_name(f):
return f.startswith('gyptest') and f.endswith('.py')
def find_all_gyptest_files(directory):
result = []
for root, dirs, files in os.walk(directory):
if '.svn' in dirs:
dirs.remove('.svn')
result.extend([ os.path.join(root, f) for f in files if is_test_name(f) ])
result.sort()
return result
def main(argv=None):
if argv is None:
argv = sys.argv
usage = "gyptest.py [-ahlnq] [-f formats] [test ...]"
parser = optparse.OptionParser(usage=usage)
parser.add_option("-a", "--all", action="store_true",
help="run all tests")
parser.add_option("-C", "--chdir", action="store", default=None,
help="chdir to the specified directory")
parser.add_option("-f", "--format", action="store", default='',
help="run tests with the specified formats")
parser.add_option("-G", '--gyp_option', action="append", default=[],
help="Add -G options to the gyp command line")
parser.add_option("-l", "--list", action="store_true",
help="list available tests and exit")
parser.add_option("-n", "--no-exec", action="store_true",
help="no execute, just print the command line")
parser.add_option("--passed", action="store_true",
help="report passed tests")
parser.add_option("--path", action="append", default=[],
help="additional $PATH directory")
parser.add_option("-q", "--quiet", action="store_true",
help="quiet, don't print test command lines")
opts, args = parser.parse_args(argv[1:])
if opts.chdir:
os.chdir(opts.chdir)
if opts.path:
extra_path = [os.path.abspath(p) for p in opts.path]
extra_path = os.pathsep.join(extra_path)
os.environ['PATH'] = extra_path + os.pathsep + os.environ['PATH']
if not args:
if not opts.all:
sys.stderr.write('Specify -a to get all tests.\n')
return 1
args = ['test']
tests = []
for arg in args:
if os.path.isdir(arg):
tests.extend(find_all_gyptest_files(os.path.normpath(arg)))
else:
if not is_test_name(os.path.basename(arg)):
print >>sys.stderr, arg, 'is not a valid gyp test name.'
sys.exit(1)
tests.append(arg)
if opts.list:
for test in tests:
print test
sys.exit(0)
CommandRunner.verbose = not opts.quiet
CommandRunner.active = not opts.no_exec
cr = CommandRunner()
os.environ['PYTHONPATH'] = os.path.abspath('test/lib')
if not opts.quiet:
sys.stdout.write('PYTHONPATH=%s\n' % os.environ['PYTHONPATH'])
passed = []
failed = []
no_result = []
if opts.format:
format_list = opts.format.split(',')
else:
# TODO: not duplicate this mapping from pylib/gyp/__init__.py
format_list = {
'aix5': ['make'],
'freebsd7': ['make'],
'freebsd8': ['make'],
'openbsd5': ['make'],
'cygwin': ['msvs'],
'win32': ['msvs', 'ninja'],
'linux2': ['make', 'ninja'],
'linux3': ['make', 'ninja'],
'darwin': ['make', 'ninja', 'xcode', 'xcode-ninja'],
}[sys.platform]
for format in format_list:
os.environ['TESTGYP_FORMAT'] = format
if not opts.quiet:
sys.stdout.write('TESTGYP_FORMAT=%s\n' % format)
gyp_options = []
for option in opts.gyp_option:
gyp_options += ['-G', option]
if gyp_options and not opts.quiet:
sys.stdout.write('Extra Gyp options: %s\n' % gyp_options)
for test in tests:
status = cr.run([sys.executable, test] + gyp_options,
stdout=sys.stdout,
stderr=sys.stderr)
if status == 2:
no_result.append(test)
elif status:
failed.append(test)
else:
passed.append(test)
if not opts.quiet:
def report(description, tests):
if tests:
if len(tests) == 1:
sys.stdout.write("\n%s the following test:\n" % description)
else:
fmt = "\n%s the following %d tests:\n"
sys.stdout.write(fmt % (description, len(tests)))
sys.stdout.write("\t" + "\n\t".join(tests) + "\n")
if opts.passed:
report("Passed", passed)
report("Failed", failed)
report("No result from", no_result)
if failed:
return 1
else:
return 0
if __name__ == "__main__":
sys.exit(main())
| gpl-3.0 |
yask123/django | django/core/serializers/__init__.py | 347 | 8194 | """
Interfaces for serializing Django objects.
Usage::
from django.core import serializers
json = serializers.serialize("json", some_queryset)
objects = list(serializers.deserialize("json", json))
To add your own serializers, use the SERIALIZATION_MODULES setting::
SERIALIZATION_MODULES = {
"csv": "path.to.csv.serializer",
"txt": "path.to.txt.serializer",
}
"""
import importlib
from django.apps import apps
from django.conf import settings
from django.core.serializers.base import SerializerDoesNotExist
from django.utils import six
# Built-in serializers
BUILTIN_SERIALIZERS = {
"xml": "django.core.serializers.xml_serializer",
"python": "django.core.serializers.python",
"json": "django.core.serializers.json",
"yaml": "django.core.serializers.pyyaml",
}
_serializers = {}
class BadSerializer(object):
"""
Stub serializer to hold exception raised during registration
This allows the serializer registration to cache serializers and if there
is an error raised in the process of creating a serializer it will be
raised and passed along to the caller when the serializer is used.
"""
internal_use_only = False
def __init__(self, exception):
self.exception = exception
def __call__(self, *args, **kwargs):
raise self.exception
def register_serializer(format, serializer_module, serializers=None):
"""Register a new serializer.
``serializer_module`` should be the fully qualified module name
for the serializer.
If ``serializers`` is provided, the registration will be added
to the provided dictionary.
If ``serializers`` is not provided, the registration will be made
directly into the global register of serializers. Adding serializers
directly is not a thread-safe operation.
"""
if serializers is None and not _serializers:
_load_serializers()
try:
module = importlib.import_module(serializer_module)
except ImportError as exc:
bad_serializer = BadSerializer(exc)
module = type('BadSerializerModule', (object,), {
'Deserializer': bad_serializer,
'Serializer': bad_serializer,
})
if serializers is None:
_serializers[format] = module
else:
serializers[format] = module
def unregister_serializer(format):
"Unregister a given serializer. This is not a thread-safe operation."
if not _serializers:
_load_serializers()
if format not in _serializers:
raise SerializerDoesNotExist(format)
del _serializers[format]
def get_serializer(format):
if not _serializers:
_load_serializers()
if format not in _serializers:
raise SerializerDoesNotExist(format)
return _serializers[format].Serializer
def get_serializer_formats():
if not _serializers:
_load_serializers()
return list(_serializers)
def get_public_serializer_formats():
if not _serializers:
_load_serializers()
return [k for k, v in six.iteritems(_serializers) if not v.Serializer.internal_use_only]
def get_deserializer(format):
if not _serializers:
_load_serializers()
if format not in _serializers:
raise SerializerDoesNotExist(format)
return _serializers[format].Deserializer
def serialize(format, queryset, **options):
"""
Serialize a queryset (or any iterator that returns database objects) using
a certain serializer.
"""
s = get_serializer(format)()
s.serialize(queryset, **options)
return s.getvalue()
def deserialize(format, stream_or_string, **options):
"""
Deserialize a stream or a string. Returns an iterator that yields ``(obj,
m2m_relation_dict)``, where ``obj`` is an instantiated -- but *unsaved* --
object, and ``m2m_relation_dict`` is a dictionary of ``{m2m_field_name :
list_of_related_objects}``.
"""
d = get_deserializer(format)
return d(stream_or_string, **options)
def _load_serializers():
"""
Register built-in and settings-defined serializers. This is done lazily so
that user code has a chance to (e.g.) set up custom settings without
needing to be careful of import order.
"""
global _serializers
serializers = {}
for format in BUILTIN_SERIALIZERS:
register_serializer(format, BUILTIN_SERIALIZERS[format], serializers)
if hasattr(settings, "SERIALIZATION_MODULES"):
for format in settings.SERIALIZATION_MODULES:
register_serializer(format, settings.SERIALIZATION_MODULES[format], serializers)
_serializers = serializers
def sort_dependencies(app_list):
"""Sort a list of (app_config, models) pairs into a single list of models.
The single list of models is sorted so that any model with a natural key
is serialized before a normal model, and any model with a natural key
    dependency has its dependencies serialized first.
"""
# Process the list of models, and get the list of dependencies
model_dependencies = []
models = set()
for app_config, model_list in app_list:
if model_list is None:
model_list = app_config.get_models()
for model in model_list:
models.add(model)
# Add any explicitly defined dependencies
if hasattr(model, 'natural_key'):
deps = getattr(model.natural_key, 'dependencies', [])
if deps:
deps = [apps.get_model(dep) for dep in deps]
else:
deps = []
# Now add a dependency for any FK relation with a model that
# defines a natural key
for field in model._meta.fields:
if field.remote_field:
rel_model = field.remote_field.model
if hasattr(rel_model, 'natural_key') and rel_model != model:
deps.append(rel_model)
# Also add a dependency for any simple M2M relation with a model
# that defines a natural key. M2M relations with explicit through
# models don't count as dependencies.
for field in model._meta.many_to_many:
if field.remote_field.through._meta.auto_created:
rel_model = field.remote_field.model
if hasattr(rel_model, 'natural_key') and rel_model != model:
deps.append(rel_model)
model_dependencies.append((model, deps))
model_dependencies.reverse()
# Now sort the models to ensure that dependencies are met. This
# is done by repeatedly iterating over the input list of models.
# If all the dependencies of a given model are in the final list,
# that model is promoted to the end of the final list. This process
# continues until the input list is empty, or we do a full iteration
# over the input models without promoting a model to the final list.
# If we do a full iteration without a promotion, that means there are
# circular dependencies in the list.
model_list = []
while model_dependencies:
skipped = []
changed = False
while model_dependencies:
model, deps = model_dependencies.pop()
# If all of the models in the dependency list are either already
# on the final model list, or not on the original serialization list,
            # then we've found another model with all its dependencies satisfied.
found = True
for candidate in ((d not in models or d in model_list) for d in deps):
if not candidate:
found = False
if found:
model_list.append(model)
changed = True
else:
skipped.append((model, deps))
if not changed:
raise RuntimeError("Can't resolve dependencies for %s in serialized app list." %
', '.join('%s.%s' % (model._meta.app_label, model._meta.object_name)
for model, deps in sorted(skipped, key=lambda obj: obj[0].__name__))
)
model_dependencies = skipped
return model_list
| bsd-3-clause |
songjq/polystring | scripts/render.py | 1 | 6741 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
render
======
A structure data renderer.
Copyright (C) 2012 Yi-Xin Liu
"""
import argparse
import numpy as np
import scipy.io
from mayavi import mlab
#import matplotlib
#if(not args.display):
#matplotlib.use('Agg')
#mlab.options.offscreen = True #Error in running
import matplotlib.pyplot as plt
from matplotlib import colors
def render_1d(struct_name, data_file, img_file, period, **kwargs):
''' render 1D structure.
:param struct_name: the struct variable name to be rendered
:type struct_name: string
:param data_file: the MAT file containing sturct varible
:type data_file: string
:param img_file: the file name of the image file
:type img_file: string
:param period: how many periods to draw
:type img_file: integer
:param show_img: if True, show image on the screen
:type show_img: bool
:param kwargs: any extra key words arguments will be passed to plot functions
'''
data = scipy.io.loadmat(data_file)
struct = data[struct_name]
struct = np.tile(struct,period)
x = data['x']
Lx = np.size(x)
xa = x[0]
dx = x[1] - xa
rx = np.zeros(Lx*period)
for i in xrange(Lx*period):
rx[i] = i * dx
# No frame, white background
fig = plt.figure(dpi=80, facecolor='w')
# full figure subplot
ax = fig.add_axes([0, 0, 1, 1])
ax.plot(rx,struct,**kwargs)
plt.savefig(img_file)
def render_2d(struct_name, data_file, img_file, period,
levels=None, cmap=None,
**kwargs):
''' Render 2D structure.
:param struct_name: the struct variable name to be rendered
:type struct_name: string
:param data_file: the MAT file containing sturct varible
:type data_file: string
:param img_file: the file name of the image file
:type img_file: string
:param period: how many periods to draw
:type img_file: integer
:param show_img: if True, show image on the screen
:type show_img: bool
:param levels: how many contour levels
:type levels: integer
:param cmap: colormap for contour plot
:type cmap: `Colormap`
:param kwargs: any extra key words arguments will be passed to plot functions
'''
data = scipy.io.loadmat(data_file)
struct = data[struct_name]
repeat = (period,period)
struct = np.tile(struct,repeat)
x = data['x']
y = data['y']
Lx, Ly = np.shape(x)
xa = x[0,0]
dx1 = x[1,0] - xa
dx2 = x[0,1] - xa
yc = y[0,0]
dy1 = y[1,0] - yc
dy2 = y[0,1] - yc
rx = np.zeros((Lx*period,Ly*period))
ry = np.zeros((Lx*period,Ly*period))
for (i,j) in np.ndindex(Lx*period,Ly*period):
rx[i,j] = i * dx1 + j * dx2
ry[i,j] = i * dy1 + j * dy2
dx = rx.max() - rx.min()
dy = ry.max() - ry.min()
w, h = plt.figaspect(float(dy / dx)) # float is must
# No frame, white background, w/h aspect ratio figure
fig = plt.figure(figsize=(w, h), frameon=False,
dpi=80, facecolor='w')
# full figure subplot, no border, no axes
ax = fig.add_axes([0, 0, 1, 1], frameon=False, axisbg='w')
# no ticks
ax.get_xaxis().set_visible(False)
ax.get_yaxis().set_visible(False)
# Default: there are 256 contour levels
if levels is None:
step = (struct.max() - struct.min()) / 256
levels = np.arange(struct.min(), struct.max() + step, step)
# Default: colormap is monochromatic red
if cmap is None:
clr = np.zeros((256, 3))
for i in np.arange(256):
clr[i, 0] = i / 255.0
cmap = colors.ListedColormap(clr)
# actual plot
ax.contourf(rx, ry, struct, levels=levels,
cmap=cmap, antialiased=False, **kwargs)
#ax.contourf(rx,ry,struct)
plt.savefig(img_file)
def render_3d(struct_name, data_file, img_file, period, **kwargs):
''' Render 3D structure.
:param struct_name: the struct variable name to be rendered
:type struct_name: string
:param data_file: the MAT file containing sturct varible
:type data_file: string
:param img_file: the file name of the image file
:type img_file: string
:param period: how many periods to draw
:type img_file: integer
:param show_img: if True, show image on the screen
:type show_img: bool
:param kwargs: any extra key words arguments will be passed to plot functions
'''
data = scipy.io.loadmat(data_file)
struct = data[struct_name]
repeat = (period,period,period)
struct = np.tile(struct,repeat)
x = data['x']
y = data['y']
z = data['z']
Lx, Ly, Lz = np.shape(x)
xa = x[0,0,0]
dx1 = x[1,0,0] - xa
dx2 = x[0,1,0] - xa
dx3 = x[0,0,1] - xa
yc = y[0,0,0]
dy1 = y[1,0,0] - yc
dy2 = y[0,1,0] - yc
dy3 = y[0,0,1] - yc
ze = z[0,0,0]
dz1 = z[1,0,0] - ze
dz2 = z[0,1,0] - ze
dz3 = z[0,0,1] - ze
rx = np.zeros((Lx*period,Ly*period,Lz*period))
ry = np.zeros((Lx*period,Ly*period,Lz*period))
rz = np.zeros((Lx*period,Ly*period,Lz*period))
for (i,j,k) in np.ndindex(Lx*period,Ly*period,Lz*period):
rx[i,j,k] = i * dx1 + j * dx2 + k * dx3
ry[i,j,k] = i * dy1 + j * dy2 + k * dy3
rz[i,j,k] = i * dz1 + j * dz2 + k * dz3
mlab.contour3d(rx,ry,rz,struct,**kwargs)
#mlab.pipeline.volume(mlab.pipeline.scalar_field(rx, ry, rz, struct))
mlab.savefig(img_file)
mlab.close()
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('-a', '--space',
default='2',
help='the space dimensionality.')
parser.add_argument('-s', '--struct',
default='struct',
help='the variable name in data file to render.')
parser.add_argument('-d', '--data', default='data.mat',
help='the data file to read.')
parser.add_argument('-i', '--image', default='struct.png',
help='the image file to write.')
parser.add_argument('-p', '--period', default=2, type=int,
help='how many periods to render.' + \
'same for each dimension.')
parser.add_argument('-y', '--display', action='store_true',
                        help='display the image')
args = parser.parse_args()
struct = args.struct
data_file = args.data
img_file = args.image
period = args.period
if(args.space == '1'):
render_1d(struct, data_file, img_file, period)
elif(args.space == '2'):
render_2d(struct, data_file, img_file, period)
else:
render_3d(struct, data_file, img_file, period)
| gpl-3.0 |
eddyb/servo | tests/wpt/web-platform-tests/tools/pytest/testing/test_runner.py | 167 | 20633 | # -*- coding: utf-8 -*-
from __future__ import with_statement
import _pytest._code
import os
import py
import pytest
import sys
from _pytest import runner, main
class TestSetupState:
def test_setup(self, testdir):
ss = runner.SetupState()
item = testdir.getitem("def test_func(): pass")
l = [1]
ss.prepare(item)
ss.addfinalizer(l.pop, colitem=item)
assert l
ss._pop_and_teardown()
assert not l
def test_teardown_exact_stack_empty(self, testdir):
item = testdir.getitem("def test_func(): pass")
ss = runner.SetupState()
ss.teardown_exact(item, None)
ss.teardown_exact(item, None)
ss.teardown_exact(item, None)
def test_setup_fails_and_failure_is_cached(self, testdir):
item = testdir.getitem("""
def setup_module(mod):
raise ValueError(42)
def test_func(): pass
""") # noqa
ss = runner.SetupState()
pytest.raises(ValueError, lambda: ss.prepare(item))
pytest.raises(ValueError, lambda: ss.prepare(item))
def test_teardown_multiple_one_fails(self, testdir):
r = []
def fin1(): r.append('fin1')
def fin2(): raise Exception('oops')
def fin3(): r.append('fin3')
item = testdir.getitem("def test_func(): pass")
ss = runner.SetupState()
ss.addfinalizer(fin1, item)
ss.addfinalizer(fin2, item)
ss.addfinalizer(fin3, item)
with pytest.raises(Exception) as err:
ss._callfinalizers(item)
assert err.value.args == ('oops',)
assert r == ['fin3', 'fin1']
def test_teardown_multiple_fail(self, testdir):
# Ensure the first exception is the one which is re-raised.
# Ideally both would be reported however.
def fin1(): raise Exception('oops1')
def fin2(): raise Exception('oops2')
item = testdir.getitem("def test_func(): pass")
ss = runner.SetupState()
ss.addfinalizer(fin1, item)
ss.addfinalizer(fin2, item)
with pytest.raises(Exception) as err:
ss._callfinalizers(item)
assert err.value.args == ('oops2',)
class BaseFunctionalTests:
def test_passfunction(self, testdir):
reports = testdir.runitem("""
def test_func():
pass
""")
rep = reports[1]
assert rep.passed
assert not rep.failed
assert rep.outcome == "passed"
assert not rep.longrepr
def test_failfunction(self, testdir):
reports = testdir.runitem("""
def test_func():
assert 0
""")
rep = reports[1]
assert not rep.passed
assert not rep.skipped
assert rep.failed
assert rep.when == "call"
assert rep.outcome == "failed"
#assert isinstance(rep.longrepr, ReprExceptionInfo)
def test_skipfunction(self, testdir):
reports = testdir.runitem("""
import pytest
def test_func():
pytest.skip("hello")
""")
rep = reports[1]
assert not rep.failed
assert not rep.passed
assert rep.skipped
assert rep.outcome == "skipped"
#assert rep.skipped.when == "call"
        #assert rep.skipped.reason == "hello"
#assert rep.skipped.location.lineno == 3
#assert rep.skipped.location.path
#assert not rep.skipped.failurerepr
def test_skip_in_setup_function(self, testdir):
reports = testdir.runitem("""
import pytest
def setup_function(func):
pytest.skip("hello")
def test_func():
pass
""")
print(reports)
rep = reports[0]
assert not rep.failed
assert not rep.passed
assert rep.skipped
#assert rep.skipped.reason == "hello"
#assert rep.skipped.location.lineno == 3
#assert rep.skipped.location.lineno == 3
assert len(reports) == 2
assert reports[1].passed # teardown
def test_failure_in_setup_function(self, testdir):
reports = testdir.runitem("""
import pytest
def setup_function(func):
raise ValueError(42)
def test_func():
pass
""")
rep = reports[0]
assert not rep.skipped
assert not rep.passed
assert rep.failed
assert rep.when == "setup"
assert len(reports) == 2
def test_failure_in_teardown_function(self, testdir):
reports = testdir.runitem("""
import pytest
def teardown_function(func):
raise ValueError(42)
def test_func():
pass
""")
print(reports)
assert len(reports) == 3
rep = reports[2]
assert not rep.skipped
assert not rep.passed
assert rep.failed
assert rep.when == "teardown"
#assert rep.longrepr.reprcrash.lineno == 3
#assert rep.longrepr.reprtraceback.reprentries
def test_custom_failure_repr(self, testdir):
testdir.makepyfile(conftest="""
import pytest
class Function(pytest.Function):
def repr_failure(self, excinfo):
return "hello"
""")
reports = testdir.runitem("""
import pytest
def test_func():
assert 0
""")
rep = reports[1]
assert not rep.skipped
assert not rep.passed
assert rep.failed
#assert rep.outcome.when == "call"
#assert rep.failed.where.lineno == 3
#assert rep.failed.where.path.basename == "test_func.py"
#assert rep.failed.failurerepr == "hello"
def test_teardown_final_returncode(self, testdir):
rec = testdir.inline_runsource("""
def test_func():
pass
def teardown_function(func):
raise ValueError(42)
""")
assert rec.ret == 1
def test_exact_teardown_issue90(self, testdir):
rec = testdir.inline_runsource("""
import pytest
class TestClass:
def test_method(self):
pass
def teardown_class(cls):
raise Exception()
def test_func():
import sys
# on python2 exc_info is kept till a function exits
# so we would end up calling test functions while
# sys.exc_info would return the indexerror
# from guessing the lastitem
excinfo = sys.exc_info()
import traceback
assert excinfo[0] is None, \
traceback.format_exception(*excinfo)
def teardown_function(func):
raise ValueError(42)
""")
reps = rec.getreports("pytest_runtest_logreport")
print(reps)
for i in range(2):
assert reps[i].nodeid.endswith("test_method")
assert reps[i].passed
assert reps[2].when == "teardown"
assert reps[2].failed
assert len(reps) == 6
for i in range(3, 5):
assert reps[i].nodeid.endswith("test_func")
assert reps[i].passed
assert reps[5].when == "teardown"
assert reps[5].nodeid.endswith("test_func")
assert reps[5].failed
def test_failure_in_setup_function_ignores_custom_repr(self, testdir):
testdir.makepyfile(conftest="""
import pytest
class Function(pytest.Function):
def repr_failure(self, excinfo):
assert 0
""")
reports = testdir.runitem("""
def setup_function(func):
raise ValueError(42)
def test_func():
pass
""")
assert len(reports) == 2
rep = reports[0]
print(rep)
assert not rep.skipped
assert not rep.passed
assert rep.failed
#assert rep.outcome.when == "setup"
#assert rep.outcome.where.lineno == 3
#assert rep.outcome.where.path.basename == "test_func.py"
#assert isinstance(rep.failed.failurerepr, PythonFailureRepr)
def test_systemexit_does_not_bail_out(self, testdir):
try:
reports = testdir.runitem("""
def test_func():
raise SystemExit(42)
""")
except SystemExit:
pytest.fail("runner did not catch SystemExit")
rep = reports[1]
assert rep.failed
assert rep.when == "call"
def test_exit_propagates(self, testdir):
try:
testdir.runitem("""
import pytest
def test_func():
raise pytest.exit.Exception()
""")
except pytest.exit.Exception:
pass
else:
pytest.fail("did not raise")
class TestExecutionNonForked(BaseFunctionalTests):
def getrunner(self):
def f(item):
return runner.runtestprotocol(item, log=False)
return f
def test_keyboardinterrupt_propagates(self, testdir):
try:
testdir.runitem("""
def test_func():
raise KeyboardInterrupt("fake")
""")
except KeyboardInterrupt:
pass
else:
pytest.fail("did not raise")
class TestExecutionForked(BaseFunctionalTests):
pytestmark = pytest.mark.skipif("not hasattr(os, 'fork')")
def getrunner(self):
# XXX re-arrange this test to live in pytest-xdist
boxed = pytest.importorskip("xdist.boxed")
return boxed.forked_run_report
def test_suicide(self, testdir):
reports = testdir.runitem("""
def test_func():
import os
os.kill(os.getpid(), 15)
""")
rep = reports[0]
assert rep.failed
assert rep.when == "???"
class TestSessionReports:
def test_collect_result(self, testdir):
col = testdir.getmodulecol("""
def test_func1():
pass
class TestClass:
pass
""")
rep = runner.collect_one_node(col)
assert not rep.failed
assert not rep.skipped
assert rep.passed
locinfo = rep.location
assert locinfo[0] == col.fspath.basename
assert not locinfo[1]
assert locinfo[2] == col.fspath.basename
res = rep.result
assert len(res) == 2
assert res[0].name == "test_func1"
assert res[1].name == "TestClass"
def test_skip_at_module_scope(self, testdir):
col = testdir.getmodulecol("""
import pytest
pytest.skip("hello")
def test_func():
pass
""")
rep = main.collect_one_node(col)
assert not rep.failed
assert not rep.passed
assert rep.skipped
reporttypes = [
runner.BaseReport,
runner.TestReport,
runner.TeardownErrorReport,
runner.CollectReport,
]
@pytest.mark.parametrize('reporttype', reporttypes, ids=[x.__name__ for x in reporttypes])
def test_report_extra_parameters(reporttype):
if hasattr(py.std.inspect, 'signature'):
args = list(py.std.inspect.signature(reporttype.__init__).parameters.keys())[1:]
else:
args = py.std.inspect.getargspec(reporttype.__init__)[0][1:]
basekw = dict.fromkeys(args, [])
report = reporttype(newthing=1, **basekw)
assert report.newthing == 1
def test_callinfo():
ci = runner.CallInfo(lambda: 0, '123')
assert ci.when == "123"
assert ci.result == 0
assert "result" in repr(ci)
ci = runner.CallInfo(lambda: 0/0, '123')
assert ci.when == "123"
assert not hasattr(ci, 'result')
assert ci.excinfo
assert "exc" in repr(ci)
# design question: do we want general hooks in python files?
# then something like the following functional tests makes sense
@pytest.mark.xfail
def test_runtest_in_module_ordering(testdir):
p1 = testdir.makepyfile("""
def pytest_runtest_setup(item): # runs after class-level!
item.function.mylist.append("module")
class TestClass:
def pytest_runtest_setup(self, item):
assert not hasattr(item.function, 'mylist')
item.function.mylist = ['class']
def pytest_funcarg__mylist(self, request):
return request.function.mylist
def pytest_runtest_call(self, item, __multicall__):
try:
__multicall__.execute()
except ValueError:
pass
def test_hello1(self, mylist):
assert mylist == ['class', 'module'], mylist
raise ValueError()
def test_hello2(self, mylist):
assert mylist == ['class', 'module'], mylist
def pytest_runtest_teardown(item):
del item.function.mylist
""")
result = testdir.runpytest(p1)
result.stdout.fnmatch_lines([
"*2 passed*"
])
def test_outcomeexception_exceptionattributes():
outcome = runner.OutcomeException('test')
assert outcome.args[0] == outcome.msg
def test_pytest_exit():
try:
pytest.exit("hello")
except pytest.exit.Exception:
excinfo = _pytest._code.ExceptionInfo()
assert excinfo.errisinstance(KeyboardInterrupt)
def test_pytest_fail():
try:
pytest.fail("hello")
except pytest.fail.Exception:
excinfo = _pytest._code.ExceptionInfo()
s = excinfo.exconly(tryshort=True)
assert s.startswith("Failed")
def test_pytest_fail_notrace(testdir):
testdir.makepyfile("""
import pytest
def test_hello():
pytest.fail("hello", pytrace=False)
def teardown_function(function):
pytest.fail("world", pytrace=False)
""")
result = testdir.runpytest()
result.stdout.fnmatch_lines([
"world",
"hello",
])
assert 'def teardown_function' not in result.stdout.str()
@pytest.mark.parametrize('str_prefix', ['u', ''])
def test_pytest_fail_notrace_non_ascii(testdir, str_prefix):
"""Fix pytest.fail with pytrace=False with non-ascii characters (#1178).
This tests with native and unicode strings containing non-ascii chars.
"""
testdir.makepyfile(u"""
# coding: utf-8
import pytest
def test_hello():
pytest.fail(%s'oh oh: ☺', pytrace=False)
""" % str_prefix)
result = testdir.runpytest()
if sys.version_info[0] >= 3:
result.stdout.fnmatch_lines(['*test_hello*', "oh oh: ☺"])
else:
result.stdout.fnmatch_lines(['*test_hello*', "oh oh: *"])
assert 'def test_hello' not in result.stdout.str()
def test_pytest_no_tests_collected_exit_status(testdir):
result = testdir.runpytest()
result.stdout.fnmatch_lines('*collected 0 items*')
assert result.ret == main.EXIT_NOTESTSCOLLECTED
testdir.makepyfile(test_foo="""
def test_foo():
assert 1
""")
result = testdir.runpytest()
result.stdout.fnmatch_lines('*collected 1 items*')
result.stdout.fnmatch_lines('*1 passed*')
assert result.ret == main.EXIT_OK
result = testdir.runpytest('-k nonmatch')
result.stdout.fnmatch_lines('*collected 1 items*')
result.stdout.fnmatch_lines('*1 deselected*')
assert result.ret == main.EXIT_NOTESTSCOLLECTED
def test_exception_printing_skip():
try:
pytest.skip("hello")
except pytest.skip.Exception:
excinfo = _pytest._code.ExceptionInfo()
s = excinfo.exconly(tryshort=True)
assert s.startswith("Skipped")
def test_importorskip(monkeypatch):
importorskip = pytest.importorskip
def f():
importorskip("asdlkj")
try:
sys = importorskip("sys") # noqa
assert sys == py.std.sys
#path = pytest.importorskip("os.path")
#assert path == py.std.os.path
excinfo = pytest.raises(pytest.skip.Exception, f)
path = py.path.local(excinfo.getrepr().reprcrash.path)
# check that importorskip reports the actual call
# in this test the test_runner.py file
assert path.purebasename == "test_runner"
pytest.raises(SyntaxError, "pytest.importorskip('x y z')")
pytest.raises(SyntaxError, "pytest.importorskip('x=y')")
mod = py.std.types.ModuleType("hello123")
mod.__version__ = "1.3"
monkeypatch.setitem(sys.modules, "hello123", mod)
pytest.raises(pytest.skip.Exception, """
pytest.importorskip("hello123", minversion="1.3.1")
""")
mod2 = pytest.importorskip("hello123", minversion="1.3")
assert mod2 == mod
except pytest.skip.Exception:
print(_pytest._code.ExceptionInfo())
pytest.fail("spurious skip")
def test_importorskip_imports_last_module_part():
ospath = pytest.importorskip("os.path")
assert os.path == ospath
def test_importorskip_dev_module(monkeypatch):
try:
mod = py.std.types.ModuleType("mockmodule")
mod.__version__ = '0.13.0.dev-43290'
monkeypatch.setitem(sys.modules, 'mockmodule', mod)
mod2 = pytest.importorskip('mockmodule', minversion='0.12.0')
assert mod2 == mod
pytest.raises(pytest.skip.Exception, """
pytest.importorskip('mockmodule1', minversion='0.14.0')""")
except pytest.skip.Exception:
print(_pytest._code.ExceptionInfo())
pytest.fail("spurious skip")
def test_pytest_cmdline_main(testdir):
p = testdir.makepyfile("""
import pytest
def test_hello():
assert 1
if __name__ == '__main__':
pytest.cmdline.main([__file__])
""")
import subprocess
popen = subprocess.Popen([sys.executable, str(p)], stdout=subprocess.PIPE)
popen.communicate()
ret = popen.wait()
assert ret == 0
def test_unicode_in_longrepr(testdir):
testdir.makeconftest("""
import py
def pytest_runtest_makereport(__multicall__):
rep = __multicall__.execute()
if rep.when == "call":
rep.longrepr = py.builtin._totext("\\xc3\\xa4", "utf8")
return rep
""")
testdir.makepyfile("""
def test_out():
assert 0
""")
result = testdir.runpytest()
assert result.ret == 1
assert "UnicodeEncodeError" not in result.stderr.str()
def test_failure_in_setup(testdir):
testdir.makepyfile("""
def setup_module():
0/0
def test_func():
pass
""")
result = testdir.runpytest("--tb=line")
assert "def setup_module" not in result.stdout.str()
def test_makereport_getsource(testdir):
testdir.makepyfile("""
def test_foo():
if False: pass
else: assert False
""")
result = testdir.runpytest()
assert 'INTERNALERROR' not in result.stdout.str()
result.stdout.fnmatch_lines(['*else: assert False*'])
def test_makereport_getsource_dynamic_code(testdir, monkeypatch):
"""Test that exception in dynamically generated code doesn't break getting the source line."""
import inspect
original_findsource = inspect.findsource
def findsource(obj, *args, **kwargs):
# Can be triggered by dynamically created functions
if obj.__name__ == 'foo':
raise IndexError()
return original_findsource(obj, *args, **kwargs)
monkeypatch.setattr(inspect, 'findsource', findsource)
testdir.makepyfile("""
import pytest
@pytest.fixture
def foo(missing):
pass
def test_fix(foo):
assert False
""")
result = testdir.runpytest('-vv')
assert 'INTERNALERROR' not in result.stdout.str()
result.stdout.fnmatch_lines(["*test_fix*", "*fixture*'missing'*not found*"])
def test_store_except_info_on_error():
""" Test that upon test failure, the exception info is stored on
sys.last_traceback and friends.
"""
# Simulate item that raises a specific exception
class ItemThatRaises:
def runtest(self):
raise IndexError('TEST')
try:
runner.pytest_runtest_call(ItemThatRaises())
except IndexError:
pass
# Check that exception info is stored on sys
assert sys.last_type is IndexError
assert sys.last_value.args[0] == 'TEST'
assert sys.last_traceback
| mpl-2.0 |
staystatic/staystatic | nikola/conf.py | 1 | 1050 | from __future__ import unicode_literals
import yaml
BLOG_AUTHOR = "Stay Static" # (translatable)
BLOG_TITLE = "Nikola Stay Static Sample" # (translatable)
SITE_URL = "http://staystatic.github.io/sites/nikola/"
BLOG_EMAIL = "n.tesla@example.com"
BLOG_DESCRIPTION = "Nikola demo for Stay Static" # (translatable)
COMMENT_SYSTEM = None
DEFAULT_LANG = "en"
TRANSLATIONS = {
DEFAULT_LANG: "",
}
NAVIGATION_LINKS = {}
THEME = "base"
THEME_COLOR = '#5670d4'
POSTS = (
("posts/*.md", "posts", "post.tmpl"),
("posts/*.txt", "posts", "post.tmpl"),
)
PAGES = (
("stories/*.md", "", "story.tmpl"),
)
TIMEZONE = "UTC"
COMPILERS = {
"rest": ('.rst', '.txt'),
"markdown": ('.md', '.mdown', '.markdown'),
}
INDEX_PATH = "posts"
COPY_SOURCES = False
SHOW_SOURCELINK = False
PRETTY_URLS = False
DISABLED_PLUGINS = ["robots"]
GLOBAL_CONTEXT = {}
WRITE_TAG_CLOUD = False
GENERATE_RSS = False
DISABLED_PLUGINS = ['classify_page_index', 'classify_sections', 'classify_indexes', 'classify_archive', 'tags', 'sitemap', 'robots', 'create_bundles']
| cc0-1.0 |
sopier/django | django/utils/http.py | 285 | 9978 | from __future__ import unicode_literals
import base64
import calendar
import datetime
import re
import sys
import unicodedata
from binascii import Error as BinasciiError
from email.utils import formatdate
from django.utils import six
from django.utils.datastructures import MultiValueDict
from django.utils.encoding import force_bytes, force_str, force_text
from django.utils.functional import allow_lazy
from django.utils.six.moves.urllib.parse import (
quote, quote_plus, unquote, unquote_plus, urlencode as original_urlencode,
urlparse,
)
ETAG_MATCH = re.compile(r'(?:W/)?"((?:\\.|[^"])*)"')
MONTHS = 'jan feb mar apr may jun jul aug sep oct nov dec'.split()
__D = r'(?P<day>\d{2})'
__D2 = r'(?P<day>[ \d]\d)'
__M = r'(?P<mon>\w{3})'
__Y = r'(?P<year>\d{4})'
__Y2 = r'(?P<year>\d{2})'
__T = r'(?P<hour>\d{2}):(?P<min>\d{2}):(?P<sec>\d{2})'
RFC1123_DATE = re.compile(r'^\w{3}, %s %s %s %s GMT$' % (__D, __M, __Y, __T))
RFC850_DATE = re.compile(r'^\w{6,9}, %s-%s-%s %s GMT$' % (__D, __M, __Y2, __T))
ASCTIME_DATE = re.compile(r'^\w{3} %s %s %s %s$' % (__M, __D2, __T, __Y))
RFC3986_GENDELIMS = str(":/?#[]@")
RFC3986_SUBDELIMS = str("!$&'()*+,;=")
PROTOCOL_TO_PORT = {
'http': 80,
'https': 443,
}
def urlquote(url, safe='/'):
"""
A version of Python's urllib.quote() function that can operate on unicode
strings. The url is first UTF-8 encoded before quoting. The returned string
can safely be used as part of an argument to a subsequent iri_to_uri() call
without double-quoting occurring.
"""
return force_text(quote(force_str(url), force_str(safe)))
urlquote = allow_lazy(urlquote, six.text_type)
def urlquote_plus(url, safe=''):
"""
A version of Python's urllib.quote_plus() function that can operate on
unicode strings. The url is first UTF-8 encoded before quoting. The
returned string can safely be used as part of an argument to a subsequent
iri_to_uri() call without double-quoting occurring.
"""
return force_text(quote_plus(force_str(url), force_str(safe)))
urlquote_plus = allow_lazy(urlquote_plus, six.text_type)
def urlunquote(quoted_url):
"""
A wrapper for Python's urllib.unquote() function that can operate on
the result of django.utils.http.urlquote().
"""
return force_text(unquote(force_str(quoted_url)))
urlunquote = allow_lazy(urlunquote, six.text_type)
def urlunquote_plus(quoted_url):
"""
A wrapper for Python's urllib.unquote_plus() function that can operate on
the result of django.utils.http.urlquote_plus().
"""
return force_text(unquote_plus(force_str(quoted_url)))
urlunquote_plus = allow_lazy(urlunquote_plus, six.text_type)
def urlencode(query, doseq=0):
"""
A version of Python's urllib.urlencode() function that can operate on
unicode strings. The parameters are first cast to UTF-8 encoded strings and
then encoded as per normal.
"""
if isinstance(query, MultiValueDict):
query = query.lists()
elif hasattr(query, 'items'):
query = query.items()
return original_urlencode(
[(force_str(k),
[force_str(i) for i in v] if isinstance(v, (list, tuple)) else force_str(v))
for k, v in query],
doseq)
def cookie_date(epoch_seconds=None):
"""
Formats the time to ensure compatibility with Netscape's cookie standard.
Accepts a floating point number expressed in seconds since the epoch, in
UTC - such as that outputted by time.time(). If set to None, defaults to
the current time.
Outputs a string in the format 'Wdy, DD-Mon-YYYY HH:MM:SS GMT'.
"""
rfcdate = formatdate(epoch_seconds)
return '%s-%s-%s GMT' % (rfcdate[:7], rfcdate[8:11], rfcdate[12:25])
def http_date(epoch_seconds=None):
"""
Formats the time to match the RFC1123 date format as specified by HTTP
RFC2616 section 3.3.1.
Accepts a floating point number expressed in seconds since the epoch, in
UTC - such as that outputted by time.time(). If set to None, defaults to
the current time.
Outputs a string in the format 'Wdy, DD Mon YYYY HH:MM:SS GMT'.
"""
return formatdate(epoch_seconds, usegmt=True)
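# Illustrative examples (not part of the original module):
#   http_date(0)   -> 'Thu, 01 Jan 1970 00:00:00 GMT'
#   cookie_date(0) -> 'Thu, 01-Jan-1970 00:00:00 GMT'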
def parse_http_date(date):
"""
Parses a date format as specified by HTTP RFC2616 section 3.3.1.
The three formats allowed by the RFC are accepted, even if only the first
one is still in widespread use.
Returns an integer expressed in seconds since the epoch, in UTC.
"""
# emails.Util.parsedate does the job for RFC1123 dates; unfortunately
# RFC2616 makes it mandatory to support RFC850 dates too. So we roll
# our own RFC-compliant parsing.
for regex in RFC1123_DATE, RFC850_DATE, ASCTIME_DATE:
m = regex.match(date)
if m is not None:
break
else:
raise ValueError("%r is not in a valid HTTP date format" % date)
try:
year = int(m.group('year'))
if year < 100:
if year < 70:
year += 2000
else:
year += 1900
month = MONTHS.index(m.group('mon').lower()) + 1
day = int(m.group('day'))
hour = int(m.group('hour'))
min = int(m.group('min'))
sec = int(m.group('sec'))
result = datetime.datetime(year, month, day, hour, min, sec)
return calendar.timegm(result.utctimetuple())
except Exception:
six.reraise(ValueError, ValueError("%r is not a valid date" % date), sys.exc_info()[2])
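# Illustrative example (not part of the original module), using the RFC 1123
# sample date from RFC 2616:
#   parse_http_date('Sun, 06 Nov 1994 08:49:37 GMT') -> 784111777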
def parse_http_date_safe(date):
"""
Same as parse_http_date, but returns None if the input is invalid.
"""
try:
return parse_http_date(date)
except Exception:
pass
# Base 36 functions: useful for generating compact URLs
def base36_to_int(s):
"""
Converts a base 36 string to an ``int``. Raises ``ValueError`` if the
input won't fit into an int.
"""
# To prevent overconsumption of server resources, reject any
# base36 string that is longer than 13 base36 digits (13 digits
# are sufficient to base36-encode any 64-bit integer)
if len(s) > 13:
raise ValueError("Base36 input too large")
value = int(s, 36)
# ... then do a final check that the value will fit into an int to avoid
# returning a long (#15067). The long type was removed in Python 3.
if six.PY2 and value > sys.maxint:
raise ValueError("Base36 input too large")
return value
def int_to_base36(i):
"""
Converts an integer to a base36 string
"""
char_set = '0123456789abcdefghijklmnopqrstuvwxyz'
if i < 0:
raise ValueError("Negative base36 conversion input.")
if six.PY2:
if not isinstance(i, six.integer_types):
raise TypeError("Non-integer base36 conversion input.")
if i > sys.maxint:
raise ValueError("Base36 conversion input too large.")
if i < 36:
return char_set[i]
b36 = ''
while i != 0:
i, n = divmod(i, 36)
b36 = char_set[n] + b36
return b36
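# Illustrative round-trip (not part of the original module):
#   int_to_base36(123456789) -> '21i3v9'
#   base36_to_int('21i3v9')  -> 123456789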
def urlsafe_base64_encode(s):
"""
Encodes a bytestring in base64 for use in URLs, stripping any trailing
equal signs.
"""
return base64.urlsafe_b64encode(s).rstrip(b'\n=')
def urlsafe_base64_decode(s):
"""
Decodes a base64 encoded string, adding back any trailing equal signs that
might have been stripped.
"""
s = force_bytes(s)
try:
return base64.urlsafe_b64decode(s.ljust(len(s) + len(s) % 4, b'='))
except (LookupError, BinasciiError) as e:
raise ValueError(e)
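# Illustrative round-trip (not part of the original module); note the trailing
# padding stripped by the encoder is restored before decoding:
#   urlsafe_base64_encode(b'hello')   -> b'aGVsbG8'
#   urlsafe_base64_decode(b'aGVsbG8') -> b'hello'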
def parse_etags(etag_str):
"""
Parses a string with one or several etags passed in If-None-Match and
If-Match headers by the rules in RFC 2616. Returns a list of etags
without surrounding double quotes (") and unescaped from \<CHAR>.
"""
etags = ETAG_MATCH.findall(etag_str)
if not etags:
# etag_str has wrong format, treat it as an opaque string then
return [etag_str]
etags = [e.encode('ascii').decode('unicode_escape') for e in etags]
return etags
def quote_etag(etag):
"""
Wraps a string in double quotes escaping contents as necessary.
"""
return '"%s"' % etag.replace('\\', '\\\\').replace('"', '\\"')
def same_origin(url1, url2):
"""
Checks if two URLs are 'same-origin'
"""
p1, p2 = urlparse(url1), urlparse(url2)
try:
o1 = (p1.scheme, p1.hostname, p1.port or PROTOCOL_TO_PORT[p1.scheme])
o2 = (p2.scheme, p2.hostname, p2.port or PROTOCOL_TO_PORT[p2.scheme])
return o1 == o2
except (ValueError, KeyError):
return False
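# Illustrative examples (not part of the original module); a missing port is
# filled in from PROTOCOL_TO_PORT before comparing:
#   same_origin('https://example.com/a', 'https://example.com:443/b') -> True
#   same_origin('http://example.com/', 'https://example.com/')        -> False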
def is_safe_url(url, host=None):
"""
Return ``True`` if the url is a safe redirection (i.e. it doesn't point to
a different host and uses a safe scheme).
Always returns ``False`` on an empty url.
"""
if url is not None:
url = url.strip()
if not url:
return False
# Chrome treats \ completely as /
url = url.replace('\\', '/')
# Chrome considers any URL with more than two slashes to be absolute, but
# urlparse is not so flexible. Treat any url with three slashes as unsafe.
if url.startswith('///'):
return False
url_info = urlparse(url)
# Forbid URLs like http:///example.com - with a scheme, but without a hostname.
# In that URL, example.com is not the hostname but a path component. However,
# Chrome will still consider example.com to be the hostname, so we must not
# allow this syntax.
if not url_info.netloc and url_info.scheme:
return False
# Forbid URLs that start with control characters. Some browsers (like
# Chrome) ignore quite a few control characters at the start of a
# URL and might consider the URL as scheme relative.
if unicodedata.category(url[0])[0] == 'C':
return False
return ((not url_info.netloc or url_info.netloc == host) and
(not url_info.scheme or url_info.scheme in ['http', 'https']))
| bsd-3-clause |
chrish42/pylearn | pylearn2/utils/tests/test_serial.py | 44 | 4438 | """
Tests for the pylearn2.utils.serial module. Currently only tests
the read_bin_lush_matrix and load_train_file functions.
"""
from theano.compat.six.moves import xrange
import pylearn2
from pylearn2.utils.serial import read_bin_lush_matrix, load_train_file
import numpy as np
pylearn2_path = pylearn2.__path__[0]
example_bin_lush_path = pylearn2_path + '/utils/tests/example_bin_lush/'
yaml_path = pylearn2_path + '/utils/tests/'
def test_read_bin_lush_matrix_ubyte_scalar():
"""
Read data from a lush file with uint8 data (scalar).
Note: When you write a scalar from Koray's matlab code it always makes
everything 3D. Writing it straight from lush you might be able to get
a true scalar
"""
path = example_bin_lush_path + 'ubyte_scalar.lushbin'
result = read_bin_lush_matrix(path)
assert str(result.dtype) == 'uint8'
assert len(result.shape) == 3
assert result.shape[0] == 1
assert result.shape[1] == 1
assert result.shape[2] == 1
assert result[0, 0] == 12
def test_read_bin_lush_matrix_ubyte_3tensor():
"""
Read data from a lush file with uint8 data (3D-tensor)
"""
path = example_bin_lush_path + 'ubyte_3tensor.lushbin'
result = read_bin_lush_matrix(path)
assert str(result.dtype) == 'uint8'
assert len(result.shape) == 3
if result.shape != (2, 3, 4):
raise AssertionError(
"ubyte_3tensor.lushbin stores a 3-tensor "
"of shape (2,3,4), but read_bin_lush_matrix thinks it has "
"shape " + str(result.shape)
)
for i in xrange(1, 3):
for j in xrange(1, 4):
for k in xrange(1, 5):
assert result[i-1, j-1, k-1] == i + 3 * j + 12 * k
def test_read_bin_lush_matrix_int_3tensor():
"""
Read data from a lush file with int32 data (3D-tensor)
"""
path = example_bin_lush_path + 'int_3tensor.lushbin'
result = read_bin_lush_matrix(path)
assert str(result.dtype) == 'int32'
assert len(result.shape) == 3
if result.shape != (3, 2, 4):
raise AssertionError(
"ubyte_3tensor.lushbin stores a 3-tensor "
"of shape (3,2,4), but read_bin_lush_matrix thinks it has "
"shape " + str(result.shape)
)
for i in xrange(1, result.shape[0]+1):
for j in xrange(1, result.shape[1]+1):
for k in xrange(1, result.shape[2]+1):
assert (result[i - 1, j - 1, k - 1] ==
(i + 10000 ** j) * ((-2) ** k))
def test_read_bin_lush_matrix_float_3tensor():
"""
Read data from a lush file with float32 data (3D-tensor)
"""
path = example_bin_lush_path + 'float_3tensor.lushbin'
result = read_bin_lush_matrix(path)
assert str(result.dtype) == 'float32'
assert len(result.shape) == 3
if result.shape != (4, 3, 2):
raise AssertionError(
"ubyte_3tensor.lushbin stores a 3-tensor "
"of shape (4,3,2), but read_bin_lush_matrix thinks it has "
"shape " + str(result.shape)
)
for i in xrange(1, result.shape[0] + 1):
for j in xrange(1, result.shape[1] + 1):
for k in xrange(1, result.shape[2] + 1):
assert np.allclose(result[i - 1, j - 1, k - 1],
i + 1.5 * j + 1.7 * k)
def test_read_bin_lush_matrix_double_3tensor():
"""
Read data from a lush file with float64 data (3D-tensor)
"""
path = example_bin_lush_path + 'double_3tensor.lushbin'
result = read_bin_lush_matrix(path)
assert str(result.dtype) == 'float64'
assert len(result.shape) == 3
if result.shape != (4, 2, 3):
raise AssertionError(
"ubyte_3tensor.lushbin stores a 3-tensor "
"of shape (4,2,3), but read_bin_lush_matrix thinks it has "
"shape " + str(result.shape)
)
for i in xrange(1, result.shape[0]+1):
for j in xrange(1, result.shape[1]+1):
for k in xrange(1, result.shape[2]+1):
assert np.allclose(result[i - 1, j - 1, k - 1],
i + 1.5 * j + (-1.7) ** k)
def test_load_train_file():
"""
Loads a YAML file with and without environment variables.
"""
environ = {
'PYLEARN2_DATA_PATH': '/just/a/test/path/'
}
load_train_file(yaml_path + 'test_model.yaml')
load_train_file(yaml_path + 'test_model.yaml', environ=environ)
| bsd-3-clause |
dfunckt/django | tests/admin_registration/tests.py | 46 | 4401 | from __future__ import unicode_literals
from django.contrib import admin
from django.contrib.admin.decorators import register
from django.contrib.admin.sites import site
from django.core.exceptions import ImproperlyConfigured
from django.test import SimpleTestCase
from .models import Location, Person, Place, Traveler
class NameAdmin(admin.ModelAdmin):
list_display = ['name']
save_on_top = True
class CustomSite(admin.AdminSite):
pass
class TestRegistration(SimpleTestCase):
def setUp(self):
self.site = admin.AdminSite()
def test_bare_registration(self):
self.site.register(Person)
self.assertIsInstance(self.site._registry[Person], admin.ModelAdmin)
def test_registration_with_model_admin(self):
self.site.register(Person, NameAdmin)
self.assertIsInstance(self.site._registry[Person], NameAdmin)
def test_prevent_double_registration(self):
self.site.register(Person)
with self.assertRaises(admin.sites.AlreadyRegistered):
self.site.register(Person)
def test_registration_with_star_star_options(self):
self.site.register(Person, search_fields=['name'])
self.assertEqual(self.site._registry[Person].search_fields, ['name'])
def test_star_star_overrides(self):
self.site.register(Person, NameAdmin, search_fields=["name"], list_display=['__str__'])
self.assertEqual(self.site._registry[Person].search_fields, ['name'])
self.assertEqual(self.site._registry[Person].list_display, ['__str__'])
self.assertTrue(self.site._registry[Person].save_on_top)
def test_iterable_registration(self):
self.site.register([Person, Place], search_fields=['name'])
self.assertIsInstance(self.site._registry[Person], admin.ModelAdmin)
self.assertEqual(self.site._registry[Person].search_fields, ['name'])
self.assertIsInstance(self.site._registry[Place], admin.ModelAdmin)
self.assertEqual(self.site._registry[Place].search_fields, ['name'])
def test_abstract_model(self):
"""
Exception is raised when trying to register an abstract model.
Refs #12004.
"""
with self.assertRaises(ImproperlyConfigured):
self.site.register(Location)
def test_is_registered_model(self):
"Checks for registered models should return true."
self.site.register(Person)
self.assertTrue(self.site.is_registered(Person))
def test_is_registered_not_registered_model(self):
"Checks for unregistered models should return false."
self.assertFalse(self.site.is_registered(Person))
class TestRegistrationDecorator(SimpleTestCase):
"""
Tests the register decorator in admin.decorators
For clarity:
@register(Person)
class AuthorAdmin(ModelAdmin):
pass
is functionally equal to (the way it is written in these tests):
AuthorAdmin = register(Person)(AuthorAdmin)
"""
def setUp(self):
self.default_site = site
self.custom_site = CustomSite()
def test_basic_registration(self):
register(Person)(NameAdmin)
self.assertIsInstance(self.default_site._registry[Person], admin.ModelAdmin)
self.default_site.unregister(Person)
def test_custom_site_registration(self):
register(Person, site=self.custom_site)(NameAdmin)
self.assertIsInstance(self.custom_site._registry[Person], admin.ModelAdmin)
def test_multiple_registration(self):
register(Traveler, Place)(NameAdmin)
self.assertIsInstance(self.default_site._registry[Traveler], admin.ModelAdmin)
self.default_site.unregister(Traveler)
self.assertIsInstance(self.default_site._registry[Place], admin.ModelAdmin)
self.default_site.unregister(Place)
def test_wrapped_class_not_a_model_admin(self):
with self.assertRaisesMessage(ValueError, 'Wrapped class must subclass ModelAdmin.'):
register(Person)(CustomSite)
def test_custom_site_not_an_admin_site(self):
with self.assertRaisesMessage(ValueError, 'site must subclass AdminSite'):
register(Person, site=Traveler)(NameAdmin)
def test_empty_models_list_registration_fails(self):
with self.assertRaisesMessage(ValueError, 'At least one model must be passed to register.'):
register()(NameAdmin)
| bsd-3-clause |
FRTNX/grassroot-learning | grassroot-nlu/distance.py | 1 | 1974 | import numpy as np
N = 100  # number of words to return
def generate():
with open('vocab.txt', 'r') as f:
words = [x.rstrip().split(' ')[0] for x in f.readlines()]
with open('vectors.txt', 'r') as f:
vectors = {}
for line in f:
vals = line.rstrip().split(' ')
vectors[vals[0]] = [float(x) for x in vals[1:]]
vocab_size = len(words)
vocab = {w: idx for idx, w in enumerate(words)}
ivocab = {idx: w for idx, w in enumerate(words)}
vector_dim = len(vectors[ivocab[0]])
W = np.zeros((vocab_size, vector_dim))
for word, v in vectors.items():
if word == '<unk>':
continue
W[vocab[word], :] = v
# normalize each word vector to unit variance
W_norm = np.zeros(W.shape)
d = (np.sum(W ** 2, 1) ** (0.5))
W_norm = (W.T / d).T
return (W_norm, vocab, ivocab)
W, vocab, ivocab = generate()
def distance(input_term, W=W, vocab=vocab, ivocab=ivocab):
for idx, term in enumerate(input_term.split(' ')):
if term in vocab:
print('Word: %s Position in vocabulary: %i' % (term, vocab[term]))
if idx == 0:
vec_result = np.copy(W[vocab[term], :])
else:
vec_result += W[vocab[term], :]
else:
print('Word: %s Out of dictionary!\n' % term)
return
vec_norm = np.zeros(vec_result.shape)
d = (np.sum(vec_result ** 2) ** 0.5)
vec_norm = (vec_result.T / d).T
dist = np.dot(W, vec_norm.T)
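# Since both the rows of W and vec_norm are unit-normalized, this dot
# product is the cosine similarity of the query against every vocabulary word.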
for term in input_term.split(' '):
index = vocab[term]
dist[index] = -np.Inf
a = np.argsort(-dist)[:N]
f = open('grassroot-universe-terms.txt', 'r')
universe = f.read().split()
f.close()
ret_val = {}
for x in a:
if ivocab[x] in universe:
new_val = {ivocab[x]: dist[x]}
ret_val = {**ret_val, **new_val}
return ret_val
| bsd-3-clause |
AOSP-S4-KK/platform_external_chromium_org | chrome/common/extensions/docs/server2/host_file_system_provider_test.py | 23 | 1704 | #!/usr/bin/env python
# Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
from copy import deepcopy
import unittest
from extensions_paths import API
from file_system import FileNotFoundError
from host_file_system_provider import HostFileSystemProvider
from object_store_creator import ObjectStoreCreator
from test_data.canned_data import CANNED_API_FILE_SYSTEM_DATA
from test_file_system import TestFileSystem
class HostFileSystemProviderTest(unittest.TestCase):
def setUp(self):
self._idle_path = '%s/idle.json' % API
self._canned_data = deepcopy(CANNED_API_FILE_SYSTEM_DATA)
def _constructor_for_test(self, branch, **optargs):
return TestFileSystem(self._canned_data[branch])
def testWithCaching(self):
creator = HostFileSystemProvider(
ObjectStoreCreator.ForTest(),
constructor_for_test=self._constructor_for_test)
fs = creator.GetBranch('1500')
first_read = fs.ReadSingle(self._idle_path).Get()
self._canned_data['1500']['chrome']['common']['extensions'].get('api')['idle.json'] = 'blah blah blah'
second_read = fs.ReadSingle(self._idle_path).Get()
self.assertEqual(first_read, second_read)
def testWithOffline(self):
creator = HostFileSystemProvider(
ObjectStoreCreator.ForTest(),
offline=True,
constructor_for_test=self._constructor_for_test)
fs = creator.GetBranch('1500')
# Offline file system should raise a FileNotFoundError if read is attempted.
self.assertRaises(FileNotFoundError, fs.ReadSingle(self._idle_path).Get)
if __name__ == '__main__':
unittest.main()
| bsd-3-clause |
ldoktor/autotest | client/shared/progressbar.py | 7 | 2575 | """
Basic text progress bar without fancy curses features
"""
__all__ = ['ProgressBar']
class ProgressBar:
'''
Displays interactively the progress of a given task
Inspired/adapted from code.activestate.com recipe #168639
'''
DEFAULT_WIDTH = 77
def __init__(self, minimum=0, maximum=100, width=DEFAULT_WIDTH, title=''):
'''
Initializes a new progress bar
@type minimum: integer
@param minimum: minimum (initial) value on the progress bar
@type maximum: integer
@param maximum: maximum (final) value on the progress bar
@type width: integer
@param width: number of columns, that is screen width
'''
assert maximum > minimum
self.minimum = minimum
self.maximum = maximum
self.range = maximum - minimum
self.width = width
self.title = title
self.current_amount = minimum
self.update(minimum)
def increment(self, increment, update_screen=True):
'''
Increments the current amount value
'''
self.update(self.current_amount + increment, update_screen)
def update(self, amount, update_screen=True):
'''
Performs sanity checks and update the current amount
'''
if amount < self.minimum: amount = self.minimum
if amount > self.maximum: amount = self.maximum
self.current_amount = amount
if update_screen:
self.update_screen()
def get_screen_text(self):
'''
Builds the actual progress bar text
'''
diff = float(self.current_amount - self.minimum)
done = (diff / float(self.range)) * 100.0
done = int(round(done))
total = self.width - 2
hashes = (done / 100.0) * total
hashes = int(round(hashes))
hashes_text = '#' * hashes
spaces_text = ' ' * (total - hashes)
screen_text = "[%s%s]" % (hashes_text, spaces_text)
percent_text = "%s%%" % done
percent_text_len = len(percent_text)
percent_position = (len(screen_text) / 2) - percent_text_len
screen_text = (screen_text[:percent_position] + percent_text +
screen_text[percent_position + percent_text_len:])
if self.title:
screen_text = '%s: %s' % (self.title, screen_text)
return screen_text
def update_screen(self):
'''
Prints the updated text to the screen
'''
print self.get_screen_text(), '\r',
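# Minimal usage sketch (illustrative; not part of the original module):
#   bar = ProgressBar(minimum=0, maximum=50, title='copy')
#   for _ in xrange(50):
#       bar.increment(1)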
| gpl-2.0 |
NeptuneFramework/neptune | neptune/server.py | 1 | 2153 | import os
from socket import (
AF_INET,
SO_REUSEADDR,
SOCK_STREAM,
SOL_SOCKET,
socket
)
from neptune.handler import NRequest
from neptune.adapter import NAdapter
from neptune.router import NRouter
from neptune.session import NSession
class NServer(object):
"""
Neptune Server
"""
def __init__(self, host='', port=7500):
self.nsocket = socket(AF_INET, SOCK_STREAM)
self.nsocket.setsockopt(SOL_SOCKET, SO_REUSEADDR, 1)
self.host = host
self.port = port
self.nsocket.bind((self.host, self.port))
# TODO: listen here or in run
self.nsocket.listen(3)
self.router = NRouter()
self.session = NSession()
def _process_request(self, request):
route = request.route
method = request.method.lower()
# TODO: Option to add decorators
try:
view_cls = self.router.get_cls(route)
setattr(view_cls, 'request', request)
view_func = getattr(view_cls, method)
return view_func()
except Exception as e:
print(str(e))
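# NOTE: on failure this falls through and returns None, so run() will
# currently crash on response.encoded(); a proper error response is a TODO.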
def run(self):
"""
The run handler
"""
print('Listening on port {}'.format(self.port))
while True:
connection, address = self.nsocket.accept()
print('Got connection from ', address) # Move it to if self.debug
data_recv = connection.recv(4096).decode() # why 4096 ? Think of better variable name too
request = NRequest(data_recv)
# if session_id cookie is there in request,
# set, self.session.curr_sess_id = that_id
if request.cookies.get('session_id'):
# Decrypt if encrypted etc.
self.session.curr_sess_id = request.cookies['session_id']
response = self._process_request(request)
# TODO: Add encryption
if self.session.used:
response.set_cookie(self.session.key, self.session.curr_sess_id)
self.session.clear_curr_sess()
connection.sendall(response.encoded())
connection.close()
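# Minimal usage sketch (illustrative; route registration via NRouter is
# assumed to happen elsewhere):
#   server = NServer(host='', port=7500)
#   server.run()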
| apache-2.0 |
nadgowdas/cargo | agent/codes.py | 1 | 1155 | #
#Copyright IBM Corporation 2015.
#LICENSE: Apache License 2.0 http://opensource.org/licenses/Apache-2.0
"""
Constant variables library
This library holds constants for all the return codes of the cargo system,
and possible HTTP status code constants in the context of this system.
Available Functions:
-herror: Mapping function from cargo return codes to standard HTTP status codes
"""
SUCCESS = 0
FAILED = 1
IGNORE_MSG = 2
DUP_REQUEST = 3
NOT_FOUND = 4
NO_MSG_READY = 5
BAD_REQ = 9
HTTP_SUCCESS = 200
HTTP_CREATED = 201
HTTP_NO_CONTENT = 204
HTTP_BAD_REQUEST = 400
HTTP_NOT_FOUND = 404
HTTP_CONFLICT = 409
HTTP_INTERNAL_ERR = 500
def herror(rc):
"""
Map the cargo system's return code to the standard HTTP status code
Args:
rc: cargo system's return code constant
Returns:
corresponding HTTP status code
"""
if rc in (SUCCESS, IGNORE_MSG, DUP_REQUEST):
return HTTP_SUCCESS
if rc == NOT_FOUND:
return HTTP_NOT_FOUND
if rc == NO_MSG_READY:
return HTTP_NO_CONTENT
if rc == BAD_REQ:
return HTTP_BAD_REQUEST
return HTTP_INTERNAL_ERR
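# Illustrative mappings (not part of the original module):
#   herror(SUCCESS)   -> 200 (HTTP_SUCCESS)
#   herror(NOT_FOUND) -> 404 (HTTP_NOT_FOUND)
#   herror(FAILED)    -> 500 (HTTP_INTERNAL_ERR)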
| apache-2.0 |
toshywoshy/ansible | lib/ansible/plugins/inventory/docker_swarm.py | 37 | 11922 | # -*- coding: utf-8 -*-
# Copyright (c) 2018, Stefan Heitmueller <stefan.heitmueller@gmx.com>
# Copyright (c) 2018 Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
DOCUMENTATION = '''
name: docker_swarm
plugin_type: inventory
version_added: '2.8'
author:
- Stefan Heitmüller (@morph027) <stefan.heitmueller@gmx.com>
short_description: Ansible dynamic inventory plugin for Docker swarm nodes.
requirements:
- python >= 2.7
- L(Docker SDK for Python,https://docker-py.readthedocs.io/en/stable/) >= 1.10.0
extends_documentation_fragment:
- constructed
description:
- Reads inventories from the Docker swarm API.
- Uses a YAML configuration file docker_swarm.[yml|yaml].
- "The plugin returns following groups of swarm nodes: I(all) - all hosts; I(workers) - all worker nodes;
I(managers) - all manager nodes; I(leader) - the swarm leader node;
I(nonleaders) - all nodes except the swarm leader."
options:
plugin:
description: The name of this plugin, it should always be set to C(docker_swarm) for this plugin to
recognize it as its own.
type: str
required: true
choices: docker_swarm
docker_host:
description:
- Socket of a Docker swarm manager node (C(tcp), C(unix)).
- "Use C(unix://var/run/docker.sock) to connect via local socket."
type: str
required: true
aliases: [ docker_url ]
verbose_output:
description: Toggle to (not) include all available nodes metadata (e.g. C(Platform), C(Architecture), C(OS),
C(EngineVersion))
type: bool
default: yes
tls:
description: Connect using TLS without verifying the authenticity of the Docker host server.
type: bool
default: no
validate_certs:
description: Toggle if connecting using TLS with or without verifying the authenticity of the Docker
host server.
type: bool
default: no
aliases: [ tls_verify ]
client_key:
description: Path to the client's TLS key file.
type: path
aliases: [ tls_client_key, key_path ]
ca_cert:
description: Use a CA certificate when performing server verification by providing the path to a CA
certificate file.
type: path
aliases: [ tls_ca_cert, cacert_path ]
client_cert:
description: Path to the client's TLS certificate file.
type: path
aliases: [ tls_client_cert, cert_path ]
tls_hostname:
description: When verifying the authenticity of the Docker host server, provide the expected name of
the server.
type: str
ssl_version:
description: Provide a valid SSL version number. Default value determined by ssl.py module.
type: str
api_version:
description:
- The version of the Docker API running on the Docker Host.
- Defaults to the latest version of the API supported by docker-py.
type: str
aliases: [ docker_api_version ]
timeout:
description:
- The maximum amount of time in seconds to wait on a response from the API.
- If the value is not specified in the task, the value of environment variable C(DOCKER_TIMEOUT)
will be used instead. If the environment variable is not set, the default value will be used.
type: int
default: 60
aliases: [ time_out ]
include_host_uri:
description: Toggle to return the additional attribute C(ansible_host_uri) which contains the URI of the
swarm leader in the format C(tcp://172.16.0.1:2376). This value may be used without additional
modification as the value of option I(docker_host) in Docker Swarm modules when connecting via the API.
The port defaults to C(2376) when TLS is in use and to C(2375) otherwise.
type: bool
default: no
include_host_uri_port:
description: Override the detected port number included in I(ansible_host_uri)
type: int
'''
EXAMPLES = '''
# Minimal example using local docker
plugin: docker_swarm
docker_host: unix://var/run/docker.sock
# Minimal example using remote docker
plugin: docker_swarm
docker_host: tcp://my-docker-host:2375
# Example using remote docker with unverified TLS
plugin: docker_swarm
docker_host: tcp://my-docker-host:2376
tls: yes
# Example using remote docker with verified TLS and client certificate verification
plugin: docker_swarm
docker_host: tcp://my-docker-host:2376
validate_certs: yes
ca_cert: /somewhere/ca.pem
client_key: /somewhere/key.pem
client_cert: /somewhere/cert.pem
# Example using constructed features to create groups and set ansible_host
plugin: docker_swarm
docker_host: tcp://my-docker-host:2375
strict: False
keyed_groups:
# add e.g. x86_64 hosts to an arch_x86_64 group
- prefix: arch
key: 'Description.Platform.Architecture'
# add e.g. linux hosts to an os_linux group
- prefix: os
key: 'Description.Platform.OS'
# create a group per node label
# e.g. a node labeled w/ "production" ends up in group "label_production"
# hint: labels containing special characters will be converted to safe names
- key: 'Spec.Labels'
prefix: label
'''
from ansible.errors import AnsibleError
from ansible.module_utils._text import to_native
from ansible.module_utils.docker.common import update_tls_hostname, get_connect_params
from ansible.plugins.inventory import BaseInventoryPlugin, Constructable
from ansible.parsing.utils.addresses import parse_address
try:
import docker
HAS_DOCKER = True
except ImportError:
HAS_DOCKER = False
class InventoryModule(BaseInventoryPlugin, Constructable):
''' Host inventory parser for ansible using Docker swarm as source. '''
NAME = 'docker_swarm'
def _fail(self, msg):
raise AnsibleError(msg)
def _populate(self):
raw_params = dict(
docker_host=self.get_option('docker_host'),
tls=self.get_option('tls'),
tls_verify=self.get_option('validate_certs'),
key_path=self.get_option('client_key'),
cacert_path=self.get_option('ca_cert'),
cert_path=self.get_option('client_cert'),
tls_hostname=self.get_option('tls_hostname'),
api_version=self.get_option('api_version'),
timeout=self.get_option('timeout'),
ssl_version=self.get_option('ssl_version'),
debug=None,
)
update_tls_hostname(raw_params)
connect_params = get_connect_params(raw_params, fail_function=self._fail)
self.client = docker.DockerClient(**connect_params)
self.inventory.add_group('all')
self.inventory.add_group('manager')
self.inventory.add_group('worker')
self.inventory.add_group('leader')
self.inventory.add_group('nonleaders')
if self.get_option('include_host_uri'):
if self.get_option('include_host_uri_port'):
host_uri_port = str(self.get_option('include_host_uri_port'))
elif self.get_option('tls') or self.get_option('validate_certs'):
host_uri_port = '2376'
else:
host_uri_port = '2375'
try:
self.nodes = self.client.nodes.list()
for self.node in self.nodes:
self.node_attrs = self.client.nodes.get(self.node.id).attrs
self.inventory.add_host(self.node_attrs['ID'])
self.inventory.add_host(self.node_attrs['ID'], group=self.node_attrs['Spec']['Role'])
self.inventory.set_variable(self.node_attrs['ID'], 'ansible_host',
self.node_attrs['Status']['Addr'])
if self.get_option('include_host_uri'):
self.inventory.set_variable(self.node_attrs['ID'], 'ansible_host_uri',
'tcp://' + self.node_attrs['Status']['Addr'] + ':' + host_uri_port)
if self.get_option('verbose_output'):
self.inventory.set_variable(self.node_attrs['ID'], 'docker_swarm_node_attributes', self.node_attrs)
if 'ManagerStatus' in self.node_attrs:
if self.node_attrs['ManagerStatus'].get('Leader'):
# This is a workaround for a bug in Docker where in some cases the Leader IP is 0.0.0.0
# Check moby/moby#35437 for details
swarm_leader_ip = parse_address(self.node_attrs['ManagerStatus']['Addr'])[0] or \
self.node_attrs['Status']['Addr']
if self.get_option('include_host_uri'):
self.inventory.set_variable(self.node_attrs['ID'], 'ansible_host_uri',
'tcp://' + swarm_leader_ip + ':' + host_uri_port)
self.inventory.set_variable(self.node_attrs['ID'], 'ansible_host', swarm_leader_ip)
self.inventory.add_host(self.node_attrs['ID'], group='leader')
else:
self.inventory.add_host(self.node_attrs['ID'], group='nonleaders')
else:
self.inventory.add_host(self.node_attrs['ID'], group='nonleaders')
# Use constructed if applicable
strict = self.get_option('strict')
# Composed variables
self._set_composite_vars(self.get_option('compose'),
self.node_attrs,
self.node_attrs['ID'],
strict=strict)
# Complex groups based on jinja2 conditionals, hosts that meet the conditional are added to group
self._add_host_to_composed_groups(self.get_option('groups'),
self.node_attrs,
self.node_attrs['ID'],
strict=strict)
# Create groups based on variable values and add the corresponding hosts to it
self._add_host_to_keyed_groups(self.get_option('keyed_groups'),
self.node_attrs,
self.node_attrs['ID'],
strict=strict)
except Exception as e:
raise AnsibleError('Unable to fetch hosts from Docker swarm API, this was the original exception: %s' %
to_native(e))
def verify_file(self, path):
"""Return the possibly of a file being consumable by this plugin."""
return (
super(InventoryModule, self).verify_file(path) and
path.endswith((self.NAME + '.yaml', self.NAME + '.yml')))
def parse(self, inventory, loader, path, cache=True):
if not HAS_DOCKER:
raise AnsibleError('The Docker swarm dynamic inventory plugin requires the Docker SDK for Python: '
'https://github.com/docker/docker-py.')
super(InventoryModule, self).parse(inventory, loader, path, cache)
self._read_config_data(path)
self._populate()
| gpl-3.0 |
shaanlan/youtube-dl | youtube_dl/extractor/nytimes.py | 116 | 4325 | from __future__ import unicode_literals
from .common import InfoExtractor
from ..utils import (
float_or_none,
int_or_none,
parse_iso8601,
)
class NYTimesBaseIE(InfoExtractor):
def _extract_video_from_id(self, video_id):
video_data = self._download_json(
'http://www.nytimes.com/svc/video/api/v2/video/%s' % video_id,
video_id, 'Downloading video JSON')
title = video_data['headline']
description = video_data.get('summary')
duration = float_or_none(video_data.get('duration'), 1000)
uploader = video_data['byline']
timestamp = parse_iso8601(video_data['publication_date'][:-8])
def get_file_size(file_size):
if isinstance(file_size, int):
return file_size
elif isinstance(file_size, dict):
return int(file_size.get('value', 0))
else:
return 0
formats = [
{
'url': video['url'],
'format_id': video.get('type'),
'vcodec': video.get('video_codec'),
'width': int_or_none(video.get('width')),
'height': int_or_none(video.get('height')),
'filesize': get_file_size(video.get('fileSize')),
} for video in video_data['renditions']
]
self._sort_formats(formats)
thumbnails = [
{
'url': 'http://www.nytimes.com/%s' % image['url'],
'width': int_or_none(image.get('width')),
'height': int_or_none(image.get('height')),
} for image in video_data['images']
]
return {
'id': video_id,
'title': title,
'description': description,
'timestamp': timestamp,
'uploader': uploader,
'duration': duration,
'formats': formats,
'thumbnails': thumbnails,
}
class NYTimesIE(NYTimesBaseIE):
_VALID_URL = r'https?://(?:(?:www\.)?nytimes\.com/video/(?:[^/]+/)+?|graphics8\.nytimes\.com/bcvideo/\d+(?:\.\d+)?/iframe/embed\.html\?videoId=)(?P<id>\d+)'
_TESTS = [{
'url': 'http://www.nytimes.com/video/opinion/100000002847155/verbatim-what-is-a-photocopier.html?playlistId=100000001150263',
'md5': '18a525a510f942ada2720db5f31644c0',
'info_dict': {
'id': '100000002847155',
'ext': 'mov',
'title': 'Verbatim: What Is a Photocopier?',
'description': 'md5:93603dada88ddbda9395632fdc5da260',
'timestamp': 1398631707,
'upload_date': '20140427',
'uploader': 'Brett Weiner',
'duration': 419,
}
}, {
'url': 'http://www.nytimes.com/video/travel/100000003550828/36-hours-in-dubai.html',
'only_matching': True,
}]
def _real_extract(self, url):
video_id = self._match_id(url)
return self._extract_video_from_id(video_id)
class NYTimesArticleIE(NYTimesBaseIE):
_VALID_URL = r'https?://(?:www\.)?nytimes\.com/(.(?<!video))*?/(?:[^/]+/)*(?P<id>[^.]+)(?:\.html)?'
_TESTS = [{
'url': 'http://www.nytimes.com/2015/04/14/business/owner-of-gravity-payments-a-credit-card-processor-is-setting-a-new-minimum-wage-70000-a-year.html?_r=0',
'md5': 'e2076d58b4da18e6a001d53fd56db3c9',
'info_dict': {
'id': '100000003628438',
'ext': 'mov',
'title': 'New Minimum Wage: $70,000 a Year',
'description': 'Dan Price, C.E.O. of Gravity Payments, surprised his 120-person staff by announcing that he planned over the next three years to raise the salary of every employee to $70,000 a year.',
'timestamp': 1429033037,
'upload_date': '20150414',
'uploader': 'Matthew Williams',
}
}, {
'url': 'http://www.nytimes.com/news/minute/2014/03/17/times-minute-whats-next-in-crimea/?_php=true&_type=blogs&_php=true&_type=blogs&_r=1',
'only_matching': True,
}]
def _real_extract(self, url):
video_id = self._match_id(url)
webpage = self._download_webpage(url, video_id)
video_id = self._html_search_regex(r'data-videoid="(\d+)"', webpage, 'video id')
return self._extract_video_from_id(video_id)
| unlicense |
shishaochen/TensorFlow-0.8-Win | third_party/grpc/tools/run_tests/package_targets.py | 3 | 5215 | #!/usr/bin/env python2.7
# Copyright 2016, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Definition of targets to build distribution packages."""
import jobset
def create_docker_jobspec(name, dockerfile_dir, shell_command, environ={},
flake_retries=0, timeout_retries=0):
"""Creates jobspec for a task running under docker."""
environ = environ.copy()
environ['RUN_COMMAND'] = shell_command
docker_args=[]
for k, v in environ.iteritems():
docker_args += ['-e', '%s=%s' % (k, v)]
docker_env = {'DOCKERFILE_DIR': dockerfile_dir,
'DOCKER_RUN_SCRIPT': 'tools/run_tests/dockerize/docker_run.sh',
'OUTPUT_DIR': 'artifacts'}
jobspec = jobset.JobSpec(
cmdline=['tools/run_tests/dockerize/build_and_run_docker.sh'] + docker_args,
environ=docker_env,
shortname='build_package.%s' % (name),
timeout_seconds=30*60,
flake_retries=flake_retries,
timeout_retries=timeout_retries)
return jobspec
def create_jobspec(name, cmdline, environ=None, cwd=None, shell=False,
flake_retries=0, timeout_retries=0):
"""Creates jobspec."""
jobspec = jobset.JobSpec(
cmdline=cmdline,
environ=environ,
cwd=cwd,
shortname='build_package.%s' % (name),
timeout_seconds=10*60,
flake_retries=flake_retries,
timeout_retries=timeout_retries,
shell=shell)
return jobspec
class CSharpPackage:
"""Builds C# nuget packages."""
def __init__(self):
self.name = 'csharp_package'
self.labels = ['package', 'csharp', 'windows']
def pre_build_jobspecs(self):
return []
def build_jobspec(self):
return create_jobspec(self.name,
['build_packages.bat'],
cwd='src\\csharp',
shell=True)
def __str__(self):
return self.name
class NodePackage:
"""Builds Node NPM package and collects precompiled binaries"""
def __init__(self):
self.name = 'node_package'
self.labels = ['package', 'node', 'linux']
def pre_build_jobspecs(self):
return []
def build_jobspec(self):
return create_docker_jobspec(
self.name,
'tools/dockerfile/grpc_artifact_linux_x64',
'tools/run_tests/build_package_node.sh')
class RubyPackage:
"""Collects ruby gems created in the artifact phase"""
def __init__(self):
self.name = 'ruby_package'
self.labels = ['package', 'ruby', 'linux']
def pre_build_jobspecs(self):
return []
def build_jobspec(self):
return create_docker_jobspec(
self.name,
'tools/dockerfile/grpc_artifact_linux_x64',
'tools/run_tests/build_package_ruby.sh')
class PythonPackage:
"""Collects python eggs and wheels created in the artifact phase"""
def __init__(self):
self.name = 'python_package'
self.labels = ['package', 'python', 'linux']
def pre_build_jobspecs(self):
return []
def build_jobspec(self):
return create_docker_jobspec(
self.name,
'tools/dockerfile/grpc_artifact_linux_x64',
'tools/run_tests/build_package_python.sh')
class PHPPackage:
"""Copy PHP PECL package artifact"""
def __init__(self):
self.name = 'php_package'
self.labels = ['package', 'php', 'linux']
def pre_build_jobspecs(self):
return []
def build_jobspec(self):
return create_docker_jobspec(
self.name,
'tools/dockerfile/grpc_artifact_linux_x64',
'tools/run_tests/build_package_php.sh')
def targets():
"""Gets list of supported targets"""
return [CSharpPackage(),
NodePackage(),
RubyPackage(),
PythonPackage(),
PHPPackage()]
| apache-2.0 |
Flumotion/flumotion | flumotion/test/test_component_httpserver_httpcached_stats.py | 3 | 5896 | # -*- Mode: Python; test-case-name: flumotion.test.test_common -*-
# vi:si:et:sw=4:sts=4:ts=4
# Flumotion - a streaming media server
# Copyright (C) 2004,2005,2006,2007,2008,2009 Fluendo, S.L.
# Copyright (C) 2010,2011 Flumotion Services, S.A.
# All rights reserved.
#
# This file may be distributed and/or modified under the terms of
# the GNU Lesser General Public License version 2.1 as published by
# the Free Software Foundation.
# This file is distributed without any warranty; without even the implied
# warranty of merchantability or fitness for a particular purpose.
# See "LICENSE.LGPL" in the source distribution for more information.
#
# Headers in this file shall remain intact.
import os
import shutil
import tempfile
import twisted
from twisted.internet import reactor, defer
from twisted.trial import unittest
import twisted.copyright
if twisted.copyright.version == "SVN-Trunk":
SKIP_MSG = "Twisted 2.0.1 thread pool is broken for tests"
else:
SKIP_MSG = None
from twisted.web import resource
from twisted.web.server import Site
from twisted.web.static import File
from flumotion.common.testsuite import TestCase
from flumotion.component.misc.httpserver.httpcached import file_provider
from flumotion.component.misc.httpserver import fileprovider
CACHE_SIZE = 1024 * 1024
twisted.internet.base.DelayedCall.debug = True
class DummyStats(object):
def __init__(self):
self.stats = {}
def update(self, key, val):
self.stats[key] = val
class TestHTTPCachedPlugStats(TestCase):
skip = SKIP_MSG
def setUp(self):
from twisted.python import threadpool
reactor.threadpool = threadpool.ThreadPool(0, 10)
reactor.threadpool.start()
class Hello(resource.Resource):
isLeaf = True
def render_GET(self, request):
return "<html>Hello, world!</html>"
self.src_path = tempfile.mkdtemp(suffix=".src")
self.cache_path = tempfile.mkdtemp(suffix=".cache")
self._resource = None
self.createSrcFile("a", "content of a")
src = File(self.src_path)
src.putChild("hello", Hello())
factory = Site(src)
self.httpserver = reactor.listenTCP(0, factory)
p = self.httpserver.getHost().port
plugProps = {"properties": {"cache-size": CACHE_SIZE,
"cache-dir": self.cache_path,
"virtual-hostname": "localhost",
"virtual-port": p}}
self.plug = \
file_provider.FileProviderHTTPCachedPlug(plugProps)
self.stats = DummyStats()
self.plug.startStatsUpdates(self.stats)
return self.plug.start(None)
def tearDown(self):
d = self.plug.stop(None)
def finish_cleanup(_):
self.plug.stopStatsUpdates()
self.httpserver.stopListening()
shutil.rmtree(self.cache_path, ignore_errors=True)
shutil.rmtree(self.src_path, ignore_errors=True)
reactor.threadpool.stop()
reactor.threadpool = None
d.addCallback(finish_cleanup)
return d
def test404(self):
d = self.plug.getRootPath().child("SuperMan").open()
d.addCallback(lambda x: x.close())
d.addErrback(lambda f: f.trap(fileprovider.NotFoundError))
d.addCallback(lambda x:
self.failIf('cache-hit-count' in self.stats.stats))
d.addCallback(lambda x:
self.failIf('cache-miss-count' in self.stats.stats))
return d
def testMissHit(self):
d = self.plug.getRootPath().child("a").open()
d.addCallback(lambda x: self.checkResourceContent(x, "content of a"))
d.addCallback(lambda x: setattr(self, "_resource", x))
d.addCallback(lambda x: self._resource.getLogFields())
d.addCallback(lambda x: self.failIf(x['cache-status'] == 'cache-hit'))
d.addCallback(lambda x: self._resource.close())
d.addCallback(lambda x:
self.assertEqual(self.stats.stats['cache-miss-count'], 1))
d.addCallback(lambda x:
self.assertEqual(self.stats.stats['cache-hit-count'], 0))
d.addCallback(lambda x:
self.assertEqual(self.stats.stats['temp-hit-count'], 0))
d.addCallback(lambda x: self.plug.getRootPath().child("a").open())
d.addCallback(lambda x: setattr(self, "_resource", x))
d.addCallback(lambda x: self._resource.close())
d.addCallback(lambda x:
self.assertEqual(self.stats.stats['cache-miss-count'], 1))
d.addCallback(lambda x:
self.assertEqual(self.stats.stats['cache-hit-count'], 1))
d.addCallback(lambda x: self.plug.getRootPath().child("a").open())
d.addCallback(lambda x: setattr(self, "_resource", x))
d.addCallback(lambda x: self._resource.close())
d.addCallback(lambda x:
self.assertEqual(self.stats.stats['cache-miss-count'], 1))
d.addCallback(lambda x:
self.assertEqual(self.stats.stats['cache-hit-count'], 2))
return d
### Helper functions ###
def createSrcFile(self, name, data):
fname = os.path.join(self.src_path, name)
testFile = open(fname, "w")
testFile.write(data)
testFile.close()
def checkResourceContent(self, resource, content):
d = resource.read(resource.getsize())
d.addCallback(lambda d: self.failIf(d != content))
d.addCallback(lambda _: resource)
return d
def cleanUpCache(self):
shutil.rmtree(self.cache_path, ignore_errors=True)
os.makedirs(self.cache_path)
def bp(self, result):
import pdb
print str(result)
pdb.set_trace()
return result
def delay(ret, t):
d = defer.Deferred()
reactor.callLater(t, d.callback, ret)
return d
| lgpl-2.1 |
40223240/2015cdb_g3_40223240 | static/Brython3.1.1-20150328-091302/Lib/site-packages/highlight.py | 617 | 2518 | import keyword
import _jsre as re
from browser import html
letters = 'abcdefghijklmnopqrstuvwxyz'
letters += letters.upper()+'_'
digits = '0123456789'
builtin_funcs = ("abs|divmod|input|open|staticmethod|all|enumerate|int|ord|str|any|" +
"eval|isinstance|pow|sum|basestring|execfile|issubclass|print|super|" +
"binfile|iter|property|tuple|bool|filter|len|range|type|bytearray|" +
"float|list|raw_input|unichr|callable|format|locals|reduce|unicode|" +
"chr|frozenset|long|reload|vars|classmethod|getattr|map|repr|xrange|" +
"cmp|globals|max|reversed|zip|compile|hasattr|memoryview|round|" +
"__import__|complex|hash|min|set|apply|delattr|help|next|setattr|" +
"buffer|dict|hex|object|slice|coerce|dir|id|oct|sorted|intern")
kw_pattern = '^('+'|'.join(keyword.kwlist)+')$'
bf_pattern = '^('+builtin_funcs+')$'
def highlight(txt, string_color="blue", comment_color="green",
keyword_color="purple"):
res = html.PRE()
i = 0
name = ''
while i<len(txt):
car = txt[i]
if car in ["'",'"']:
k = i+1
while k<len(txt):
if txt[k]==car:
nb_as = 0
j = k-1
while True:
if txt[j]=='\\':
nb_as+=1
j -= 1
else:
break
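# (illustrative, added) the backslash count decides whether the quote is
# escaped: in "a \" b" one backslash precedes it (odd count, still inside
# the string), while in "a \\" two do (even count, the string ends here).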
if nb_as % 2 == 0:
res <= html.SPAN(txt[i:k+1],
style=dict(color=string_color))
i = k
break
k += 1
elif car == '#': # comment
end = txt.find('\n', i)
if end == -1:
res <= html.SPAN(txt[i:],style=dict(color=comment_color))
break
else:
res <= html.SPAN(txt[i:end],style=dict(color=comment_color))
i = end-1
elif car in letters:
name += car
elif car in digits and name:
name += car
else:
if name:
if re.search(kw_pattern,name):
res <= html.SPAN(name,style=dict(color=keyword_color))
elif re.search(bf_pattern,name):
res <= html.SPAN(name,style=dict(color=keyword_color))
else:
res <= name
name = ''
res <= car
i += 1
res <= name
return res | gpl-3.0 |
jakule/andiff | tests/sanity_check.py | 1 | 5914 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
""" Sanity check for andiff and anpatch applications
"""
import os
import time
import tempfile
import hashlib
import logging
import itertools
import argparse
import subprocess
from sys import stdout
TMP_LOCATION = '/tmp'
""" Location of temporary directory """
class CmdColors:
""" Simple class to color printed output.
The helper methods color output iff a console is attached to stdout,
which keeps ANSI escape codes out of redirected log files.
"""
OK_GREEN = '\033[92m'
WARNING = '\033[93m'
FAIL = '\033[91m'
END = '\033[0m'
@staticmethod
def __make_color(color, text):
""" Here's actual magic happen.
Returns:
str: Colored text
"""
if stdout.isatty():
return color + text + CmdColors.END
return text
@staticmethod
def make_red(text):
""" Return text in red
Returns:
str: Colored text
"""
return CmdColors.__make_color(CmdColors.FAIL, text)
@staticmethod
def make_green(text):
""" Return text in green
Returns:
str: Colored text
"""
return CmdColors.__make_color(CmdColors.OK_GREEN, text)
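# Illustrative usage (not part of the original script): colors only show
# up when stdout is a terminal, so redirecting to a file keeps logs plain.
#
#     print(CmdColors.make_green('OK'))    # green when run interactively
#     print(CmdColors.make_red('FAIL'))    # plain 'FAIL' when piped to a file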
def create_tmp_file(tmp_dir, file_size):
""" Create temporary file with optional size
Args:
tmp_dir: Directory where file should be created
file_size: Size of temporary file
Returns:
str: Created filename
"""
tmp_file_fd, tmp_file = tempfile.mkstemp(dir=tmp_dir)
if file_size == 0:
os.close(tmp_file_fd)
return tmp_file
tmp_data = os.urandom(file_size)
os.write(tmp_file_fd, tmp_data)
os.close(tmp_file_fd)
del tmp_data
return tmp_file
def calculate_file_hash(filename):
""" Calculate MD5 check-sum for given file
Args:
filename: Location of file
Returns:
str: MD5 check-sum
"""
with open(filename, 'rb') as file:
return hashlib.md5(file.read()).hexdigest()
def run_application(args):
""" Helper function to run external application and print
execution time.
Args:
args[List]: Application with all arguments
"""
start = time.time()
subprocess.check_call(args, stdout=subprocess.DEVNULL, stderr=subprocess.DEVNULL)
done = time.time()
elapsed = done - start
logging.debug('Command took %fs', elapsed)
def run_test(tmp_dir, files_size, andiff_app, anpatch_app):
""" Run actual test
Args:
tmp_dir: Temporary directory for test
files_size: Size of temporary file in bytes
andiff_app: Location of andiff app
anpatch_app: Location of anpatch app
"""
source_file = create_tmp_file(tmp_dir=tmp_dir, file_size=files_size)
logging.debug('Creating source file %s of size %s bytes', source_file, files_size)
target_file = create_tmp_file(tmp_dir=tmp_dir, file_size=files_size)
logging.debug('Creating target file %s of size %s bytes', target_file, files_size)
patch_file = create_tmp_file(tmp_dir=tmp_dir, file_size=0)
logging.debug('Patch file has been created: %s', patch_file)
logging.debug('Running andiff')
run_application((andiff_app, source_file, target_file, patch_file))
patched_file = create_tmp_file(tmp_dir=tmp_dir, file_size=0)
logging.debug('Patched file has been created: %s', patched_file)
logging.debug('Running anpatch')
run_application((anpatch_app, source_file, patched_file, patch_file))
logging.debug('Calculating hashes')
target_file_md5 = calculate_file_hash(target_file)
logging.debug('Target file: %s', target_file_md5)
patched_file_md5 = calculate_file_hash(patched_file)
logging.debug('Patched file: %s', patched_file_md5)
if patched_file_md5 == target_file_md5:
logging.info('Result: ' + CmdColors.make_green('OK'))
else:
logging.critical('Result: ' + CmdColors.make_red('FAIL'))
raise Exception('Something went wrong. Leaving broken files')
for file_to_remove in [target_file, source_file, patched_file, patch_file]:
os.unlink(file_to_remove)
def main():
""" Main program function """
parser = argparse.ArgumentParser(description='Test the andiff and anpatch applications')
parser.add_argument('--diff', metavar='andiff', type=str, required=True,
help='Location of andiff application')
parser.add_argument('--patch', metavar='anpatch', type=str, required=True,
help='Location of anpatch application')
parser.add_argument('--size', type=int, default=10, help='Size of test file in MiB')
parser.add_argument('--repeat', type=int, default=1, help='Repeat test n times')
parser.add_argument('-v', '--verbose', dest='verbose', action='store_true',
help='Enable verbose output')
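# Example invocation (illustrative; binary paths are placeholders):
#   ./sanity_check.py --diff build/andiff --patch build/anpatch --size 10 --repeat 3 --verbose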
args = parser.parse_args()
logging_level = logging.INFO
logging_format = '%(message)s'
if args.verbose:
logging_level = logging.DEBUG
logging_format = '%(asctime)s %(message)s'
logging.basicConfig(format=logging_format, level=logging_level)
logging.debug(args)
print('Start andiff sanity check')
logging.debug('Output is attached to console: ' + str(stdout.isatty()))
andiff_app = os.path.abspath(args.diff)
anpatch_app = os.path.abspath(args.patch)
logging.debug('andiff location: %s', andiff_app)
logging.debug('anpatch location: %s', anpatch_app)
tmp_dir = tempfile.mkdtemp(prefix='andiff', dir=TMP_LOCATION)
logging.debug('Temporary directory: %s', tmp_dir)
files_size = args.size * 1024 * 1024
for _ in itertools.repeat(None, args.repeat):
run_test(tmp_dir=tmp_dir, files_size=files_size,
andiff_app=andiff_app, anpatch_app=anpatch_app)
os.rmdir(tmp_dir)
if __name__ == '__main__':
main()
| bsd-2-clause |
ltilve/ChromiumGStreamerBackend | tools/telemetry/third_party/gsutilz/third_party/boto/tests/integration/route53/test_resourcerecordsets.py | 113 | 3293 | # Copyright (c) 2013 Amazon.com, Inc. or its affiliates. All Rights Reserved
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
#
from tests.compat import unittest
from tests.integration.route53 import Route53TestCase
from boto.route53.record import ResourceRecordSets
class TestRoute53ResourceRecordSets(Route53TestCase):
def test_add_change(self):
rrs = ResourceRecordSets(self.conn, self.zone.id)
created = rrs.add_change("CREATE", "vpn.%s." % self.base_domain, "A")
created.add_value('192.168.0.25')
rrs.commit()
rrs = ResourceRecordSets(self.conn, self.zone.id)
deleted = rrs.add_change('DELETE', "vpn.%s." % self.base_domain, "A")
deleted.add_value('192.168.0.25')
rrs.commit()
def test_record_count(self):
rrs = ResourceRecordSets(self.conn, self.zone.id)
hosts = 101
for hostid in range(hosts):
rec = "test" + str(hostid) + ".%s" % self.base_domain
created = rrs.add_change("CREATE", rec, "A")
ip = '192.168.0.' + str(hostid)
created.add_value(ip)
# Max 100 changes per commit
if (hostid + 1) % 100 == 0:
rrs.commit()
rrs = ResourceRecordSets(self.conn, self.zone.id)
rrs.commit()
all_records = self.conn.get_all_rrsets(self.zone.id)
# Iterating over the result set a first time always worked
i = 0
for rset in all_records:
i += 1
# A second iteration used to fail, so count the records again
i = 0
for rset in all_records:
i += 1
# Clean up individual records
rrs = ResourceRecordSets(self.conn, self.zone.id)
for hostid in range(hosts):
rec = "test" + str(hostid) + ".%s" % self.base_domain
deleted = rrs.add_change("DELETE", rec, "A")
ip = '192.168.0.' + str(hostid)
deleted.add_value(ip)
# Max 100 changes per commit
if (hostid + 1) % 100 == 0:
rrs.commit()
rrs = ResourceRecordSets(self.conn, self.zone.id)
rrs.commit()
# 2nd count should match the number of hosts plus NS/SOA records
records = hosts + 2
self.assertEqual(i, records)
if __name__ == '__main__':
unittest.main()
| bsd-3-clause |
cherez/youtube-dl | youtube_dl/extractor/youtube.py | 1 | 95453 | # coding: utf-8
from __future__ import unicode_literals
import itertools
import json
import os.path
import re
import time
import traceback
from .common import InfoExtractor, SearchInfoExtractor
from ..jsinterp import JSInterpreter
from ..swfinterp import SWFInterpreter
from ..compat import (
compat_chr,
compat_parse_qs,
compat_urllib_parse,
compat_urllib_parse_unquote,
compat_urllib_parse_unquote_plus,
compat_urllib_parse_urlparse,
compat_urlparse,
compat_str,
)
from ..utils import (
clean_html,
encode_dict,
ExtractorError,
float_or_none,
get_element_by_attribute,
get_element_by_id,
int_or_none,
orderedSet,
parse_duration,
remove_start,
sanitized_Request,
smuggle_url,
str_to_int,
unescapeHTML,
unified_strdate,
unsmuggle_url,
uppercase_escape,
ISO3166Utils,
)
class YoutubeBaseInfoExtractor(InfoExtractor):
"""Provide base functions for Youtube extractors"""
_LOGIN_URL = 'https://accounts.google.com/ServiceLogin'
_TWOFACTOR_URL = 'https://accounts.google.com/signin/challenge'
_NETRC_MACHINE = 'youtube'
# If True it will raise an error if no login info is provided
_LOGIN_REQUIRED = False
def _set_language(self):
self._set_cookie(
'.youtube.com', 'PREF', 'f1=50000000&hl=en',
# YouTube sets the expire time to about two months
expire_time=time.time() + 2 * 30 * 24 * 3600)
def _ids_to_results(self, ids):
return [
self.url_result(vid_id, 'Youtube', video_id=vid_id)
for vid_id in ids]
def _login(self):
"""
Attempt to log in to YouTube.
True is returned if successful or skipped.
False is returned if login failed.
If _LOGIN_REQUIRED is set and no authentication was provided, an error is raised.
"""
(username, password) = self._get_login_info()
# No authentication to be performed
if username is None:
if self._LOGIN_REQUIRED:
raise ExtractorError('No login info available, needed for using %s.' % self.IE_NAME, expected=True)
return True
login_page = self._download_webpage(
self._LOGIN_URL, None,
note='Downloading login page',
errnote='unable to fetch login page', fatal=False)
if login_page is False:
return
galx = self._search_regex(r'(?s)<input.+?name="GALX".+?value="(.+?)"',
login_page, 'Login GALX parameter')
# Log in
login_form_strs = {
'continue': 'https://www.youtube.com/signin?action_handle_signin=true&feature=sign_in_button&hl=en_US&nomobiletemp=1',
'Email': username,
'GALX': galx,
'Passwd': password,
'PersistentCookie': 'yes',
'_utf8': '霱',
'bgresponse': 'js_disabled',
'checkConnection': '',
'checkedDomains': 'youtube',
'dnConn': '',
'pstMsg': '0',
'rmShown': '1',
'secTok': '',
'signIn': 'Sign in',
'timeStmp': '',
'service': 'youtube',
'uilel': '3',
'hl': 'en_US',
}
login_data = compat_urllib_parse.urlencode(encode_dict(login_form_strs)).encode('ascii')
req = sanitized_Request(self._LOGIN_URL, login_data)
login_results = self._download_webpage(
req, None,
note='Logging in', errnote='unable to log in', fatal=False)
if login_results is False:
return False
if re.search(r'id="errormsg_0_Passwd"', login_results) is not None:
raise ExtractorError('Please use your account password and a two-factor code instead of an application-specific password.', expected=True)
# Two-Factor
# TODO add SMS and phone call support - these require making a request and then prompting the user
if re.search(r'(?i)<form[^>]* id="challenge"', login_results) is not None:
tfa_code = self._get_tfa_info('2-step verification code')
if not tfa_code:
self._downloader.report_warning(
'Two-factor authentication required. Provide it either interactively or with --twofactor <code>'
'(Note that only TOTP (Google Authenticator App) codes work at this time.)')
return False
tfa_code = remove_start(tfa_code, 'G-')
tfa_form_strs = self._form_hidden_inputs('challenge', login_results)
tfa_form_strs.update({
'Pin': tfa_code,
'TrustDevice': 'on',
})
tfa_data = compat_urllib_parse.urlencode(encode_dict(tfa_form_strs)).encode('ascii')
tfa_req = sanitized_Request(self._TWOFACTOR_URL, tfa_data)
tfa_results = self._download_webpage(
tfa_req, None,
note='Submitting TFA code', errnote='unable to submit tfa', fatal=False)
if tfa_results is False:
return False
if re.search(r'(?i)<form[^>]* id="challenge"', tfa_results) is not None:
self._downloader.report_warning('Two-factor code expired or invalid. Please try again, or use a one-use backup code instead.')
return False
if re.search(r'(?i)<form[^>]* id="gaia_loginform"', tfa_results) is not None:
self._downloader.report_warning('unable to log in - did the page structure change?')
return False
if re.search(r'smsauth-interstitial-reviewsettings', tfa_results) is not None:
self._downloader.report_warning('Your Google account has a security notice. Please log in on your web browser, resolve the notice, and try again.')
return False
if re.search(r'(?i)<form[^>]* id="gaia_loginform"', login_results) is not None:
self._downloader.report_warning('unable to log in: bad username or password')
return False
return True
def _real_initialize(self):
if self._downloader is None:
return
self._set_language()
if not self._login():
return
class YoutubeEntryListBaseInfoExtractor(InfoExtractor):
# Extract entries from page with "Load more" button
def _entries(self, page, playlist_id):
more_widget_html = content_html = page
for page_num in itertools.count(1):
for entry in self._process_page(content_html):
yield entry
mobj = re.search(r'data-uix-load-more-href="/?(?P<more>[^"]+)"', more_widget_html)
if not mobj:
break
more = self._download_json(
'https://youtube.com/%s' % mobj.group('more'), playlist_id,
'Downloading page #%s' % page_num,
transform_source=uppercase_escape)
content_html = more['content_html']
if not content_html.strip():
# Some webpages show a "Load more" button but they don't
# have more videos
break
more_widget_html = more['load_more_widget_html']
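# (illustrative, added) each "Load more" response is a JSON object of the
# form {"content_html": "<li>...</li>", "load_more_widget_html": "..."};
# pagination stops once load_more_widget_html no longer contains a
# data-uix-load-more-href attribute.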
class YoutubePlaylistBaseInfoExtractor(YoutubeEntryListBaseInfoExtractor):
def _process_page(self, content):
for video_id, video_title in self.extract_videos_from_page(content):
yield self.url_result(video_id, 'Youtube', video_id, video_title)
def extract_videos_from_page(self, page):
ids_in_page = []
titles_in_page = []
for mobj in re.finditer(self._VIDEO_RE, page):
# The link with index 0 is not the first video of the playlist (not sure if this is still the case)
if 'index' in mobj.groupdict() and mobj.group('id') == '0':
continue
video_id = mobj.group('id')
video_title = unescapeHTML(mobj.group('title'))
if video_title:
video_title = video_title.strip()
try:
idx = ids_in_page.index(video_id)
if video_title and not titles_in_page[idx]:
titles_in_page[idx] = video_title
except ValueError:
ids_in_page.append(video_id)
titles_in_page.append(video_title)
return zip(ids_in_page, titles_in_page)
class YoutubePlaylistsBaseInfoExtractor(YoutubeEntryListBaseInfoExtractor):
def _process_page(self, content):
for playlist_id in re.findall(r'href="/?playlist\?list=(.+?)"', content):
yield self.url_result(
'https://www.youtube.com/playlist?list=%s' % playlist_id, 'YoutubePlaylist')
def _real_extract(self, url):
playlist_id = self._match_id(url)
webpage = self._download_webpage(url, playlist_id)
title = self._og_search_title(webpage, fatal=False)
return self.playlist_result(self._entries(webpage, playlist_id), playlist_id, title)
class YoutubeIE(YoutubeBaseInfoExtractor):
IE_DESC = 'YouTube.com'
_VALID_URL = r"""(?x)^
(
(?:https?://|//) # http(s):// or protocol-independent URL
(?:(?:(?:(?:\w+\.)?[yY][oO][uU][tT][uU][bB][eE](?:-nocookie)?\.com/|
(?:www\.)?deturl\.com/www\.youtube\.com/|
(?:www\.)?pwnyoutube\.com/|
(?:www\.)?yourepeat\.com/|
tube\.majestyc\.net/|
youtube\.googleapis\.com/) # the various hostnames, with wildcard subdomains
(?:.*?\#/)? # handle anchor (#/) redirect urls
(?: # the various things that can precede the ID:
(?:(?:v|embed|e)/(?!videoseries)) # v/ or embed/ or e/
|(?: # or the v= param in all its forms
(?:(?:watch|movie)(?:_popup)?(?:\.php)?/?)? # preceding watch(_popup|.php) or nothing (like /?v=xxxx)
(?:\?|\#!?) # the params delimiter ? or # or #!
(?:.*?&)?? # any other preceding param (like /?s=tuff&v=xxxx)
v=
)
))
|(?:
youtu\.be| # just youtu.be/xxxx
vid\.plus # or vid.plus/xxxx
)/
|(?:www\.)?cleanvideosearch\.com/media/action/yt/watch\?videoId=
)
)? # all until now is optional -> you can pass the naked ID
([0-9A-Za-z_-]{11}) # here is it! the YouTube video ID
(?!.*?&list=) # combined list/video URLs are handled by the playlist IE
(?(1).+)? # if we found the ID, everything can follow
$"""
_NEXT_URL_RE = r'[\?&]next_url=([^&]+)'
_formats = {
'5': {'ext': 'flv', 'width': 400, 'height': 240},
'6': {'ext': 'flv', 'width': 450, 'height': 270},
'13': {'ext': '3gp'},
'17': {'ext': '3gp', 'width': 176, 'height': 144},
'18': {'ext': 'mp4', 'width': 640, 'height': 360},
'22': {'ext': 'mp4', 'width': 1280, 'height': 720},
'34': {'ext': 'flv', 'width': 640, 'height': 360},
'35': {'ext': 'flv', 'width': 854, 'height': 480},
'36': {'ext': '3gp', 'width': 320, 'height': 240},
'37': {'ext': 'mp4', 'width': 1920, 'height': 1080},
'38': {'ext': 'mp4', 'width': 4096, 'height': 3072},
'43': {'ext': 'webm', 'width': 640, 'height': 360},
'44': {'ext': 'webm', 'width': 854, 'height': 480},
'45': {'ext': 'webm', 'width': 1280, 'height': 720},
'46': {'ext': 'webm', 'width': 1920, 'height': 1080},
'59': {'ext': 'mp4', 'width': 854, 'height': 480},
'78': {'ext': 'mp4', 'width': 854, 'height': 480},
# 3d videos
'82': {'ext': 'mp4', 'height': 360, 'format_note': '3D', 'preference': -20},
'83': {'ext': 'mp4', 'height': 480, 'format_note': '3D', 'preference': -20},
'84': {'ext': 'mp4', 'height': 720, 'format_note': '3D', 'preference': -20},
'85': {'ext': 'mp4', 'height': 1080, 'format_note': '3D', 'preference': -20},
'100': {'ext': 'webm', 'height': 360, 'format_note': '3D', 'preference': -20},
'101': {'ext': 'webm', 'height': 480, 'format_note': '3D', 'preference': -20},
'102': {'ext': 'webm', 'height': 720, 'format_note': '3D', 'preference': -20},
# Apple HTTP Live Streaming
'92': {'ext': 'mp4', 'height': 240, 'format_note': 'HLS', 'preference': -10},
'93': {'ext': 'mp4', 'height': 360, 'format_note': 'HLS', 'preference': -10},
'94': {'ext': 'mp4', 'height': 480, 'format_note': 'HLS', 'preference': -10},
'95': {'ext': 'mp4', 'height': 720, 'format_note': 'HLS', 'preference': -10},
'96': {'ext': 'mp4', 'height': 1080, 'format_note': 'HLS', 'preference': -10},
'132': {'ext': 'mp4', 'height': 240, 'format_note': 'HLS', 'preference': -10},
'151': {'ext': 'mp4', 'height': 72, 'format_note': 'HLS', 'preference': -10},
# DASH mp4 video
'133': {'ext': 'mp4', 'height': 240, 'format_note': 'DASH video', 'acodec': 'none', 'preference': -40},
'134': {'ext': 'mp4', 'height': 360, 'format_note': 'DASH video', 'acodec': 'none', 'preference': -40},
'135': {'ext': 'mp4', 'height': 480, 'format_note': 'DASH video', 'acodec': 'none', 'preference': -40},
'136': {'ext': 'mp4', 'height': 720, 'format_note': 'DASH video', 'acodec': 'none', 'preference': -40},
'137': {'ext': 'mp4', 'height': 1080, 'format_note': 'DASH video', 'acodec': 'none', 'preference': -40},
'138': {'ext': 'mp4', 'format_note': 'DASH video', 'acodec': 'none', 'preference': -40}, # Height can vary (https://github.com/rg3/youtube-dl/issues/4559)
'160': {'ext': 'mp4', 'height': 144, 'format_note': 'DASH video', 'acodec': 'none', 'preference': -40},
'264': {'ext': 'mp4', 'height': 1440, 'format_note': 'DASH video', 'acodec': 'none', 'preference': -40},
'298': {'ext': 'mp4', 'height': 720, 'format_note': 'DASH video', 'acodec': 'none', 'preference': -40, 'fps': 60, 'vcodec': 'h264'},
'299': {'ext': 'mp4', 'height': 1080, 'format_note': 'DASH video', 'acodec': 'none', 'preference': -40, 'fps': 60, 'vcodec': 'h264'},
'266': {'ext': 'mp4', 'height': 2160, 'format_note': 'DASH video', 'acodec': 'none', 'preference': -40, 'vcodec': 'h264'},
# Dash mp4 audio
'139': {'ext': 'm4a', 'format_note': 'DASH audio', 'acodec': 'aac', 'vcodec': 'none', 'abr': 48, 'preference': -50, 'container': 'm4a_dash'},
'140': {'ext': 'm4a', 'format_note': 'DASH audio', 'acodec': 'aac', 'vcodec': 'none', 'abr': 128, 'preference': -50, 'container': 'm4a_dash'},
'141': {'ext': 'm4a', 'format_note': 'DASH audio', 'acodec': 'aac', 'vcodec': 'none', 'abr': 256, 'preference': -50, 'container': 'm4a_dash'},
# Dash webm
'167': {'ext': 'webm', 'height': 360, 'width': 640, 'format_note': 'DASH video', 'acodec': 'none', 'container': 'webm', 'vcodec': 'vp8', 'preference': -40},
'168': {'ext': 'webm', 'height': 480, 'width': 854, 'format_note': 'DASH video', 'acodec': 'none', 'container': 'webm', 'vcodec': 'vp8', 'preference': -40},
'169': {'ext': 'webm', 'height': 720, 'width': 1280, 'format_note': 'DASH video', 'acodec': 'none', 'container': 'webm', 'vcodec': 'vp8', 'preference': -40},
'170': {'ext': 'webm', 'height': 1080, 'width': 1920, 'format_note': 'DASH video', 'acodec': 'none', 'container': 'webm', 'vcodec': 'vp8', 'preference': -40},
'218': {'ext': 'webm', 'height': 480, 'width': 854, 'format_note': 'DASH video', 'acodec': 'none', 'container': 'webm', 'vcodec': 'vp8', 'preference': -40},
'219': {'ext': 'webm', 'height': 480, 'width': 854, 'format_note': 'DASH video', 'acodec': 'none', 'container': 'webm', 'vcodec': 'vp8', 'preference': -40},
'278': {'ext': 'webm', 'height': 144, 'format_note': 'DASH video', 'acodec': 'none', 'preference': -40, 'container': 'webm', 'vcodec': 'vp9'},
'242': {'ext': 'webm', 'height': 240, 'format_note': 'DASH video', 'acodec': 'none', 'preference': -40},
'243': {'ext': 'webm', 'height': 360, 'format_note': 'DASH video', 'acodec': 'none', 'preference': -40},
'244': {'ext': 'webm', 'height': 480, 'format_note': 'DASH video', 'acodec': 'none', 'preference': -40},
'245': {'ext': 'webm', 'height': 480, 'format_note': 'DASH video', 'acodec': 'none', 'preference': -40},
'246': {'ext': 'webm', 'height': 480, 'format_note': 'DASH video', 'acodec': 'none', 'preference': -40},
'247': {'ext': 'webm', 'height': 720, 'format_note': 'DASH video', 'acodec': 'none', 'preference': -40},
'248': {'ext': 'webm', 'height': 1080, 'format_note': 'DASH video', 'acodec': 'none', 'preference': -40},
'271': {'ext': 'webm', 'height': 1440, 'format_note': 'DASH video', 'acodec': 'none', 'preference': -40},
'272': {'ext': 'webm', 'height': 2160, 'format_note': 'DASH video', 'acodec': 'none', 'preference': -40},
'302': {'ext': 'webm', 'height': 720, 'format_note': 'DASH video', 'acodec': 'none', 'preference': -40, 'fps': 60, 'vcodec': 'vp9'},
'303': {'ext': 'webm', 'height': 1080, 'format_note': 'DASH video', 'acodec': 'none', 'preference': -40, 'fps': 60, 'vcodec': 'vp9'},
'308': {'ext': 'webm', 'height': 1440, 'format_note': 'DASH video', 'acodec': 'none', 'preference': -40, 'fps': 60, 'vcodec': 'vp9'},
'313': {'ext': 'webm', 'height': 2160, 'format_note': 'DASH video', 'acodec': 'none', 'preference': -40, 'vcodec': 'vp9'},
'315': {'ext': 'webm', 'height': 2160, 'format_note': 'DASH video', 'acodec': 'none', 'preference': -40, 'fps': 60, 'vcodec': 'vp9'},
# Dash webm audio
'171': {'ext': 'webm', 'vcodec': 'none', 'format_note': 'DASH audio', 'abr': 128, 'preference': -50},
'172': {'ext': 'webm', 'vcodec': 'none', 'format_note': 'DASH audio', 'abr': 256, 'preference': -50},
# Dash webm audio with opus inside
'249': {'ext': 'webm', 'vcodec': 'none', 'format_note': 'DASH audio', 'acodec': 'opus', 'abr': 50, 'preference': -50},
'250': {'ext': 'webm', 'vcodec': 'none', 'format_note': 'DASH audio', 'acodec': 'opus', 'abr': 70, 'preference': -50},
'251': {'ext': 'webm', 'vcodec': 'none', 'format_note': 'DASH audio', 'acodec': 'opus', 'abr': 160, 'preference': -50},
# RTMP (unnamed)
'_rtmp': {'protocol': 'rtmp'},
}
IE_NAME = 'youtube'
_TESTS = [
{
'url': 'http://www.youtube.com/watch?v=BaW_jenozKcj&t=1s&end=9',
'info_dict': {
'id': 'BaW_jenozKc',
'ext': 'mp4',
'title': 'youtube-dl test video "\'/\\ä↭𝕐',
'uploader': 'Philipp Hagemeister',
'uploader_id': 'phihag',
'upload_date': '20121002',
'description': 'test chars: "\'/\\ä↭𝕐\ntest URL: https://github.com/rg3/youtube-dl/issues/1892\n\nThis is a test video for youtube-dl.\n\nFor more information, contact phihag@phihag.de .',
'categories': ['Science & Technology'],
'tags': ['youtube-dl'],
'like_count': int,
'dislike_count': int,
'start_time': 1,
'end_time': 9,
}
},
{
'url': 'http://www.youtube.com/watch?v=UxxajLWwzqY',
'note': 'Test generic use_cipher_signature video (#897)',
'info_dict': {
'id': 'UxxajLWwzqY',
'ext': 'mp4',
'upload_date': '20120506',
'title': 'Icona Pop - I Love It (feat. Charli XCX) [OFFICIAL VIDEO]',
'description': 'md5:782e8651347686cba06e58f71ab51773',
'tags': ['Icona Pop i love it', 'sweden', 'pop music', 'big beat records', 'big beat', 'charli',
'xcx', 'charli xcx', 'girls', 'hbo', 'i love it', "i don't care", 'icona', 'pop',
'iconic ep', 'iconic', 'love', 'it'],
'uploader': 'Icona Pop',
'uploader_id': 'IconaPop',
}
},
{
'url': 'https://www.youtube.com/watch?v=07FYdnEawAQ',
'note': 'Test VEVO video with age protection (#956)',
'info_dict': {
'id': '07FYdnEawAQ',
'ext': 'mp4',
'upload_date': '20130703',
'title': 'Justin Timberlake - Tunnel Vision (Explicit)',
'description': 'md5:64249768eec3bc4276236606ea996373',
'uploader': 'justintimberlakeVEVO',
'uploader_id': 'justintimberlakeVEVO',
'age_limit': 18,
}
},
{
'url': '//www.YouTube.com/watch?v=yZIXLfi8CZQ',
'note': 'Embed-only video (#1746)',
'info_dict': {
'id': 'yZIXLfi8CZQ',
'ext': 'mp4',
'upload_date': '20120608',
'title': 'Principal Sexually Assaults A Teacher - Episode 117 - 8th June 2012',
'description': 'md5:09b78bd971f1e3e289601dfba15ca4f7',
'uploader': 'SET India',
'uploader_id': 'setindia',
'age_limit': 18,
}
},
{
'url': 'http://www.youtube.com/watch?v=BaW_jenozKcj&v=UxxajLWwzqY',
'note': 'Use the first video ID in the URL',
'info_dict': {
'id': 'BaW_jenozKc',
'ext': 'mp4',
'title': 'youtube-dl test video "\'/\\ä↭𝕐',
'uploader': 'Philipp Hagemeister',
'uploader_id': 'phihag',
'upload_date': '20121002',
'description': 'test chars: "\'/\\ä↭𝕐\ntest URL: https://github.com/rg3/youtube-dl/issues/1892\n\nThis is a test video for youtube-dl.\n\nFor more information, contact phihag@phihag.de .',
'categories': ['Science & Technology'],
'tags': ['youtube-dl'],
'like_count': int,
'dislike_count': int,
},
'params': {
'skip_download': True,
},
},
{
'url': 'http://www.youtube.com/watch?v=a9LDPn-MO4I',
'note': '256k DASH audio (format 141) via DASH manifest',
'info_dict': {
'id': 'a9LDPn-MO4I',
'ext': 'm4a',
'upload_date': '20121002',
'uploader_id': '8KVIDEO',
'description': '',
'uploader': '8KVIDEO',
'title': 'UHDTV TEST 8K VIDEO.mp4'
},
'params': {
'youtube_include_dash_manifest': True,
'format': '141',
},
},
# DASH manifest with encrypted signature
{
'url': 'https://www.youtube.com/watch?v=IB3lcPjvWLA',
'info_dict': {
'id': 'IB3lcPjvWLA',
'ext': 'm4a',
'title': 'Afrojack, Spree Wilson - The Spark ft. Spree Wilson',
'description': 'md5:12e7067fa6735a77bdcbb58cb1187d2d',
'uploader': 'AfrojackVEVO',
'uploader_id': 'AfrojackVEVO',
'upload_date': '20131011',
},
'params': {
'youtube_include_dash_manifest': True,
'format': '141',
},
},
# JS player signature function name containing $
{
'url': 'https://www.youtube.com/watch?v=nfWlot6h_JM',
'info_dict': {
'id': 'nfWlot6h_JM',
'ext': 'm4a',
'title': 'Taylor Swift - Shake It Off',
'description': 'md5:95f66187cd7c8b2c13eb78e1223b63c3',
'uploader': 'TaylorSwiftVEVO',
'uploader_id': 'TaylorSwiftVEVO',
'upload_date': '20140818',
},
'params': {
'youtube_include_dash_manifest': True,
'format': '141',
},
},
# Controversy video
{
'url': 'https://www.youtube.com/watch?v=T4XJQO3qol8',
'info_dict': {
'id': 'T4XJQO3qol8',
'ext': 'mp4',
'upload_date': '20100909',
'uploader': 'The Amazing Atheist',
'uploader_id': 'TheAmazingAtheist',
'title': 'Burning Everyone\'s Koran',
'description': 'SUBSCRIBE: http://www.youtube.com/saturninefilms\n\nEven Obama has taken a stand against freedom on this issue: http://www.huffingtonpost.com/2010/09/09/obama-gma-interview-quran_n_710282.html',
}
},
# Normal age-gate video (No vevo, embed allowed)
{
'url': 'http://youtube.com/watch?v=HtVdAasjOgU',
'info_dict': {
'id': 'HtVdAasjOgU',
'ext': 'mp4',
'title': 'The Witcher 3: Wild Hunt - The Sword Of Destiny Trailer',
'description': 're:(?s).{100,}About the Game\n.*?The Witcher 3: Wild Hunt.{100,}',
'uploader': 'The Witcher',
'uploader_id': 'WitcherGame',
'upload_date': '20140605',
'age_limit': 18,
},
},
# Age-gate video with encrypted signature
{
'url': 'http://www.youtube.com/watch?v=6kLq3WMV1nU',
'info_dict': {
'id': '6kLq3WMV1nU',
'ext': 'mp4',
'title': 'Dedication To My Ex (Miss That) (Lyric Video)',
'description': 'md5:33765bb339e1b47e7e72b5490139bb41',
'uploader': 'LloydVEVO',
'uploader_id': 'LloydVEVO',
'upload_date': '20110629',
'age_limit': 18,
},
},
# video_info is None (https://github.com/rg3/youtube-dl/issues/4421)
{
'url': '__2ABJjxzNo',
'info_dict': {
'id': '__2ABJjxzNo',
'ext': 'mp4',
'upload_date': '20100430',
'uploader_id': 'deadmau5',
'description': 'md5:12c56784b8032162bb936a5f76d55360',
'uploader': 'deadmau5',
'title': 'Deadmau5 - Some Chords (HD)',
},
'expected_warnings': [
'DASH manifest missing',
]
},
# Olympics (https://github.com/rg3/youtube-dl/issues/4431)
{
'url': 'lqQg6PlCWgI',
'info_dict': {
'id': 'lqQg6PlCWgI',
'ext': 'mp4',
'upload_date': '20150827',
'uploader_id': 'olympic',
'description': 'HO09 - Women - GER-AUS - Hockey - 31 July 2012 - London 2012 Olympic Games',
'uploader': 'Olympics',
'title': 'Hockey - Women - GER-AUS - London 2012 Olympic Games',
},
'params': {
'skip_download': 'requires avconv',
}
},
# Non-square pixels
{
'url': 'https://www.youtube.com/watch?v=_b-2C3KPAM0',
'info_dict': {
'id': '_b-2C3KPAM0',
'ext': 'mp4',
'stretched_ratio': 16 / 9.,
'upload_date': '20110310',
'uploader_id': 'AllenMeow',
'description': 'made by Wacom from Korea | 字幕&加油添醋 by TY\'s Allen | 感謝heylisa00cavey1001同學熱情提供梗及翻譯',
'uploader': '孫艾倫',
'title': '[A-made] 變態妍字幕版 太妍 我就是這樣的人',
},
},
# url_encoded_fmt_stream_map is empty string
{
'url': 'qEJwOuvDf7I',
'info_dict': {
'id': 'qEJwOuvDf7I',
'ext': 'webm',
'title': 'Обсуждение судебной практики по выборам 14 сентября 2014 года в Санкт-Петербурге',
'description': '',
'upload_date': '20150404',
'uploader_id': 'spbelect',
'uploader': 'Наблюдатели Петербурга',
},
'params': {
'skip_download': 'requires avconv',
}
},
# Extraction from multiple DASH manifests (https://github.com/rg3/youtube-dl/pull/6097)
{
'url': 'https://www.youtube.com/watch?v=FIl7x6_3R5Y',
'info_dict': {
'id': 'FIl7x6_3R5Y',
'ext': 'mp4',
'title': 'md5:7b81415841e02ecd4313668cde88737a',
'description': 'md5:116377fd2963b81ec4ce64b542173306',
'upload_date': '20150625',
'uploader_id': 'dorappi2000',
'uploader': 'dorappi2000',
'formats': 'mincount:33',
},
},
# DASH manifest with segment_list
{
'url': 'https://www.youtube.com/embed/CsmdDsKjzN8',
'md5': '8ce563a1d667b599d21064e982ab9e31',
'info_dict': {
'id': 'CsmdDsKjzN8',
'ext': 'mp4',
'upload_date': '20150501', # According to '<meta itemprop="datePublished"', but in other places it's 20150510
'uploader': 'Airtek',
'description': 'Retransmisión en directo de la XVIII media maratón de Zaragoza.',
'uploader_id': 'UCzTzUmjXxxacNnL8I3m4LnQ',
'title': 'Retransmisión XVIII Media maratón Zaragoza 2015',
},
'params': {
'youtube_include_dash_manifest': True,
'format': '135', # bestvideo
}
},
{
# Multifeed videos (multiple cameras), URL is for Main Camera
'url': 'https://www.youtube.com/watch?v=jqWvoWXjCVs',
'info_dict': {
'id': 'jqWvoWXjCVs',
'title': 'teamPGP: Rocket League Noob Stream',
'description': 'md5:dc7872fb300e143831327f1bae3af010',
},
'playlist': [{
'info_dict': {
'id': 'jqWvoWXjCVs',
'ext': 'mp4',
'title': 'teamPGP: Rocket League Noob Stream (Main Camera)',
'description': 'md5:dc7872fb300e143831327f1bae3af010',
'upload_date': '20150721',
'uploader': 'Beer Games Beer',
'uploader_id': 'beergamesbeer',
},
}, {
'info_dict': {
'id': '6h8e8xoXJzg',
'ext': 'mp4',
'title': 'teamPGP: Rocket League Noob Stream (kreestuh)',
'description': 'md5:dc7872fb300e143831327f1bae3af010',
'upload_date': '20150721',
'uploader': 'Beer Games Beer',
'uploader_id': 'beergamesbeer',
},
}, {
'info_dict': {
'id': 'PUOgX5z9xZw',
'ext': 'mp4',
'title': 'teamPGP: Rocket League Noob Stream (grizzle)',
'description': 'md5:dc7872fb300e143831327f1bae3af010',
'upload_date': '20150721',
'uploader': 'Beer Games Beer',
'uploader_id': 'beergamesbeer',
},
}, {
'info_dict': {
'id': 'teuwxikvS5k',
'ext': 'mp4',
'title': 'teamPGP: Rocket League Noob Stream (zim)',
'description': 'md5:dc7872fb300e143831327f1bae3af010',
'upload_date': '20150721',
'uploader': 'Beer Games Beer',
'uploader_id': 'beergamesbeer',
},
}],
'params': {
'skip_download': True,
},
},
{
'url': 'http://vid.plus/FlRa-iH7PGw',
'only_matching': True,
},
{
# Title with JS-like syntax "};" (see https://github.com/rg3/youtube-dl/issues/7468)
'url': 'https://www.youtube.com/watch?v=lsguqyKfVQg',
'info_dict': {
'id': 'lsguqyKfVQg',
'ext': 'mp4',
'title': '{dark walk}; Loki/AC/Dishonored; collab w/Elflover21',
'description': 'md5:8085699c11dc3f597ce0410b0dcbb34a',
'upload_date': '20151119',
'uploader_id': 'IronSoulElf',
'uploader': 'IronSoulElf',
},
'params': {
'skip_download': True,
},
},
{
# Tags with '};' (see https://github.com/rg3/youtube-dl/issues/7468)
'url': 'https://www.youtube.com/watch?v=Ms7iBXnlUO8',
'only_matching': True,
},
]
def __init__(self, *args, **kwargs):
super(YoutubeIE, self).__init__(*args, **kwargs)
self._player_cache = {}
def report_video_info_webpage_download(self, video_id):
"""Report attempt to download video info webpage."""
self.to_screen('%s: Downloading video info webpage' % video_id)
def report_information_extraction(self, video_id):
"""Report attempt to extract video information."""
self.to_screen('%s: Extracting video information' % video_id)
def report_unavailable_format(self, video_id, format):
"""Report extracted video URL."""
self.to_screen('%s: Format %s not available' % (video_id, format))
def report_rtmp_download(self):
"""Indicate the download will use the RTMP protocol."""
self.to_screen('RTMP download detected')
def _signature_cache_id(self, example_sig):
""" Return a string representation of a signature """
return '.'.join(compat_str(len(part)) for part in example_sig.split('.'))
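# (illustrative, added) e.g. the signature 'AAAAA.BBB.CC' yields the cache
# id '5.3.2' -- only the lengths of the dot-separated parts are kept, which
# is enough to tell signature layouts apart without storing the signature.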
def _extract_signature_function(self, video_id, player_url, example_sig):
id_m = re.match(
r'.*?-(?P<id>[a-zA-Z0-9_-]+)(?:/watch_as3|/html5player(?:-new)?|/base)?\.(?P<ext>[a-z]+)$',
player_url)
if not id_m:
raise ExtractorError('Cannot identify player %r' % player_url)
player_type = id_m.group('ext')
player_id = id_m.group('id')
# Read from filesystem cache
func_id = '%s_%s_%s' % (
player_type, player_id, self._signature_cache_id(example_sig))
assert os.path.basename(func_id) == func_id
cache_spec = self._downloader.cache.load('youtube-sigfuncs', func_id)
if cache_spec is not None:
return lambda s: ''.join(s[i] for i in cache_spec)
download_note = (
'Downloading player %s' % player_url
if self._downloader.params.get('verbose') else
'Downloading %s player %s' % (player_type, player_id)
)
if player_type == 'js':
code = self._download_webpage(
player_url, video_id,
note=download_note,
errnote='Download of %s failed' % player_url)
res = self._parse_sig_js(code)
elif player_type == 'swf':
urlh = self._request_webpage(
player_url, video_id,
note=download_note,
errnote='Download of %s failed' % player_url)
code = urlh.read()
res = self._parse_sig_swf(code)
else:
assert False, 'Invalid player type %r' % player_type
test_string = ''.join(map(compat_chr, range(len(example_sig))))
cache_res = res(test_string)
cache_spec = [ord(c) for c in cache_res]
self._downloader.cache.store('youtube-sigfuncs', func_id, cache_spec)
return res
def _print_sig_code(self, func, example_sig):
def gen_sig_code(idxs):
def _genslice(start, end, step):
starts = '' if start == 0 else str(start)
ends = (':%d' % (end + step)) if end + step >= 0 else ':'
steps = '' if step == 1 else (':%d' % step)
return 's[%s%s%s]' % (starts, ends, steps)
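# (illustrative, added) _genslice(0, 4, 1) -> 's[:5]' and
# _genslice(3, 0, -1) -> 's[3::-1]'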
step = None
# Quell pyflakes warnings - start will be set when step is set
start = '(Never used)'
for i, prev in zip(idxs[1:], idxs[:-1]):
if step is not None:
if i - prev == step:
continue
yield _genslice(start, prev, step)
step = None
continue
if i - prev in [-1, 1]:
step = i - prev
start = prev
continue
else:
yield 's[%d]' % prev
if step is None:
yield 's[%d]' % i
else:
yield _genslice(start, i, step)
test_string = ''.join(map(compat_chr, range(len(example_sig))))
cache_res = func(test_string)
cache_spec = [ord(c) for c in cache_res]
expr_code = ' + '.join(gen_sig_code(cache_spec))
signature_id_tuple = '(%s)' % (
', '.join(compat_str(len(p)) for p in example_sig.split('.')))
code = ('if tuple(len(p) for p in s.split(\'.\')) == %s:\n'
' return %s\n') % (signature_id_tuple, expr_code)
self.to_screen('Extracted signature function:\n' + code)
def _parse_sig_js(self, jscode):
funcname = self._search_regex(
r'\.sig\|\|([a-zA-Z0-9$]+)\(', jscode,
'Initial JS player signature function name')
jsi = JSInterpreter(jscode)
initial_function = jsi.extract_function(funcname)
return lambda s: initial_function([s])
def _parse_sig_swf(self, file_contents):
swfi = SWFInterpreter(file_contents)
TARGET_CLASSNAME = 'SignatureDecipher'
searched_class = swfi.extract_class(TARGET_CLASSNAME)
initial_function = swfi.extract_function(searched_class, 'decipher')
return lambda s: initial_function([s])
def _decrypt_signature(self, s, video_id, player_url, age_gate=False):
"""Turn the encrypted s field into a working signature"""
if player_url is None:
raise ExtractorError('Cannot decrypt signature without player_url')
if player_url.startswith('//'):
player_url = 'https:' + player_url
try:
player_id = (player_url, self._signature_cache_id(s))
if player_id not in self._player_cache:
func = self._extract_signature_function(
video_id, player_url, s
)
self._player_cache[player_id] = func
func = self._player_cache[player_id]
if self._downloader.params.get('youtube_print_sig_code'):
self._print_sig_code(func, s)
return func(s)
except Exception as e:
tb = traceback.format_exc()
raise ExtractorError(
'Signature extraction failed: ' + tb, cause=e)
def _get_subtitles(self, video_id, webpage):
try:
subs_doc = self._download_xml(
'https://video.google.com/timedtext?hl=en&type=list&v=%s' % video_id,
video_id, note=False)
except ExtractorError as err:
self._downloader.report_warning('unable to download video subtitles: %s' % compat_str(err))
return {}
sub_lang_list = {}
for track in subs_doc.findall('track'):
lang = track.attrib['lang_code']
if lang in sub_lang_list:
continue
sub_formats = []
for ext in ['sbv', 'vtt', 'srt']:
params = compat_urllib_parse.urlencode({
'lang': lang,
'v': video_id,
'fmt': ext,
'name': track.attrib['name'].encode('utf-8'),
})
sub_formats.append({
'url': 'https://www.youtube.com/api/timedtext?' + params,
'ext': ext,
})
sub_lang_list[lang] = sub_formats
if not sub_lang_list:
self._downloader.report_warning('video doesn\'t have subtitles')
return {}
return sub_lang_list
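# (illustrative, added) for lang 'en' and ext 'vtt' the loop above produces
# entries such as
#   {'url': 'https://www.youtube.com/api/timedtext?lang=en&v=<video_id>&fmt=vtt&name=...',
#    'ext': 'vtt'}
# one list of such dicts per available subtitle language.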
def _get_ytplayer_config(self, video_id, webpage):
patterns = (
# User data may contain arbitrary character sequences that may affect
# JSON extraction with regex, e.g. when '};' appears inside a value the
# second regex won't capture the whole JSON. We work around this by trying
# the more specific regex first; proper quoted-string handling, to be
# implemented in the future, will replace this workaround (see
# https://github.com/rg3/youtube-dl/issues/7468,
# https://github.com/rg3/youtube-dl/pull/7599)
r';ytplayer\.config\s*=\s*({.+?});ytplayer',
r';ytplayer\.config\s*=\s*({.+?});',
)
config = self._search_regex(
patterns, webpage, 'ytplayer.config', default=None)
if config:
return self._parse_json(
uppercase_escape(config), video_id, fatal=False)
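# (illustrative, added) why the stricter pattern is tried first: given
#   ;ytplayer.config = {"args": {"title": "{dark walk};"}};ytplayer.load();
# the generic r';ytplayer\.config\s*=\s*({.+?});' stops at the '};' inside
# the title and captures truncated JSON, while the pattern anchored on
# ');ytplayer' captures the complete object.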
def _get_automatic_captions(self, video_id, webpage):
"""We need the webpage for getting the captions url, pass it as an
argument to speed up the process."""
self.to_screen('%s: Looking for automatic captions' % video_id)
player_config = self._get_ytplayer_config(video_id, webpage)
err_msg = 'Couldn\'t find automatic captions for %s' % video_id
if not player_config:
self._downloader.report_warning(err_msg)
return {}
try:
args = player_config['args']
caption_url = args['ttsurl']
timestamp = args['timestamp']
# We get the available subtitles
list_params = compat_urllib_parse.urlencode({
'type': 'list',
'tlangs': 1,
'asrs': 1,
})
list_url = caption_url + '&' + list_params
caption_list = self._download_xml(list_url, video_id)
original_lang_node = caption_list.find('track')
if original_lang_node is None:
self._downloader.report_warning('Video doesn\'t have automatic captions')
return {}
original_lang = original_lang_node.attrib['lang_code']
caption_kind = original_lang_node.attrib.get('kind', '')
sub_lang_list = {}
for lang_node in caption_list.findall('target'):
sub_lang = lang_node.attrib['lang_code']
sub_formats = []
for ext in ['sbv', 'vtt', 'srt']:
params = compat_urllib_parse.urlencode({
'lang': original_lang,
'tlang': sub_lang,
'fmt': ext,
'ts': timestamp,
'kind': caption_kind,
})
sub_formats.append({
'url': caption_url + '&' + params,
'ext': ext,
})
sub_lang_list[sub_lang] = sub_formats
return sub_lang_list
# An extractor error can be raised by the download process if there are
# no automatic captions but there are subtitles
except (KeyError, ExtractorError):
self._downloader.report_warning(err_msg)
return {}
@classmethod
def extract_id(cls, url):
mobj = re.match(cls._VALID_URL, url, re.VERBOSE)
if mobj is None:
raise ExtractorError('Invalid URL: %s' % url)
video_id = mobj.group(2)
return video_id
def _extract_from_m3u8(self, manifest_url, video_id):
url_map = {}
def _get_urls(_manifest):
lines = _manifest.split('\n')
urls = filter(lambda l: l and not l.startswith('#'),
lines)
return urls
manifest = self._download_webpage(manifest_url, video_id, 'Downloading formats manifest')
formats_urls = _get_urls(manifest)
for format_url in formats_urls:
itag = self._search_regex(r'itag/(\d+?)/', format_url, 'itag')
url_map[itag] = format_url
return url_map
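# (illustrative, added) a manifest line such as
#   https://manifest.googlevideo.com/api/manifest/hls_playlist/.../itag/22/...
# maps to url_map['22']; HLS tag lines starting with '#' are skipped by
# _get_urls above.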
def _extract_annotations(self, video_id):
url = 'https://www.youtube.com/annotations_invideo?features=1&legacy=1&video_id=%s' % video_id
return self._download_webpage(url, video_id, note='Searching for annotations.', errnote='Unable to download video annotations.')
def _parse_dash_manifest(
self, video_id, dash_manifest_url, player_url, age_gate, fatal=True):
def decrypt_sig(mobj):
s = mobj.group(1)
dec_s = self._decrypt_signature(s, video_id, player_url, age_gate)
return '/signature/%s' % dec_s
dash_manifest_url = re.sub(r'/s/([a-fA-F0-9\.]+)', decrypt_sig, dash_manifest_url)
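# (illustrative, added) e.g. a manifest URL containing '/s/0A1B2C.3D4E/'
# (made-up hex values) is rewritten to '/signature/<decrypted>/' before
# the manifest is fetched.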
dash_doc = self._download_xml(
dash_manifest_url, video_id,
note='Downloading DASH manifest',
errnote='Could not download DASH manifest',
fatal=fatal)
if dash_doc is False:
return []
formats = []
for a in dash_doc.findall('.//{urn:mpeg:DASH:schema:MPD:2011}AdaptationSet'):
mime_type = a.attrib.get('mimeType')
for r in a.findall('{urn:mpeg:DASH:schema:MPD:2011}Representation'):
url_el = r.find('{urn:mpeg:DASH:schema:MPD:2011}BaseURL')
if url_el is None:
continue
if mime_type == 'text/vtt':
# TODO implement WebVTT downloading
pass
elif mime_type.startswith('audio/') or mime_type.startswith('video/'):
segment_list = r.find('{urn:mpeg:DASH:schema:MPD:2011}SegmentList')
format_id = r.attrib['id']
video_url = url_el.text
filesize = int_or_none(url_el.attrib.get('{http://youtube.com/yt/2012/10/10}contentLength'))
f = {
'format_id': format_id,
'url': video_url,
'width': int_or_none(r.attrib.get('width')),
'height': int_or_none(r.attrib.get('height')),
'tbr': int_or_none(r.attrib.get('bandwidth'), 1000),
'asr': int_or_none(r.attrib.get('audioSamplingRate')),
'filesize': filesize,
'fps': int_or_none(r.attrib.get('frameRate')),
}
if segment_list is not None:
f.update({
'initialization_url': segment_list.find('{urn:mpeg:DASH:schema:MPD:2011}Initialization').attrib['sourceURL'],
'segment_urls': [segment.attrib.get('media') for segment in segment_list.findall('{urn:mpeg:DASH:schema:MPD:2011}SegmentURL')],
'protocol': 'http_dash_segments',
})
try:
existing_format = next(
fo for fo in formats
if fo['format_id'] == format_id)
except StopIteration:
full_info = self._formats.get(format_id, {}).copy()
full_info.update(f)
codecs = r.attrib.get('codecs')
if codecs:
if full_info.get('acodec') == 'none' and 'vcodec' not in full_info:
full_info['vcodec'] = codecs
elif full_info.get('vcodec') == 'none' and 'acodec' not in full_info:
full_info['acodec'] = codecs
formats.append(full_info)
else:
existing_format.update(f)
else:
self.report_warning('Unknown MIME type %s in DASH manifest' % mime_type)
return formats
def _real_extract(self, url):
url, smuggled_data = unsmuggle_url(url, {})
proto = (
'http' if self._downloader.params.get('prefer_insecure', False)
else 'https')
start_time = None
end_time = None
parsed_url = compat_urllib_parse_urlparse(url)
for component in [parsed_url.fragment, parsed_url.query]:
query = compat_parse_qs(component)
if start_time is None and 't' in query:
start_time = parse_duration(query['t'][0])
if start_time is None and 'start' in query:
start_time = parse_duration(query['start'][0])
if end_time is None and 'end' in query:
end_time = parse_duration(query['end'][0])
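# (illustrative, added) for a URL like
#   https://www.youtube.com/watch?v=BaW_jenozKc&t=1m5s#end=90
# the loop above yields start_time == 65.0 (from the query) and
# end_time == 90.0 (from the fragment).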
# Extract the original video URL from a redirection URL (e.g. age verification) using the next_url parameter
mobj = re.search(self._NEXT_URL_RE, url)
if mobj:
url = proto + '://www.youtube.com/' + compat_urllib_parse_unquote(mobj.group(1)).lstrip('/')
video_id = self.extract_id(url)
# Get video webpage
url = proto + '://www.youtube.com/watch?v=%s&gl=US&hl=en&has_verified=1&bpctr=9999999999' % video_id
video_webpage = self._download_webpage(url, video_id)
# Attempt to extract SWF player URL
mobj = re.search(r'swfConfig.*?"(https?:\\/\\/.*?watch.*?-.*?\.swf)"', video_webpage)
if mobj is not None:
player_url = re.sub(r'\\(.)', r'\1', mobj.group(1))
else:
player_url = None
dash_mpds = []
def add_dash_mpd(video_info):
dash_mpd = video_info.get('dashmpd')
if dash_mpd and dash_mpd[0] not in dash_mpds:
dash_mpds.append(dash_mpd[0])
# Get video info
embed_webpage = None
is_live = None
if re.search(r'player-age-gate-content">', video_webpage) is not None:
age_gate = True
# We simulate the access to the video from www.youtube.com/v/{video_id}
# this page can be viewed without logging in to Youtube
url = proto + '://www.youtube.com/embed/%s' % video_id
embed_webpage = self._download_webpage(url, video_id, 'Downloading embed webpage')
data = compat_urllib_parse.urlencode({
'video_id': video_id,
'eurl': 'https://youtube.googleapis.com/v/' + video_id,
'sts': self._search_regex(
r'"sts"\s*:\s*(\d+)', embed_webpage, 'sts', default=''),
})
video_info_url = proto + '://www.youtube.com/get_video_info?' + data
video_info_webpage = self._download_webpage(
video_info_url, video_id,
note='Refetching age-gated info webpage',
errnote='unable to download video info webpage')
video_info = compat_parse_qs(video_info_webpage)
add_dash_mpd(video_info)
else:
age_gate = False
video_info = None
# Try looking directly into the video webpage
ytplayer_config = self._get_ytplayer_config(video_id, video_webpage)
if ytplayer_config:
args = ytplayer_config['args']
if args.get('url_encoded_fmt_stream_map'):
# Convert to the same format returned by compat_parse_qs
video_info = dict((k, [v]) for k, v in args.items())
add_dash_mpd(video_info)
if args.get('livestream') == '1' or args.get('live_playback') == 1:
is_live = True
if not video_info or self._downloader.params.get('youtube_include_dash_manifest', True):
# We also try looking in get_video_info since it may contain a different
# dashmpd URL that points to a DASH manifest with a possibly different itag set (some itags
# are missing from DASH manifest pointed by webpage's dashmpd, some - from DASH
# manifest pointed by get_video_info's dashmpd).
# The general idea is to take a union of itags of both DASH manifests (for example
# video with such 'manifest behavior' see https://github.com/rg3/youtube-dl/issues/6093)
self.report_video_info_webpage_download(video_id)
for el_type in ['&el=info', '&el=embedded', '&el=detailpage', '&el=vevo', '']:
video_info_url = (
'%s://www.youtube.com/get_video_info?&video_id=%s%s&ps=default&eurl=&gl=US&hl=en'
% (proto, video_id, el_type))
video_info_webpage = self._download_webpage(
video_info_url,
video_id, note=False,
errnote='unable to download video info webpage')
get_video_info = compat_parse_qs(video_info_webpage)
if get_video_info.get('use_cipher_signature') != ['True']:
add_dash_mpd(get_video_info)
if not video_info:
video_info = get_video_info
if 'token' in get_video_info:
# Different get_video_info requests may report different results, e.g.
# some may report video unavailability, but some may serve it without
# any complaint (see https://github.com/rg3/youtube-dl/issues/7362,
# the original webpage as well as el=info and el=embedded get_video_info
# requests report video unavailability due to geo restriction while
# el=detailpage succeeds and returns valid data). This is probably
# due to YouTube measures against IP ranges of hosting providers.
# Work around this by preferring the first successful video_info that
# contains the token, if no such video_info has been found yet.
if 'token' not in video_info:
video_info = get_video_info
break
if 'token' not in video_info:
if 'reason' in video_info:
if 'The uploader has not made this video available in your country.' in video_info['reason']:
regions_allowed = self._html_search_meta('regionsAllowed', video_webpage, default=None)
if regions_allowed:
raise ExtractorError('YouTube said: This video is available in %s only' % (
', '.join(map(ISO3166Utils.short2full, regions_allowed.split(',')))),
expected=True)
raise ExtractorError(
'YouTube said: %s' % video_info['reason'][0],
expected=True, video_id=video_id)
else:
raise ExtractorError(
'"token" parameter not in video info for unknown reason',
video_id=video_id)
# title
if 'title' in video_info:
video_title = video_info['title'][0]
else:
self._downloader.report_warning('Unable to extract video title')
video_title = '_'
# description
video_description = get_element_by_id("eow-description", video_webpage)
if video_description:
video_description = re.sub(r'''(?x)
<a\s+
(?:[a-zA-Z-]+="[^"]+"\s+)*?
title="([^"]+)"\s+
(?:[a-zA-Z-]+="[^"]+"\s+)*?
class="yt-uix-redirect-link"\s*>
[^<]+
</a>
''', r'\1', video_description)
video_description = clean_html(video_description)
else:
fd_mobj = re.search(r'<meta name="description" content="([^"]+)"', video_webpage)
if fd_mobj:
video_description = unescapeHTML(fd_mobj.group(1))
else:
video_description = ''
if 'multifeed_metadata_list' in video_info and not smuggled_data.get('force_singlefeed', False):
if not self._downloader.params.get('noplaylist'):
entries = []
feed_ids = []
multifeed_metadata_list = compat_urllib_parse_unquote_plus(video_info['multifeed_metadata_list'][0])
for feed in multifeed_metadata_list.split(','):
feed_data = compat_parse_qs(feed)
entries.append({
'_type': 'url_transparent',
'ie_key': 'Youtube',
'url': smuggle_url(
'%s://www.youtube.com/watch?v=%s' % (proto, feed_data['id'][0]),
{'force_singlefeed': True}),
'title': '%s (%s)' % (video_title, feed_data['title'][0]),
})
feed_ids.append(feed_data['id'][0])
self.to_screen(
'Downloading multifeed video (%s) - add --no-playlist to just download video %s'
% (', '.join(feed_ids), video_id))
return self.playlist_result(entries, video_id, video_title, video_description)
self.to_screen('Downloading just video %s because of --no-playlist' % video_id)
if 'view_count' in video_info:
view_count = int(video_info['view_count'][0])
else:
view_count = None
# Check for "rental" videos
if 'ypc_video_rental_bar_text' in video_info and 'author' not in video_info:
raise ExtractorError('"rental" videos not supported')
# Start extracting information
self.report_information_extraction(video_id)
# uploader
if 'author' not in video_info:
raise ExtractorError('Unable to extract uploader name')
video_uploader = compat_urllib_parse_unquote_plus(video_info['author'][0])
# uploader_id
video_uploader_id = None
mobj = re.search(r'<link itemprop="url" href="http://www.youtube.com/(?:user|channel)/([^"]+)">', video_webpage)
if mobj is not None:
video_uploader_id = mobj.group(1)
else:
self._downloader.report_warning('unable to extract uploader nickname')
# thumbnail image
# We try first to get a high quality image:
m_thumb = re.search(r'<span itemprop="thumbnail".*?href="(.*?)">',
video_webpage, re.DOTALL)
if m_thumb is not None:
video_thumbnail = m_thumb.group(1)
elif 'thumbnail_url' not in video_info:
self._downloader.report_warning('unable to extract video thumbnail')
video_thumbnail = None
else: # don't panic if we can't find it
video_thumbnail = compat_urllib_parse_unquote_plus(video_info['thumbnail_url'][0])
# upload date
upload_date = self._html_search_meta(
'datePublished', video_webpage, 'upload date', default=None)
if not upload_date:
upload_date = self._search_regex(
[r'(?s)id="eow-date.*?>(.*?)</span>',
r'id="watch-uploader-info".*?>.*?(?:Published|Uploaded|Streamed live|Started) on (.+?)</strong>'],
video_webpage, 'upload date', default=None)
if upload_date:
                upload_date = ' '.join(re.sub(r'[/,-]', r' ', upload_date).split())
upload_date = unified_strdate(upload_date)
m_cat_container = self._search_regex(
r'(?s)<h4[^>]*>\s*Category\s*</h4>\s*<ul[^>]*>(.*?)</ul>',
video_webpage, 'categories', default=None)
if m_cat_container:
category = self._html_search_regex(
r'(?s)<a[^<]+>(.*?)</a>', m_cat_container, 'category',
default=None)
video_categories = None if category is None else [category]
else:
video_categories = None
video_tags = [
unescapeHTML(m.group('content'))
for m in re.finditer(self._meta_regex('og:video:tag'), video_webpage)]
def _extract_count(count_name):
return str_to_int(self._search_regex(
r'-%s-button[^>]+><span[^>]+class="yt-uix-button-content"[^>]*>([\d,]+)</span>'
% re.escape(count_name),
video_webpage, count_name, default=None))
like_count = _extract_count('like')
dislike_count = _extract_count('dislike')
# subtitles
video_subtitles = self.extract_subtitles(video_id, video_webpage)
automatic_captions = self.extract_automatic_captions(video_id, video_webpage)
if 'length_seconds' not in video_info:
self._downloader.report_warning('unable to extract video duration')
video_duration = None
else:
video_duration = int(compat_urllib_parse_unquote_plus(video_info['length_seconds'][0]))
# annotations
video_annotations = None
if self._downloader.params.get('writeannotations', False):
video_annotations = self._extract_annotations(video_id)
def _map_to_format_list(urlmap):
formats = []
for itag, video_real_url in urlmap.items():
dct = {
'format_id': itag,
'url': video_real_url,
'player_url': player_url,
}
if itag in self._formats:
dct.update(self._formats[itag])
formats.append(dct)
return formats
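        # Sketch of the helper above on a hypothetical urlmap (itag '22' is an
        # mp4/720p entry in self._formats, so its metadata gets merged in):
        #
        #   _map_to_format_list({'22': 'https://example.invalid/videoplayback'})
        #   -> [{'format_id': '22', 'url': '...', 'player_url': player_url,
        #        'ext': 'mp4', 'width': 1280, 'height': 720}]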
if 'conn' in video_info and video_info['conn'][0].startswith('rtmp'):
self.report_rtmp_download()
formats = [{
'format_id': '_rtmp',
'protocol': 'rtmp',
'url': video_info['conn'][0],
'player_url': player_url,
}]
elif len(video_info.get('url_encoded_fmt_stream_map', [''])[0]) >= 1 or len(video_info.get('adaptive_fmts', [''])[0]) >= 1:
encoded_url_map = video_info.get('url_encoded_fmt_stream_map', [''])[0] + ',' + video_info.get('adaptive_fmts', [''])[0]
if 'rtmpe%3Dyes' in encoded_url_map:
raise ExtractorError('rtmpe downloads are not supported, see https://github.com/rg3/youtube-dl/issues/343 for more information.', expected=True)
formats = []
for url_data_str in encoded_url_map.split(','):
url_data = compat_parse_qs(url_data_str)
if 'itag' not in url_data or 'url' not in url_data:
continue
format_id = url_data['itag'][0]
url = url_data['url'][0]
if 'sig' in url_data:
url += '&signature=' + url_data['sig'][0]
elif 's' in url_data:
encrypted_sig = url_data['s'][0]
ASSETS_RE = r'"assets":.+?"js":\s*("[^"]+")'
jsplayer_url_json = self._search_regex(
ASSETS_RE,
embed_webpage if age_gate else video_webpage,
'JS player URL (1)', default=None)
if not jsplayer_url_json and not age_gate:
# We need the embed website after all
if embed_webpage is None:
embed_url = proto + '://www.youtube.com/embed/%s' % video_id
embed_webpage = self._download_webpage(
embed_url, video_id, 'Downloading embed webpage')
jsplayer_url_json = self._search_regex(
ASSETS_RE, embed_webpage, 'JS player URL')
player_url = json.loads(jsplayer_url_json)
if player_url is None:
player_url_json = self._search_regex(
r'ytplayer\.config.*?"url"\s*:\s*("[^"]+")',
video_webpage, 'age gate player URL')
player_url = json.loads(player_url_json)
if self._downloader.params.get('verbose'):
if player_url is None:
player_version = 'unknown'
player_desc = 'unknown'
else:
if player_url.endswith('swf'):
player_version = self._search_regex(
r'-(.+?)(?:/watch_as3)?\.swf$', player_url,
'flash player', fatal=False)
player_desc = 'flash player %s' % player_version
else:
player_version = self._search_regex(
[r'html5player-([^/]+?)(?:/html5player(?:-new)?)?\.js', r'(?:www|player)-([^/]+)/base\.js'],
player_url,
'html5 player', fatal=False)
player_desc = 'html5 player %s' % player_version
parts_sizes = self._signature_cache_id(encrypted_sig)
self.to_screen('{%s} signature length %s, %s' %
(format_id, parts_sizes, player_desc))
signature = self._decrypt_signature(
encrypted_sig, video_id, player_url, age_gate)
url += '&signature=' + signature
if 'ratebypass' not in url:
url += '&ratebypass=yes'
# Some itags are not included in DASH manifest thus corresponding formats will
# lack metadata (see https://github.com/rg3/youtube-dl/pull/5993).
# Trying to extract metadata from url_encoded_fmt_stream_map entry.
mobj = re.search(r'^(?P<width>\d+)[xX](?P<height>\d+)$', url_data.get('size', [''])[0])
width, height = (int(mobj.group('width')), int(mobj.group('height'))) if mobj else (None, None)
dct = {
'format_id': format_id,
'url': url,
'player_url': player_url,
'filesize': int_or_none(url_data.get('clen', [None])[0]),
'tbr': float_or_none(url_data.get('bitrate', [None])[0], 1000),
'width': width,
'height': height,
'fps': int_or_none(url_data.get('fps', [None])[0]),
'format_note': url_data.get('quality_label', [None])[0] or url_data.get('quality', [None])[0],
}
type_ = url_data.get('type', [None])[0]
if type_:
type_split = type_.split(';')
kind_ext = type_split[0].split('/')
if len(kind_ext) == 2:
kind, ext = kind_ext
dct['ext'] = ext
if kind in ('audio', 'video'):
codecs = None
for mobj in re.finditer(
r'(?P<key>[a-zA-Z_-]+)=(?P<quote>["\']?)(?P<val>.+?)(?P=quote)(?:;|$)', type_):
if mobj.group('key') == 'codecs':
codecs = mobj.group('val')
break
if codecs:
codecs = codecs.split(',')
if len(codecs) == 2:
acodec, vcodec = codecs[0], codecs[1]
else:
acodec, vcodec = (codecs[0], 'none') if kind == 'audio' else ('none', codecs[0])
dct.update({
'acodec': acodec,
'vcodec': vcodec,
})
if format_id in self._formats:
dct.update(self._formats[format_id])
formats.append(dct)
elif video_info.get('hlsvp'):
manifest_url = video_info['hlsvp'][0]
url_map = self._extract_from_m3u8(manifest_url, video_id)
formats = _map_to_format_list(url_map)
else:
raise ExtractorError('no conn, hlsvp or url_encoded_fmt_stream_map information found in video info')
# Look for the DASH manifest
if self._downloader.params.get('youtube_include_dash_manifest', True):
dash_mpd_fatal = True
for dash_manifest_url in dash_mpds:
dash_formats = {}
try:
for df in self._parse_dash_manifest(
video_id, dash_manifest_url, player_url, age_gate, dash_mpd_fatal):
# Do not overwrite DASH format found in some previous DASH manifest
if df['format_id'] not in dash_formats:
dash_formats[df['format_id']] = df
# Additional DASH manifests may end up in HTTP Error 403 therefore
# allow them to fail without bug report message if we already have
# some DASH manifest succeeded. This is temporary workaround to reduce
# burst of bug reports until we figure out the reason and whether it
# can be fixed at all.
dash_mpd_fatal = False
except (ExtractorError, KeyError) as e:
self.report_warning(
'Skipping DASH manifest: %r' % e, video_id)
if dash_formats:
# Remove the formats we found through non-DASH, they
# contain less info and it can be wrong, because we use
# fixed values (for example the resolution). See
# https://github.com/rg3/youtube-dl/issues/5774 for an
# example.
formats = [f for f in formats if f['format_id'] not in dash_formats.keys()]
formats.extend(dash_formats.values())
# Check for malformed aspect ratio
stretched_m = re.search(
r'<meta\s+property="og:video:tag".*?content="yt:stretch=(?P<w>[0-9]+):(?P<h>[0-9]+)">',
video_webpage)
if stretched_m:
ratio = float(stretched_m.group('w')) / float(stretched_m.group('h'))
for f in formats:
if f.get('vcodec') != 'none':
f['stretched_ratio'] = ratio
self._sort_formats(formats)
return {
'id': video_id,
'uploader': video_uploader,
'uploader_id': video_uploader_id,
'upload_date': upload_date,
'title': video_title,
'thumbnail': video_thumbnail,
'description': video_description,
'categories': video_categories,
'tags': video_tags,
'subtitles': video_subtitles,
'automatic_captions': automatic_captions,
'duration': video_duration,
'age_limit': 18 if age_gate else 0,
'annotations': video_annotations,
'webpage_url': proto + '://www.youtube.com/watch?v=%s' % video_id,
'view_count': view_count,
'like_count': like_count,
'dislike_count': dislike_count,
'average_rating': float_or_none(video_info.get('avg_rating', [None])[0]),
'formats': formats,
'is_live': is_live,
'start_time': start_time,
'end_time': end_time,
}
class YoutubePlaylistIE(YoutubeBaseInfoExtractor, YoutubePlaylistBaseInfoExtractor):
IE_DESC = 'YouTube.com playlists'
_VALID_URL = r"""(?x)(?:
(?:https?://)?
(?:\w+\.)?
youtube\.com/
(?:
(?:course|view_play_list|my_playlists|artist|playlist|watch|embed/videoseries)
\? (?:.*?&)*? (?:p|a|list)=
| p/
)
(
(?:PL|LL|EC|UU|FL|RD|UL)?[0-9A-Za-z-_]{10,}
# Top tracks, they can also include dots
|(?:MC)[\w\.]*
)
.*
|
((?:PL|LL|EC|UU|FL|RD|UL)[0-9A-Za-z-_]{10,})
)"""
_TEMPLATE_URL = 'https://www.youtube.com/playlist?list=%s'
_VIDEO_RE = r'href="\s*/watch\?v=(?P<id>[0-9A-Za-z_-]{11})&[^"]*?index=(?P<index>\d+)(?:[^>]+>(?P<title>[^<]+))?'
IE_NAME = 'youtube:playlist'
_TESTS = [{
'url': 'https://www.youtube.com/playlist?list=PLwiyx1dc3P2JR9N8gQaQN_BCvlSlap7re',
'info_dict': {
'title': 'ytdl test PL',
'id': 'PLwiyx1dc3P2JR9N8gQaQN_BCvlSlap7re',
},
'playlist_count': 3,
}, {
'url': 'https://www.youtube.com/playlist?list=PLtPgu7CB4gbZDA7i_euNxn75ISqxwZPYx',
'info_dict': {
'id': 'PLtPgu7CB4gbZDA7i_euNxn75ISqxwZPYx',
'title': 'YDL_Empty_List',
},
'playlist_count': 0,
}, {
'note': 'Playlist with deleted videos (#651). As a bonus, the video #51 is also twice in this list.',
'url': 'https://www.youtube.com/playlist?list=PLwP_SiAcdui0KVebT0mU9Apz359a4ubsC',
'info_dict': {
'title': '29C3: Not my department',
'id': 'PLwP_SiAcdui0KVebT0mU9Apz359a4ubsC',
},
'playlist_count': 95,
}, {
'note': 'issue #673',
'url': 'PLBB231211A4F62143',
'info_dict': {
'title': '[OLD]Team Fortress 2 (Class-based LP)',
'id': 'PLBB231211A4F62143',
},
'playlist_mincount': 26,
}, {
'note': 'Large playlist',
'url': 'https://www.youtube.com/playlist?list=UUBABnxM4Ar9ten8Mdjj1j0Q',
'info_dict': {
'title': 'Uploads from Cauchemar',
'id': 'UUBABnxM4Ar9ten8Mdjj1j0Q',
},
'playlist_mincount': 799,
}, {
'url': 'PLtPgu7CB4gbY9oDN3drwC3cMbJggS7dKl',
'info_dict': {
'title': 'YDL_safe_search',
'id': 'PLtPgu7CB4gbY9oDN3drwC3cMbJggS7dKl',
},
'playlist_count': 2,
}, {
'note': 'embedded',
'url': 'http://www.youtube.com/embed/videoseries?list=PL6IaIsEjSbf96XFRuNccS_RuEXwNdsoEu',
'playlist_count': 4,
'info_dict': {
'title': 'JODA15',
'id': 'PL6IaIsEjSbf96XFRuNccS_RuEXwNdsoEu',
}
}, {
'note': 'Embedded SWF player',
'url': 'http://www.youtube.com/p/YN5VISEtHet5D4NEvfTd0zcgFk84NqFZ?hl=en_US&fs=1&rel=0',
'playlist_count': 4,
'info_dict': {
'title': 'JODA7',
'id': 'YN5VISEtHet5D4NEvfTd0zcgFk84NqFZ',
}
}, {
'note': 'Buggy playlist: the webpage has a "Load more" button but it doesn\'t have more videos',
'url': 'https://www.youtube.com/playlist?list=UUXw-G3eDE9trcvY2sBMM_aA',
'info_dict': {
'title': 'Uploads from Interstellar Movie',
'id': 'UUXw-G3eDE9trcvY2sBMM_aA',
},
        'playlist_mincount': 21,
}]
def _real_initialize(self):
self._login()
def _extract_mix(self, playlist_id):
# The mixes are generated from a single video
# the id of the playlist is just 'RD' + video_id
url = 'https://youtube.com/watch?v=%s&list=%s' % (playlist_id[-11:], playlist_id)
webpage = self._download_webpage(
url, playlist_id, 'Downloading Youtube mix')
search_title = lambda class_name: get_element_by_attribute('class', class_name, webpage)
title_span = (
search_title('playlist-title') or
search_title('title long-title') or
search_title('title'))
title = clean_html(title_span)
ids = orderedSet(re.findall(
r'''(?xs)data-video-username=".*?".*?
href="/watch\?v=([0-9A-Za-z_-]{11})&[^"]*?list=%s''' % re.escape(playlist_id),
webpage))
url_results = self._ids_to_results(ids)
return self.playlist_result(url_results, playlist_id, title)
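    # Illustration (hypothetical id): for a mix playlist id 'RDBaW_jenozKc',
    # the seed video id is its last 11 characters ('BaW_jenozKc'), which is
    # exactly what the playlist_id[-11:] slice above recovers.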
def _extract_playlist(self, playlist_id):
url = self._TEMPLATE_URL % playlist_id
page = self._download_webpage(url, playlist_id)
for match in re.findall(r'<div class="yt-alert-message">([^<]+)</div>', page):
match = match.strip()
# Check if the playlist exists or is private
if re.match(r'[^<]*(The|This) playlist (does not exist|is private)[^<]*', match):
raise ExtractorError(
'The playlist doesn\'t exist or is private, use --username or '
'--netrc to access it.',
expected=True)
elif re.match(r'[^<]*Invalid parameters[^<]*', match):
raise ExtractorError(
'Invalid parameters. Maybe URL is incorrect.',
expected=True)
elif re.match(r'[^<]*Choose your language[^<]*', match):
continue
else:
self.report_warning('Youtube gives an alert message: ' + match)
playlist_title = self._html_search_regex(
r'(?s)<h1 class="pl-header-title[^"]*"[^>]*>\s*(.*?)\s*</h1>',
page, 'title')
return self.playlist_result(self._entries(page, playlist_id), playlist_id, playlist_title)
def _real_extract(self, url):
# Extract playlist id
mobj = re.match(self._VALID_URL, url)
if mobj is None:
raise ExtractorError('Invalid URL: %s' % url)
playlist_id = mobj.group(1) or mobj.group(2)
# Check if it's a video-specific URL
query_dict = compat_urlparse.parse_qs(compat_urlparse.urlparse(url).query)
if 'v' in query_dict:
video_id = query_dict['v'][0]
if self._downloader.params.get('noplaylist'):
self.to_screen('Downloading just video %s because of --no-playlist' % video_id)
return self.url_result(video_id, 'Youtube', video_id=video_id)
else:
self.to_screen('Downloading playlist %s - add --no-playlist to just download video %s' % (playlist_id, video_id))
if playlist_id.startswith('RD') or playlist_id.startswith('UL'):
# Mixes require a custom extraction process
return self._extract_mix(playlist_id)
return self._extract_playlist(playlist_id)
class YoutubeChannelIE(YoutubePlaylistBaseInfoExtractor):
IE_DESC = 'YouTube.com channels'
_VALID_URL = r'https?://(?:youtu\.be|(?:\w+\.)?youtube(?:-nocookie)?\.com)/channel/(?P<id>[0-9A-Za-z_-]+)'
_TEMPLATE_URL = 'https://www.youtube.com/channel/%s/videos'
_VIDEO_RE = r'(?:title="(?P<title>[^"]+)"[^>]+)?href="/watch\?v=(?P<id>[0-9A-Za-z_-]+)&?'
IE_NAME = 'youtube:channel'
_TESTS = [{
'note': 'paginated channel',
'url': 'https://www.youtube.com/channel/UCKfVa3S1e4PHvxWcwyMMg8w',
'playlist_mincount': 91,
'info_dict': {
'id': 'UUKfVa3S1e4PHvxWcwyMMg8w',
'title': 'Uploads from lex will',
}
}, {
'note': 'Age restricted channel',
# from https://www.youtube.com/user/DeusExOfficial
'url': 'https://www.youtube.com/channel/UCs0ifCMCm1icqRbqhUINa0w',
'playlist_mincount': 64,
'info_dict': {
'id': 'UUs0ifCMCm1icqRbqhUINa0w',
'title': 'Uploads from Deus Ex',
},
}]
def _real_extract(self, url):
channel_id = self._match_id(url)
url = self._TEMPLATE_URL % channel_id
# Channel by page listing is restricted to 35 pages of 30 items, i.e. 1050 videos total (see #5778)
# Workaround by extracting as a playlist if managed to obtain channel playlist URL
# otherwise fallback on channel by page extraction
channel_page = self._download_webpage(
url + '?view=57', channel_id,
'Downloading channel page', fatal=False)
if channel_page is False:
channel_playlist_id = False
else:
channel_playlist_id = self._html_search_meta(
'channelId', channel_page, 'channel id', default=None)
if not channel_playlist_id:
channel_playlist_id = self._search_regex(
r'data-(?:channel-external-|yt)id="([^"]+)"',
channel_page, 'channel id', default=None)
if channel_playlist_id and channel_playlist_id.startswith('UC'):
playlist_id = 'UU' + channel_playlist_id[2:]
return self.url_result(
compat_urlparse.urljoin(url, '/playlist?list=%s' % playlist_id), 'YoutubePlaylist')
channel_page = self._download_webpage(url, channel_id, 'Downloading page #1')
autogenerated = re.search(r'''(?x)
class="[^"]*?(?:
channel-header-autogenerated-label|
yt-channel-title-autogenerated
)[^"]*"''', channel_page) is not None
if autogenerated:
# The videos are contained in a single page
# the ajax pages can't be used, they are empty
entries = [
self.url_result(
video_id, 'Youtube', video_id=video_id,
video_title=video_title)
for video_id, video_title in self.extract_videos_from_page(channel_page)]
return self.playlist_result(entries, channel_id)
return self.playlist_result(self._entries(channel_page, channel_id), channel_id)
class YoutubeUserIE(YoutubeChannelIE):
IE_DESC = 'YouTube.com user videos (URL or "ytuser" keyword)'
_VALID_URL = r'(?:(?:(?:https?://)?(?:\w+\.)?youtube\.com/(?:user/)?(?!(?:attribution_link|watch|results)(?:$|[^a-z_A-Z0-9-])))|ytuser:)(?!feed/)(?P<id>[A-Za-z0-9_-]+)'
_TEMPLATE_URL = 'https://www.youtube.com/user/%s/videos'
IE_NAME = 'youtube:user'
_TESTS = [{
'url': 'https://www.youtube.com/user/TheLinuxFoundation',
'playlist_mincount': 320,
'info_dict': {
'title': 'TheLinuxFoundation',
}
}, {
'url': 'ytuser:phihag',
'only_matching': True,
}]
@classmethod
def suitable(cls, url):
        # Don't return True if the url can be extracted with another youtube
        # extractor; the regex is too permissive and it would match.
other_ies = iter(klass for (name, klass) in globals().items() if name.endswith('IE') and klass is not cls)
if any(ie.suitable(url) for ie in other_ies):
return False
else:
return super(YoutubeUserIE, cls).suitable(url)
class YoutubeUserPlaylistsIE(YoutubePlaylistsBaseInfoExtractor):
IE_DESC = 'YouTube.com user playlists'
_VALID_URL = r'https?://(?:\w+\.)?youtube\.com/user/(?P<id>[^/]+)/playlists'
IE_NAME = 'youtube:user:playlists'
_TESTS = [{
'url': 'http://www.youtube.com/user/ThirstForScience/playlists',
'playlist_mincount': 4,
'info_dict': {
'id': 'ThirstForScience',
'title': 'Thirst for Science',
},
}, {
# with "Load more" button
'url': 'http://www.youtube.com/user/igorkle1/playlists?view=1&sort=dd',
'playlist_mincount': 70,
'info_dict': {
'id': 'igorkle1',
'title': 'Игорь Клейнер',
},
}]
class YoutubeSearchIE(SearchInfoExtractor, YoutubePlaylistIE):
IE_DESC = 'YouTube.com searches'
    # There doesn't appear to be a real limit; for example a search for
    # 'python' returns more than 8,000,000 results.
_MAX_RESULTS = float('inf')
IE_NAME = 'youtube:search'
_SEARCH_KEY = 'ytsearch'
_EXTRA_QUERY_ARGS = {}
_TESTS = []
def _get_n_results(self, query, n):
"""Get a specified number of results for a query"""
videos = []
limit = n
for pagenum in itertools.count(1):
url_query = {
'search_query': query.encode('utf-8'),
'page': pagenum,
'spf': 'navigate',
}
url_query.update(self._EXTRA_QUERY_ARGS)
result_url = 'https://www.youtube.com/results?' + compat_urllib_parse.urlencode(url_query)
data = self._download_json(
result_url, video_id='query "%s"' % query,
note='Downloading page %s' % pagenum,
errnote='Unable to download API page')
html_content = data[1]['body']['content']
if 'class="search-message' in html_content:
raise ExtractorError(
'[youtube] No video results', expected=True)
new_videos = self._ids_to_results(orderedSet(re.findall(
r'href="/watch\?v=(.{11})', html_content)))
videos += new_videos
if not new_videos or len(videos) > limit:
break
if len(videos) > n:
videos = videos[:n]
return self.playlist_result(videos, query)
class YoutubeSearchDateIE(YoutubeSearchIE):
IE_NAME = YoutubeSearchIE.IE_NAME + ':date'
_SEARCH_KEY = 'ytsearchdate'
IE_DESC = 'YouTube.com searches, newest videos first'
_EXTRA_QUERY_ARGS = {'search_sort': 'video_date_uploaded'}
class YoutubeSearchURLIE(InfoExtractor):
IE_DESC = 'YouTube.com search URLs'
IE_NAME = 'youtube:search_url'
_VALID_URL = r'https?://(?:www\.)?youtube\.com/results\?(.*?&)?search_query=(?P<query>[^&]+)(?:[&]|$)'
_TESTS = [{
'url': 'https://www.youtube.com/results?baz=bar&search_query=youtube-dl+test+video&filters=video&lclk=video',
'playlist_mincount': 5,
'info_dict': {
'title': 'youtube-dl test video',
}
}]
def _real_extract(self, url):
mobj = re.match(self._VALID_URL, url)
query = compat_urllib_parse_unquote_plus(mobj.group('query'))
webpage = self._download_webpage(url, query)
result_code = self._search_regex(
r'(?s)<ol[^>]+class="item-section"(.*?)</ol>', webpage, 'result HTML')
part_codes = re.findall(
r'(?s)<h3[^>]+class="[^"]*yt-lockup-title[^"]*"[^>]*>(.*?)</h3>', result_code)
entries = []
for part_code in part_codes:
part_title = self._html_search_regex(
[r'(?s)title="([^"]+)"', r'>([^<]+)</a>'], part_code, 'item title', fatal=False)
part_url_snippet = self._html_search_regex(
r'(?s)href="([^"]+)"', part_code, 'item URL')
part_url = compat_urlparse.urljoin(
'https://www.youtube.com/', part_url_snippet)
entries.append({
'_type': 'url',
'url': part_url,
'title': part_title,
})
return {
'_type': 'playlist',
'entries': entries,
'title': query,
}
class YoutubeShowIE(YoutubePlaylistsBaseInfoExtractor):
IE_DESC = 'YouTube.com (multi-season) shows'
_VALID_URL = r'https?://www\.youtube\.com/show/(?P<id>[^?#]*)'
IE_NAME = 'youtube:show'
_TESTS = [{
'url': 'https://www.youtube.com/show/airdisasters',
'playlist_mincount': 5,
'info_dict': {
'id': 'airdisasters',
'title': 'Air Disasters',
}
}]
def _real_extract(self, url):
playlist_id = self._match_id(url)
return super(YoutubeShowIE, self)._real_extract(
'https://www.youtube.com/show/%s/playlists' % playlist_id)
class YoutubeFeedsInfoExtractor(YoutubeBaseInfoExtractor):
"""
Base class for feed extractors
Subclasses must define the _FEED_NAME and _PLAYLIST_TITLE properties.
"""
_LOGIN_REQUIRED = True
@property
def IE_NAME(self):
return 'youtube:%s' % self._FEED_NAME
def _real_initialize(self):
self._login()
def _real_extract(self, url):
page = self._download_webpage(
'https://www.youtube.com/feed/%s' % self._FEED_NAME, self._PLAYLIST_TITLE)
# The extraction process is the same as for playlists, but the regex
# for the video ids doesn't contain an index
ids = []
more_widget_html = content_html = page
for page_num in itertools.count(1):
matches = re.findall(r'href="\s*/watch\?v=([0-9A-Za-z_-]{11})', content_html)
            # The 'recommended' feed has an infinite 'load more' and each new
            # portion returns the same videos in (sometimes) slightly different
            # order, so we check for uniqueness and break once a portion has no
            # new videos.
new_ids = filter(lambda video_id: video_id not in ids, orderedSet(matches))
if not new_ids:
break
ids.extend(new_ids)
mobj = re.search(r'data-uix-load-more-href="/?(?P<more>[^"]+)"', more_widget_html)
if not mobj:
break
more = self._download_json(
'https://youtube.com/%s' % mobj.group('more'), self._PLAYLIST_TITLE,
'Downloading page #%s' % page_num,
transform_source=uppercase_escape)
content_html = more['content_html']
more_widget_html = more['load_more_widget_html']
return self.playlist_result(
self._ids_to_results(ids), playlist_title=self._PLAYLIST_TITLE)
class YoutubeWatchLaterIE(YoutubePlaylistIE):
IE_NAME = 'youtube:watchlater'
IE_DESC = 'Youtube watch later list, ":ytwatchlater" for short (requires authentication)'
_VALID_URL = r'https?://www\.youtube\.com/(?:feed/watch_later|playlist\?list=WL)|:ytwatchlater'
_TESTS = [] # override PlaylistIE tests
def _real_extract(self, url):
return self._extract_playlist('WL')
class YoutubeFavouritesIE(YoutubeBaseInfoExtractor):
IE_NAME = 'youtube:favorites'
IE_DESC = 'YouTube.com favourite videos, ":ytfav" for short (requires authentication)'
_VALID_URL = r'https?://www\.youtube\.com/my_favorites|:ytfav(?:ou?rites)?'
_LOGIN_REQUIRED = True
def _real_extract(self, url):
webpage = self._download_webpage('https://www.youtube.com/my_favorites', 'Youtube Favourites videos')
playlist_id = self._search_regex(r'list=(.+?)["&]', webpage, 'favourites playlist id')
return self.url_result(playlist_id, 'YoutubePlaylist')
class YoutubeRecommendedIE(YoutubeFeedsInfoExtractor):
IE_DESC = 'YouTube.com recommended videos, ":ytrec" for short (requires authentication)'
_VALID_URL = r'https?://www\.youtube\.com/feed/recommended|:ytrec(?:ommended)?'
_FEED_NAME = 'recommended'
_PLAYLIST_TITLE = 'Youtube Recommended videos'
class YoutubeSubscriptionsIE(YoutubeFeedsInfoExtractor):
IE_DESC = 'YouTube.com subscriptions feed, "ytsubs" keyword (requires authentication)'
_VALID_URL = r'https?://www\.youtube\.com/feed/subscriptions|:ytsubs(?:criptions)?'
_FEED_NAME = 'subscriptions'
_PLAYLIST_TITLE = 'Youtube Subscriptions'
class YoutubeHistoryIE(YoutubeFeedsInfoExtractor):
IE_DESC = 'Youtube watch history, ":ythistory" for short (requires authentication)'
    _VALID_URL = r'https?://www\.youtube\.com/feed/history|:ythistory'
_FEED_NAME = 'history'
_PLAYLIST_TITLE = 'Youtube History'
class YoutubeTruncatedURLIE(InfoExtractor):
IE_NAME = 'youtube:truncated_url'
IE_DESC = False # Do not list
_VALID_URL = r'''(?x)
(?:https?://)?
(?:\w+\.)?[yY][oO][uU][tT][uU][bB][eE](?:-nocookie)?\.com/
(?:watch\?(?:
feature=[a-z_]+|
annotation_id=annotation_[^&]+|
x-yt-cl=[0-9]+|
hl=[^&]*|
t=[0-9]+
)?
|
attribution_link\?a=[^&]+
)
$
'''
_TESTS = [{
'url': 'http://www.youtube.com/watch?annotation_id=annotation_3951667041',
'only_matching': True,
}, {
'url': 'http://www.youtube.com/watch?',
'only_matching': True,
}, {
'url': 'https://www.youtube.com/watch?x-yt-cl=84503534',
'only_matching': True,
}, {
'url': 'https://www.youtube.com/watch?feature=foo',
'only_matching': True,
}, {
'url': 'https://www.youtube.com/watch?hl=en-GB',
'only_matching': True,
}, {
'url': 'https://www.youtube.com/watch?t=2372',
'only_matching': True,
}]
def _real_extract(self, url):
raise ExtractorError(
'Did you forget to quote the URL? Remember that & is a meta '
'character in most shells, so you want to put the URL in quotes, '
'like youtube-dl '
'"http://www.youtube.com/watch?feature=foo&v=BaW_jenozKc" '
' or simply youtube-dl BaW_jenozKc .',
expected=True)
class YoutubeTruncatedIDIE(InfoExtractor):
IE_NAME = 'youtube:truncated_id'
IE_DESC = False # Do not list
_VALID_URL = r'https?://(?:www\.)?youtube\.com/watch\?v=(?P<id>[0-9A-Za-z_-]{1,10})$'
_TESTS = [{
'url': 'https://www.youtube.com/watch?v=N_708QY7Ob',
'only_matching': True,
}]
def _real_extract(self, url):
video_id = self._match_id(url)
raise ExtractorError(
'Incomplete YouTube ID %s. URL %s looks truncated.' % (video_id, url),
expected=True)
| unlicense |
fnordahl/nova | nova/api/openstack/compute/baremetal_nodes.py | 23 | 6315 | # Copyright (c) 2013 NTT DOCOMO, INC.
# Copyright 2014 IBM Corporation.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""The bare-metal admin extension."""
from oslo_config import cfg
from oslo_utils import importutils
import webob
from nova.api.openstack import common
from nova.api.openstack import extensions
from nova.api.openstack import wsgi
from nova.i18n import _
ironic_client = importutils.try_import('ironicclient.client')
ironic_exc = importutils.try_import('ironicclient.exc')
CONF = cfg.CONF
ALIAS = "os-baremetal-nodes"
authorize = extensions.os_compute_authorizer(ALIAS)
node_fields = ['id', 'cpus', 'local_gb', 'memory_mb', 'pm_address',
'pm_user', 'service_host', 'terminal_port', 'instance_uuid']
node_ext_fields = ['uuid', 'task_state', 'updated_at', 'pxe_config_path']
interface_fields = ['id', 'address', 'datapath_id', 'port_no']
CONF.import_opt('api_version',
'nova.virt.ironic.driver',
group='ironic')
CONF.import_opt('api_endpoint',
'nova.virt.ironic.driver',
group='ironic')
CONF.import_opt('admin_username',
'nova.virt.ironic.driver',
group='ironic')
CONF.import_opt('admin_password',
'nova.virt.ironic.driver',
group='ironic')
CONF.import_opt('admin_tenant_name',
'nova.virt.ironic.driver',
group='ironic')
CONF.import_opt('compute_driver', 'nova.virt.driver')
def _check_ironic_client_enabled():
"""Check whether Ironic is installed or not."""
if ironic_client is None:
common.raise_feature_not_supported()
def _get_ironic_client():
"""return an Ironic client."""
# TODO(NobodyCam): Fix insecure setting
kwargs = {'os_username': CONF.ironic.admin_username,
'os_password': CONF.ironic.admin_password,
'os_auth_url': CONF.ironic.admin_url,
'os_tenant_name': CONF.ironic.admin_tenant_name,
'os_service_type': 'baremetal',
'os_endpoint_type': 'public',
'insecure': 'true',
'ironic_url': CONF.ironic.api_endpoint}
icli = ironic_client.get_client(CONF.ironic.api_version, **kwargs)
return icli
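# Rough usage sketch for the client returned above (mirrors the calls made by
# the controller below; endpoint and credentials come from nova.conf):
#
#   icli = _get_ironic_client()
#   nodes = icli.node.list(detail=True)      # every registered Ironic node
#   inode = icli.node.get(node_uuid)         # one node by UUID
#   iports = icli.node.list_ports(node_uuid)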
def _no_ironic_proxy(cmd):
raise webob.exc.HTTPBadRequest(
explanation=_("Command Not supported. Please use Ironic "
"command %(cmd)s to perform this "
"action.") % {'cmd': cmd})
class BareMetalNodeController(wsgi.Controller):
"""The Bare-Metal Node API controller for the OpenStack API."""
def _node_dict(self, node_ref):
d = {}
for f in node_fields:
d[f] = node_ref.get(f)
for f in node_ext_fields:
d[f] = node_ref.get(f)
return d
@extensions.expected_errors((404, 501))
def index(self, req):
context = req.environ['nova.context']
authorize(context)
nodes = []
# proxy command to Ironic
_check_ironic_client_enabled()
icli = _get_ironic_client()
ironic_nodes = icli.node.list(detail=True)
for inode in ironic_nodes:
node = {'id': inode.uuid,
'interfaces': [],
'host': 'IRONIC MANAGED',
'task_state': inode.provision_state,
'cpus': inode.properties.get('cpus', 0),
'memory_mb': inode.properties.get('memory_mb', 0),
'disk_gb': inode.properties.get('local_gb', 0)}
nodes.append(node)
return {'nodes': nodes}
@extensions.expected_errors((404, 501))
def show(self, req, id):
context = req.environ['nova.context']
authorize(context)
# proxy command to Ironic
_check_ironic_client_enabled()
icli = _get_ironic_client()
try:
inode = icli.node.get(id)
except ironic_exc.NotFound:
msg = _("Node %s could not be found.") % id
raise webob.exc.HTTPNotFound(explanation=msg)
iports = icli.node.list_ports(id)
node = {'id': inode.uuid,
'interfaces': [],
'host': 'IRONIC MANAGED',
'task_state': inode.provision_state,
'cpus': inode.properties.get('cpus', 0),
'memory_mb': inode.properties.get('memory_mb', 0),
'disk_gb': inode.properties.get('local_gb', 0),
'instance_uuid': inode.instance_uuid}
for port in iports:
node['interfaces'].append({'address': port.address})
return {'node': node}
@extensions.expected_errors(400)
def create(self, req, body):
_no_ironic_proxy("port-create")
@extensions.expected_errors(400)
def delete(self, req, id):
_no_ironic_proxy("port-create")
@wsgi.action('add_interface')
@extensions.expected_errors(400)
def _add_interface(self, req, id, body):
_no_ironic_proxy("port-create")
@wsgi.action('remove_interface')
@extensions.expected_errors(400)
def _remove_interface(self, req, id, body):
_no_ironic_proxy("port-delete")
class BareMetalNodes(extensions.V21APIExtensionBase):
"""Admin-only bare-metal node administration."""
name = "BareMetalNodes"
alias = ALIAS
version = 1
def get_resources(self):
resource = [extensions.ResourceExtension(ALIAS,
BareMetalNodeController(),
member_actions={"action": "POST"})]
return resource
def get_controller_extensions(self):
"""It's an abstract function V21APIExtensionBase and the extension
will not be loaded without it.
"""
return []
| apache-2.0 |
gunan/tensorflow | tensorflow/lite/experimental/microfrontend/python/kernel_tests/audio_microfrontend_op_test.py | 23 | 5906 | # Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for AudioMicrofrontend."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
from tensorflow.lite.experimental.microfrontend.python.ops import audio_microfrontend_op as frontend_op
from tensorflow.python.framework import ops
SAMPLE_RATE = 1000
WINDOW_SIZE = 25
WINDOW_STEP = 10
NUM_CHANNELS = 2
UPPER_BAND_LIMIT = 450.0
LOWER_BAND_LIMIT = 8.0
SMOOTHING_BITS = 10
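# Reading of the constants above (a rough gloss, not asserted by the tests):
# the frontend windows SAMPLE_RATE-Hz int16 audio into WINDOW_SIZE-sized
# chunks advanced by WINDOW_STEP, maps each chunk onto NUM_CHANNELS filterbank
# channels spanning LOWER_BAND_LIMIT..UPPER_BAND_LIMIT Hz, and uses
# SMOOTHING_BITS for the fixed-point noise-estimate smoothing feeding PCAN.
# With these values an input of WINDOW_SIZE + 4 * WINDOW_STEP samples yields
# the four output frames asserted in testSimple below.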
class AudioFeatureGenerationTest(tf.test.TestCase):
def setUp(self):
super(AudioFeatureGenerationTest, self).setUp()
ops.disable_eager_execution()
def testSimple(self):
with self.test_session():
audio = tf.constant(
[0, 32767, 0, -32768] * ((WINDOW_SIZE + 4 * WINDOW_STEP) // 4),
tf.int16)
filterbanks = frontend_op.audio_microfrontend(
audio,
sample_rate=SAMPLE_RATE,
window_size=WINDOW_SIZE,
window_step=WINDOW_STEP,
num_channels=NUM_CHANNELS,
upper_band_limit=UPPER_BAND_LIMIT,
lower_band_limit=LOWER_BAND_LIMIT,
smoothing_bits=SMOOTHING_BITS,
enable_pcan=True)
self.assertAllEqual(filterbanks.eval(),
[[479, 425], [436, 378], [410, 350], [391, 325]])
def testSimpleFloatScaled(self):
with self.test_session():
audio = tf.constant(
[0, 32767, 0, -32768] * ((WINDOW_SIZE + 4 * WINDOW_STEP) // 4),
tf.int16)
filterbanks = frontend_op.audio_microfrontend(
audio,
sample_rate=SAMPLE_RATE,
window_size=WINDOW_SIZE,
window_step=WINDOW_STEP,
num_channels=NUM_CHANNELS,
upper_band_limit=UPPER_BAND_LIMIT,
lower_band_limit=LOWER_BAND_LIMIT,
smoothing_bits=SMOOTHING_BITS,
enable_pcan=True,
out_scale=64,
out_type=tf.float32)
self.assertAllEqual(filterbanks.eval(),
[[7.484375, 6.640625], [6.8125, 5.90625],
[6.40625, 5.46875], [6.109375, 5.078125]])
def testStacking(self):
with self.test_session():
audio = tf.constant(
[0, 32767, 0, -32768] * ((WINDOW_SIZE + 4 * WINDOW_STEP) // 4),
tf.int16)
filterbanks = frontend_op.audio_microfrontend(
audio,
sample_rate=SAMPLE_RATE,
window_size=WINDOW_SIZE,
window_step=WINDOW_STEP,
num_channels=NUM_CHANNELS,
upper_band_limit=UPPER_BAND_LIMIT,
lower_band_limit=LOWER_BAND_LIMIT,
smoothing_bits=SMOOTHING_BITS,
enable_pcan=True,
right_context=1,
frame_stride=2)
self.assertAllEqual(filterbanks.eval(),
[[479, 425, 436, 378], [410, 350, 391, 325]])
def testStackingWithOverlap(self):
with self.test_session():
audio = tf.constant(
[0, 32767, 0, -32768] * ((WINDOW_SIZE + 4 * WINDOW_STEP) // 4),
tf.int16)
filterbanks = frontend_op.audio_microfrontend(
audio,
sample_rate=SAMPLE_RATE,
window_size=WINDOW_SIZE,
window_step=WINDOW_STEP,
num_channels=NUM_CHANNELS,
upper_band_limit=UPPER_BAND_LIMIT,
lower_band_limit=LOWER_BAND_LIMIT,
smoothing_bits=SMOOTHING_BITS,
enable_pcan=True,
left_context=1,
right_context=1)
self.assertAllEqual(
self.evaluate(filterbanks),
[[479, 425, 479, 425, 436, 378], [479, 425, 436, 378, 410, 350],
[436, 378, 410, 350, 391, 325], [410, 350, 391, 325, 391, 325]])
def testStackingDropFrame(self):
with self.test_session():
audio = tf.constant(
[0, 32767, 0, -32768] * ((WINDOW_SIZE + 4 * WINDOW_STEP) // 4),
tf.int16)
filterbanks = frontend_op.audio_microfrontend(
audio,
sample_rate=SAMPLE_RATE,
window_size=WINDOW_SIZE,
window_step=WINDOW_STEP,
num_channels=NUM_CHANNELS,
upper_band_limit=UPPER_BAND_LIMIT,
lower_band_limit=LOWER_BAND_LIMIT,
smoothing_bits=SMOOTHING_BITS,
enable_pcan=True,
left_context=1,
frame_stride=2)
self.assertAllEqual(filterbanks.eval(),
[[479, 425, 479, 425], [436, 378, 410, 350]])
def testZeroPadding(self):
with self.test_session():
audio = tf.constant(
[0, 32767, 0, -32768] * ((WINDOW_SIZE + 7 * WINDOW_STEP) // 4),
tf.int16)
filterbanks = frontend_op.audio_microfrontend(
audio,
sample_rate=SAMPLE_RATE,
window_size=WINDOW_SIZE,
window_step=WINDOW_STEP,
num_channels=NUM_CHANNELS,
upper_band_limit=UPPER_BAND_LIMIT,
lower_band_limit=LOWER_BAND_LIMIT,
smoothing_bits=SMOOTHING_BITS,
enable_pcan=True,
left_context=2,
frame_stride=3,
zero_padding=True)
self.assertAllEqual(
self.evaluate(filterbanks),
[[0, 0, 0, 0, 479, 425], [436, 378, 410, 350, 391, 325],
[374, 308, 362, 292, 352, 275]])
if __name__ == '__main__':
tf.test.main()
| apache-2.0 |
ezequielpereira/Time-Line | libs/wx/lib/popupctl.py | 6 | 7065 | #----------------------------------------------------------------------
# Name: popup
# Purpose: Generic popup control
#
# Author: Gerrit van Dyk
#
# Created: 2002/11/20
# Version: 0.1
# RCS-ID: $Id: popupctl.py 55187 2008-08-23 02:20:11Z RD $
# License: wxWindows license
#----------------------------------------------------------------------
# 11/24/2007 - Cody Precord
#
# o Use RendererNative to draw button
#
# 12/09/2003 - Jeff Grimmett (grimmtooth@softhome.net)
#
# o 2.5 compatability update.
#
# 12/20/2003 - Jeff Grimmett (grimmtooth@softhome.net)
#
# o wxPopupDialog -> PopupDialog
# o wxPopupControl -> PopupControl
#
import wx
from wx.lib.buttons import GenButtonEvent
class PopButton(wx.PyControl):
def __init__(self,*_args,**_kwargs):
wx.PyControl.__init__(self, *_args, **_kwargs)
self.up = True
self.didDown = False
self.Bind(wx.EVT_LEFT_DOWN, self.OnLeftDown)
self.Bind(wx.EVT_LEFT_UP, self.OnLeftUp)
self.Bind(wx.EVT_MOTION, self.OnMotion)
self.Bind(wx.EVT_PAINT, self.OnPaint)
def Notify(self):
evt = GenButtonEvent(wx.wxEVT_COMMAND_BUTTON_CLICKED, self.GetId())
evt.SetIsDown(not self.up)
evt.SetButtonObj(self)
evt.SetEventObject(self)
self.GetEventHandler().ProcessEvent(evt)
def OnEraseBackground(self, event):
pass
def OnLeftDown(self, event):
if not self.IsEnabled():
return
self.didDown = True
self.up = False
self.CaptureMouse()
self.GetParent().textCtrl.SetFocus()
self.Refresh()
event.Skip()
def OnLeftUp(self, event):
if not self.IsEnabled():
return
if self.didDown:
self.ReleaseMouse()
if not self.up:
self.Notify()
self.up = True
self.Refresh()
self.didDown = False
event.Skip()
def OnMotion(self, event):
if not self.IsEnabled():
return
if event.LeftIsDown():
if self.didDown:
x,y = event.GetPosition()
w,h = self.GetClientSize()
if self.up and x<w and x>=0 and y<h and y>=0:
self.up = False
self.Refresh()
return
if not self.up and (x<0 or y<0 or x>=w or y>=h):
self.up = True
self.Refresh()
return
event.Skip()
def OnPaint(self, event):
dc = wx.BufferedPaintDC(self)
if self.up:
flag = wx.CONTROL_CURRENT
else:
flag = wx.CONTROL_PRESSED
wx.RendererNative.Get().DrawComboBoxDropButton(self, dc, self.GetClientRect(), flag)
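        # wx.CONTROL_CURRENT renders the drop button in its raised state and
        # wx.CONTROL_PRESSED in its pushed-in state; RendererNative takes care
        # of the platform-specific look.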
#---------------------------------------------------------------------------
# Tried to use wxPopupWindow but the control misbehaves on MSW
class PopupDialog(wx.Dialog):
def __init__(self,parent,content = None):
wx.Dialog.__init__(self,parent,-1,'', style = wx.BORDER_SIMPLE|wx.STAY_ON_TOP)
self.ctrl = parent
self.win = wx.Window(self,-1,pos = (0,0),style = 0)
if content:
self.SetContent(content)
def SetContent(self,content):
self.content = content
self.content.Reparent(self.win)
self.content.Show(True)
self.win.SetClientSize(self.content.GetSize())
self.SetSize(self.win.GetSize())
def Display(self):
pos = self.ctrl.ClientToScreen( (0,0) )
dSize = wx.GetDisplaySize()
selfSize = self.GetSize()
tcSize = self.ctrl.GetSize()
pos.x -= (selfSize.width - tcSize.width) / 2
if pos.x + selfSize.width > dSize.width:
pos.x = dSize.width - selfSize.width
if pos.x < 0:
pos.x = 0
pos.y += tcSize.height
if pos.y + selfSize.height > dSize.height:
pos.y = dSize.height - selfSize.height
if pos.y < 0:
pos.y = 0
self.Move(pos)
self.ctrl.FormatContent()
self.ShowModal()
#---------------------------------------------------------------------------
class PopupControl(wx.PyControl):
def __init__(self,*_args,**_kwargs):
if _kwargs.has_key('value'):
del _kwargs['value']
style = _kwargs.get('style', 0)
if (style & wx.BORDER_MASK) == 0:
style |= wx.BORDER_NONE
_kwargs['style'] = style
wx.PyControl.__init__(self, *_args, **_kwargs)
self.textCtrl = wx.TextCtrl(self, wx.ID_ANY, '', pos = (0,0))
self.bCtrl = PopButton(self, wx.ID_ANY, style=wx.BORDER_NONE)
self.pop = None
self.content = None
self.Bind(wx.EVT_SIZE, self.OnSize)
self.bCtrl.Bind(wx.EVT_BUTTON, self.OnButton, self.bCtrl)
self.Bind(wx.EVT_SET_FOCUS, self.OnFocus)
self.SetInitialSize(_kwargs.get('size', wx.DefaultSize))
self.SendSizeEvent()
def OnFocus(self,evt):
# embedded control should get focus on TAB keypress
self.textCtrl.SetFocus()
evt.Skip()
def OnSize(self, evt):
# layout the child widgets
w,h = self.GetClientSize()
self.textCtrl.SetDimensions(0, 0, w - self.marginWidth - self.buttonWidth, h)
self.bCtrl.SetDimensions(w - self.buttonWidth, 0, self.buttonWidth, h)
def DoGetBestSize(self):
# calculate the best size of the combined control based on the
# needs of the child widgets.
tbs = self.textCtrl.GetBestSize()
return wx.Size(tbs.width + self.marginWidth + self.buttonWidth,
tbs.height)
def OnButton(self, evt):
if not self.pop:
if self.content:
self.pop = PopupDialog(self,self.content)
del self.content
else:
print 'No Content to pop'
if self.pop:
self.pop.Display()
def Enable(self, flag):
wx.PyControl.Enable(self,flag)
self.textCtrl.Enable(flag)
self.bCtrl.Enable(flag)
def SetPopupContent(self, content):
if not self.pop:
self.content = content
self.content.Show(False)
else:
self.pop.SetContent(content)
def FormatContent(self):
pass
def PopDown(self):
if self.pop:
self.pop.EndModal(1)
def SetValue(self, value):
self.textCtrl.SetValue(value)
def GetValue(self):
return self.textCtrl.GetValue()
def SetFont(self, font):
self.textCtrl.SetFont(font)
def GetFont(self):
return self.textCtrl.GetFont()
def _get_marginWidth(self):
if 'wxMac' in wx.PlatformInfo:
return 6
else:
return 3
marginWidth = property(_get_marginWidth)
def _get_buttonWidth(self):
return 20
buttonWidth = property(_get_buttonWidth)
# an alias
PopupCtrl = PopupControl
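# Minimal usage sketch (illustrative; any wx.Window subclass works as the
# popup body and is reparented into the popup dialog by SetPopupContent):
#
#   ctrl = PopupControl(parent, -1)
#   panel = wx.Panel(ctrl, -1, size=(200, 150))   # content to drop down
#   ctrl.SetPopupContent(panel)                   # shown on button click
#   value = ctrl.GetValue()                       # text from the embedded field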
| gpl-3.0 |
Tatsh-ansible/ansible | lib/ansible/modules/cloud/docker/docker_volume.py | 9 | 7731 | #!/usr/bin/python
# coding: utf-8
#
# Copyright 2017 Red Hat | Ansible, Alex Grönholm <alex.gronholm@nextday.fi>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.0',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = u'''
module: docker_volume
version_added: "2.4"
short_description: Manage Docker volumes
description:
- Create/remove Docker volumes.
- Performs largely the same function as the "docker volume" CLI subcommand.
options:
name:
description:
- Name of the volume to operate on.
required: true
aliases:
- volume_name
driver:
description:
- Specify the type of volume. Docker provides the C(local) driver, but 3rd party drivers can also be used.
default: local
driver_options:
description:
- "Dictionary of volume settings. Consult docker docs for valid options and values:
U(https://docs.docker.com/engine/reference/commandline/volume_create/#driver-specific-options)"
labels:
description:
- List of labels to set for the volume
force:
description:
- With state C(present) causes the volume to be deleted and recreated if the volume already
exist and the driver, driver options or labels differ. This will cause any data in the existing
volume to be lost.
type: bool
default: 'no'
state:
description:
- C(absent) deletes the volume.
- C(present) creates the volume, if it does not already exist.
default: present
choices:
- absent
- present
extends_documentation_fragment:
- docker
author:
- Alex Grönholm (@agronholm)
requirements:
- "python >= 2.6"
- "docker-py >= 1.10.0"
- "The docker server >= 1.9.0"
'''
EXAMPLES = '''
- name: Create a volume
docker_volume:
name: volume_one
- name: Remove a volume
docker_volume:
name: volume_one
state: absent
- name: Create a volume with options
docker_volume:
name: volume_two
driver_options:
type: btrfs
device: /dev/sda2
'''
RETURN = '''
facts:
description: Volume inspection results for the affected volume.
returned: success
type: dict
sample: {}
'''
try:
from docker.errors import APIError
except ImportError:
# missing docker-py handled in ansible.module_utils.docker
pass
from ansible.module_utils.docker_common import DockerBaseClass, AnsibleDockerClient
from ansible.module_utils.six import iteritems, text_type
class TaskParameters(DockerBaseClass):
def __init__(self, client):
super(TaskParameters, self).__init__()
self.client = client
self.volume_name = None
self.driver = None
self.driver_options = None
self.labels = None
self.force = None
self.debug = None
for key, value in iteritems(client.module.params):
setattr(self, key, value)
class DockerVolumeManager(object):
def __init__(self, client):
self.client = client
self.parameters = TaskParameters(client)
self.check_mode = self.client.check_mode
self.results = {
u'changed': False,
u'actions': []
}
self.diff = self.client.module._diff
self.existing_volume = self.get_existing_volume()
state = self.parameters.state
if state == 'present':
self.present()
elif state == 'absent':
self.absent()
def get_existing_volume(self):
try:
volumes = self.client.volumes()
except APIError as e:
self.client.fail(text_type(e))
for volume in volumes[u'Volumes']:
if volume['Name'] == self.parameters.volume_name:
return volume
return None
def has_different_config(self):
"""
Return the list of differences between the current parameters and the existing volume.
:return: list of options that differ
"""
differences = []
if self.parameters.driver and self.parameters.driver != self.existing_volume['Driver']:
differences.append('driver')
if self.parameters.driver_options:
if not self.existing_volume.get('Options'):
differences.append('driver_options')
else:
for key, value in iteritems(self.parameters.driver_options):
if (not self.existing_volume['Options'].get(key) or
value != self.existing_volume['Options'][key]):
differences.append('driver_options.%s' % key)
if self.parameters.labels:
existing_labels = self.existing_volume.get('Labels', {})
all_labels = set(self.parameters.labels) | set(existing_labels)
for label in all_labels:
if existing_labels.get(label) != self.parameters.labels.get(label):
differences.append('labels.%s' % label)
return differences
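    # Example shape of the returned list (values illustrative):
    #   ['driver', 'driver_options.type', 'labels.environment']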
def create_volume(self):
if not self.existing_volume:
if not self.check_mode:
try:
resp = self.client.create_volume(self.parameters.volume_name,
driver=self.parameters.driver,
driver_opts=self.parameters.driver_options,
labels=self.parameters.labels)
self.existing_volume = self.client.inspect_volume(resp['Name'])
except APIError as e:
self.client.fail(text_type(e))
self.results['actions'].append("Created volume %s with driver %s" % (self.parameters.volume_name, self.parameters.driver))
self.results['changed'] = True
def remove_volume(self):
if self.existing_volume:
if not self.check_mode:
try:
self.client.remove_volume(self.parameters.volume_name)
except APIError as e:
self.client.fail(text_type(e))
self.results['actions'].append("Removed volume %s" % self.parameters.volume_name)
self.results['changed'] = True
def present(self):
differences = []
if self.existing_volume:
differences = self.has_different_config()
if differences and self.parameters.force:
self.remove_volume()
self.existing_volume = None
self.create_volume()
if self.diff or self.check_mode or self.parameters.debug:
self.results['diff'] = differences
if not self.check_mode and not self.parameters.debug:
self.results.pop('actions')
self.results['ansible_facts'] = {u'docker_volume': self.get_existing_volume()}
def absent(self):
self.remove_volume()
def main():
argument_spec = dict(
volume_name=dict(type='str', required=True, aliases=['name']),
state=dict(type='str', default='present', choices=['present', 'absent']),
driver=dict(type='str', default='local'),
driver_options=dict(type='dict', default={}),
labels=dict(type='list'),
force=dict(type='bool', default=False),
debug=dict(type='bool', default=False)
)
client = AnsibleDockerClient(
argument_spec=argument_spec,
supports_check_mode=True
)
cm = DockerVolumeManager(client)
client.module.exit_json(**cm.results)
if __name__ == '__main__':
main()
| gpl-3.0 |
ktan2020/legacy-automation | win/Lib/test/test_contextlib.py | 11 | 9429 | """Unit tests for contextlib.py, and other context managers."""
import sys
import tempfile
import unittest
from contextlib import * # Tests __all__
from test import test_support
try:
import threading
except ImportError:
threading = None
class ContextManagerTestCase(unittest.TestCase):
def test_contextmanager_plain(self):
state = []
@contextmanager
def woohoo():
state.append(1)
yield 42
state.append(999)
with woohoo() as x:
self.assertEqual(state, [1])
self.assertEqual(x, 42)
state.append(x)
self.assertEqual(state, [1, 42, 999])
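    # For comparison, a hand-written context manager doing the same
    # bookkeeping as woohoo() would look roughly like (sketch only):
    #
    #   class Woohoo(object):
    #       def __enter__(self):
    #           state.append(1)
    #           return 42
    #       def __exit__(self, *exc_info):
    #           state.append(999)
    #           return False    # propagate any exception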
def test_contextmanager_finally(self):
state = []
@contextmanager
def woohoo():
state.append(1)
try:
yield 42
finally:
state.append(999)
with self.assertRaises(ZeroDivisionError):
with woohoo() as x:
self.assertEqual(state, [1])
self.assertEqual(x, 42)
state.append(x)
raise ZeroDivisionError()
self.assertEqual(state, [1, 42, 999])
def test_contextmanager_no_reraise(self):
@contextmanager
def whee():
yield
ctx = whee()
ctx.__enter__()
# Calling __exit__ should not result in an exception
self.assertFalse(ctx.__exit__(TypeError, TypeError("foo"), None))
def test_contextmanager_trap_yield_after_throw(self):
@contextmanager
def whoo():
try:
yield
except:
yield
ctx = whoo()
ctx.__enter__()
self.assertRaises(
RuntimeError, ctx.__exit__, TypeError, TypeError("foo"), None
)
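        # Yielding again while an exception is propagating violates the
        # context manager protocol; @contextmanager surfaces this as
        # RuntimeError("generator didn't stop after throw()").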
def test_contextmanager_except(self):
state = []
@contextmanager
def woohoo():
state.append(1)
try:
yield 42
except ZeroDivisionError, e:
state.append(e.args[0])
self.assertEqual(state, [1, 42, 999])
with woohoo() as x:
self.assertEqual(state, [1])
self.assertEqual(x, 42)
state.append(x)
raise ZeroDivisionError(999)
self.assertEqual(state, [1, 42, 999])
def _create_contextmanager_attribs(self):
def attribs(**kw):
def decorate(func):
for k,v in kw.items():
setattr(func,k,v)
return func
return decorate
@contextmanager
@attribs(foo='bar')
def baz(spam):
"""Whee!"""
return baz
def test_contextmanager_attribs(self):
baz = self._create_contextmanager_attribs()
self.assertEqual(baz.__name__,'baz')
self.assertEqual(baz.foo, 'bar')
@unittest.skipIf(sys.flags.optimize >= 2,
"Docstrings are omitted with -O2 and above")
def test_contextmanager_doc_attrib(self):
baz = self._create_contextmanager_attribs()
self.assertEqual(baz.__doc__, "Whee!")
class NestedTestCase(unittest.TestCase):
# XXX This needs more work
def test_nested(self):
@contextmanager
def a():
yield 1
@contextmanager
def b():
yield 2
@contextmanager
def c():
yield 3
with nested(a(), b(), c()) as (x, y, z):
self.assertEqual(x, 1)
self.assertEqual(y, 2)
self.assertEqual(z, 3)
def test_nested_cleanup(self):
state = []
@contextmanager
def a():
state.append(1)
try:
yield 2
finally:
state.append(3)
@contextmanager
def b():
state.append(4)
try:
yield 5
finally:
state.append(6)
with self.assertRaises(ZeroDivisionError):
with nested(a(), b()) as (x, y):
state.append(x)
state.append(y)
1 // 0
self.assertEqual(state, [1, 4, 2, 5, 6, 3])
def test_nested_right_exception(self):
@contextmanager
def a():
yield 1
class b(object):
def __enter__(self):
return 2
def __exit__(self, *exc_info):
try:
raise Exception()
except:
pass
with self.assertRaises(ZeroDivisionError):
with nested(a(), b()) as (x, y):
1 // 0
self.assertEqual((x, y), (1, 2))
def test_nested_b_swallows(self):
@contextmanager
def a():
yield
@contextmanager
def b():
try:
yield
except:
# Swallow the exception
pass
try:
with nested(a(), b()):
1 // 0
except ZeroDivisionError:
self.fail("Didn't swallow ZeroDivisionError")
def test_nested_break(self):
@contextmanager
def a():
yield
state = 0
while True:
state += 1
with nested(a(), a()):
break
state += 10
self.assertEqual(state, 1)
def test_nested_continue(self):
@contextmanager
def a():
yield
state = 0
while state < 3:
state += 1
with nested(a(), a()):
continue
state += 10
self.assertEqual(state, 3)
def test_nested_return(self):
@contextmanager
def a():
try:
yield
except:
pass
def foo():
with nested(a(), a()):
return 1
return 10
self.assertEqual(foo(), 1)
class ClosingTestCase(unittest.TestCase):
# XXX This needs more work
def test_closing(self):
state = []
class C:
def close(self):
state.append(1)
x = C()
self.assertEqual(state, [])
with closing(x) as y:
self.assertEqual(x, y)
self.assertEqual(state, [1])
def test_closing_error(self):
state = []
class C:
def close(self):
state.append(1)
x = C()
self.assertEqual(state, [])
with self.assertRaises(ZeroDivisionError):
with closing(x) as y:
self.assertEqual(x, y)
1 // 0
self.assertEqual(state, [1])
class FileContextTestCase(unittest.TestCase):
def testWithOpen(self):
tfn = tempfile.mktemp()
try:
f = None
with open(tfn, "w") as f:
self.assertFalse(f.closed)
f.write("Booh\n")
self.assertTrue(f.closed)
f = None
with self.assertRaises(ZeroDivisionError):
with open(tfn, "r") as f:
self.assertFalse(f.closed)
self.assertEqual(f.read(), "Booh\n")
1 // 0
self.assertTrue(f.closed)
finally:
test_support.unlink(tfn)
@unittest.skipUnless(threading, 'Threading required for this test.')
class LockContextTestCase(unittest.TestCase):
def boilerPlate(self, lock, locked):
self.assertFalse(locked())
with lock:
self.assertTrue(locked())
self.assertFalse(locked())
with self.assertRaises(ZeroDivisionError):
with lock:
self.assertTrue(locked())
1 // 0
self.assertFalse(locked())
def testWithLock(self):
lock = threading.Lock()
self.boilerPlate(lock, lock.locked)
def testWithRLock(self):
lock = threading.RLock()
self.boilerPlate(lock, lock._is_owned)
def testWithCondition(self):
lock = threading.Condition()
def locked():
return lock._is_owned()
self.boilerPlate(lock, locked)
def testWithSemaphore(self):
lock = threading.Semaphore()
def locked():
if lock.acquire(False):
lock.release()
return False
else:
return True
self.boilerPlate(lock, locked)
def testWithBoundedSemaphore(self):
lock = threading.BoundedSemaphore()
def locked():
if lock.acquire(False):
lock.release()
return False
else:
return True
self.boilerPlate(lock, locked)
# This is needed to make the test actually run under regrtest.py!
def test_main():
with test_support.check_warnings(("With-statements now directly support "
"multiple context managers",
DeprecationWarning)):
test_support.run_unittest(__name__)
if __name__ == "__main__":
test_main()
| mit |
haoyuchen1992/CourseBuilder | modules/dashboard/messages.py | 11 | 7248 | # Copyright 2013 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Messages used in the dashboard."""
__author__ = 'John Orr (jorr@google.com)'
from common import safe_dom
def assemble_sanitized_message(text, link):
node_list = safe_dom.NodeList()
if text:
node_list.append(safe_dom.Text(text))
node_list.append(safe_dom.Entity(' '))
if link:
node_list.append(safe_dom.Element(
'a', href=link, target='_blank').add_text('Learn more...'))
return node_list
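# Illustrative usage sketch, not part of the original module; the text and
# link below are hypothetical. When both arguments are given, the returned
# safe_dom.NodeList holds the text followed by a "Learn more..." anchor.
EXAMPLE_DESCRIPTION = assemble_sanitized_message("""
This setting controls the example widget.
""", 'https://example.com/docs')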
ABOUT_THE_COURSE_DESCRIPTION = assemble_sanitized_message("""
This information is configured by an administrator from the Admin pages.
""", None)
ASSESSMENT_CONTENT_DESCRIPTION = assemble_sanitized_message("""
Assessment questions and answers (JavaScript format).
""", 'https://code.google.com/p/course-builder/wiki/CreateAssessments')
ASSESSMENT_DETAILS_DESCRIPTION = assemble_sanitized_message("""
Properties and restrictions of your assessment.
""", 'https://code.google.com/p/course-builder/wiki/PeerReview')
ASSESSMENT_EDITOR_DESCRIPTION = assemble_sanitized_message(
None, 'https://code.google.com/p/course-builder/wiki/CreateAssessments')
ASSETS_DESCRIPTION = assemble_sanitized_message("""
These are all the assets for your course. You can upload new images and
documents here, after which you can use them in your lessons and activities.
You may create, edit, and delete activities and assessments from the Outline
page. All other assets must be edited by an administrator.
""", None)
ASSIGNMENTS_MENU_DESCRIPTION = assemble_sanitized_message("""
Select a peer-reviewed assignment and enter a student's email address to view
their assignment submission and any associated reviews.
""", None)
CONTENTS_OF_THE_COURSE_DESCRIPTION = assemble_sanitized_message("""
The course.yaml file contains many course settings. Edit it using the buttons
at the right.
""", 'https://code.google.com/p/course-builder/wiki/CourseSettings')
COURSE_OUTLINE_DESCRIPTION = assemble_sanitized_message(
'Build, organize and preview your course here.',
'https://code.google.com/p/course-builder/wiki/Dashboard#Outline')
COURSE_OUTLINE_EDITOR_DESCRIPTION = assemble_sanitized_message("""
Click up/down arrows to re-order units, or lessons within units. To move a
lesson between units, edit that lesson from the outline page and change its
parent unit.
""", None)
COURSE_TEMPLATE_DESCRIPTION = assemble_sanitized_message("""
The course_template.yaml file contains the common template settings
for all courses. You can override the template settings for this
course by editing your course.yaml file.
""", None)
DATA_FILES_DESCRIPTION = assemble_sanitized_message("""
The lesson.csv file contains the contents of your lesson. The unit.csv file
contains the course related content shown on the homepage. These files are
located in your Course Builder installation. Edit them directly with an editor
like Notepad++. Be careful, some editors will add extra characters, which may
prevent the uploading of these files.
""", 'https://code.google.com/p/course-builder/wiki/Dashboard#Outline')
EDIT_SETTINGS_DESCRIPTION = assemble_sanitized_message("""
The course.yaml file contains many course settings.
""", 'https://code.google.com/p/course-builder/wiki/CourseSettings')
IMPORT_COURSE_DESCRIPTION = assemble_sanitized_message("""
Import the contents of another course into this course. Both courses must be on
the same Google App Engine instance.
""", None)
INCORRECT_ANSWER_FEEDBACK = """
Shown when the student response does not match any of the possible answers.
"""
INPUT_FIELD_HEIGHT_DESCRIPTION = """
Height of the input field, measured in rows.
"""
INPUT_FIELD_WIDTH_DESCRIPTION = """
Width of the input field, measured in columns.
"""
LESSON_ACTIVITY_DESCRIPTION = assemble_sanitized_message("""
Create an activity by entering the correct syntax above.
""", ('https://code.google.com/p/course-builder/wiki/CreateActivities'
'#Writing_activities'))
LESSON_ACTIVITY_LISTED_DESCRIPTION = """
Whether the activity should be viewable as a stand-alone item in the unit index.
"""
LESSON_ACTIVITY_TITLE_DESCRIPTION = """
This appears above your activity.
"""
LESSON_OBJECTIVES_DESCRIPTION = """
The lesson body is displayed to students above the video in the default
template.
"""
LESSON_SCORED_DESCRIPTION = """
Whether questions in this lesson will be scored (summative) or only
provide textual feedback (formative).
"""
LESSON_VIDEO_ID_DESCRIPTION = """
Provide a YouTube video ID to embed a video.
"""
LESSON_NOTES_DESCRIPTION = """
Provide a URL that points to the notes for this lesson (if applicable). These
notes can be accessed by clicking on the 'Text Version' button on the lesson
page.
"""
LINK_EDITOR_DESCRIPTION = assemble_sanitized_message("""
Links will appear in your outline and will take students directly to the URL.
""", None)
LINK_EDITOR_URL_DESCRIPTION = """
Links to external sites must start with 'http' or 'https'.
"""
PAGES_DESCRIPTION = assemble_sanitized_message(
None, 'https://code.google.com/p/course-builder/wiki/Dashboard#Outline')
QUESTION_DESCRIPTION = 'Shown when selecting questions for quizzes, etc.'
REVIEWER_FEEDBACK_FORM_DESCRIPTION = assemble_sanitized_message("""
Review form questions and answers (JavaScript format).
""", 'https://code.google.com/p/course-builder/wiki/PeerReview')
SETTINGS_DESCRIPTION = assemble_sanitized_message(
None, 'https://code.google.com/p/course-builder/wiki/Dashboard#Settings')
UNIT_EDITOR_DESCRIPTION = assemble_sanitized_message("""
Units contain lessons and activities.
""", 'https://code.google.com/p/course-builder/wiki/Dashboard#Outline')
UPLOAD_ASSET_DESCRIPTION = assemble_sanitized_message("""
Choose a file to upload to this Google App Engine instance. Learn more about
file storage and hosting.
""", 'https://code.google.com/p/course-builder/wiki/Dashboard#Assets')
DUE_DATE_FORMAT_DESCRIPTION = assemble_sanitized_message("""
Should be formatted as YYYY-MM-DD hh:mm (e.g. 1997-07-16 19:20) and be specified
in the UTC timezone.""", None)
REVIEW_DUE_DATE_FORMAT_DESCRIPTION = assemble_sanitized_message("""
Should be formatted as YYYY-MM-DD hh:mm (e.g. 1997-07-16 19:20) and be specified
in the UTC timezone.
""", 'https://code.google.com/p/course-builder/wiki/PeerReview')
REVIEW_TIMEOUT_IN_MINUTES = assemble_sanitized_message("""
This value should be specified in minutes.
""", 'https://code.google.com/p/course-builder/wiki/PeerReview')
REVIEW_MIN_COUNT_DESCRIPTION = assemble_sanitized_message(
None, 'https://code.google.com/p/course-builder/wiki/PeerReview')
AUTO_GRADER_NAME = 'Automatic Grading'
HUMAN_GRADER_NAME = 'Peer Review'
PEER_MATCHER_NAME = 'Peer'
| apache-2.0 |
ademuk/django-oscar | src/oscar/apps/dashboard/partners/forms.py | 12 | 4445 | from django import forms
from django.contrib.auth.models import Permission
from django.utils.translation import ugettext_lazy as _, pgettext_lazy
from oscar.core.loading import get_model
from oscar.core.compat import existing_user_fields, get_user_model
from oscar.apps.customer.forms import EmailUserCreationForm
from oscar.core.validators import password_validators
User = get_user_model()
Partner = get_model('partner', 'Partner')
PartnerAddress = get_model('partner', 'PartnerAddress')
class PartnerSearchForm(forms.Form):
name = forms.CharField(
required=False, label=pgettext_lazy(u"Partner's name", u"Name"))
class PartnerCreateForm(forms.ModelForm):
class Meta:
model = Partner
fields = ('name',)
ROLE_CHOICES = (
('staff', _('Full dashboard access')),
('limited', _('Limited dashboard access')),
)
class NewUserForm(EmailUserCreationForm):
role = forms.ChoiceField(choices=ROLE_CHOICES, widget=forms.RadioSelect,
label=_('User role'), initial='limited')
def __init__(self, partner, *args, **kwargs):
self.partner = partner
super(NewUserForm, self).__init__(host=None, *args, **kwargs)
def save(self):
role = self.cleaned_data.get('role', 'limited')
user = super(NewUserForm, self).save(commit=False)
user.is_staff = role == 'staff'
user.save()
self.partner.users.add(user)
if role == 'limited':
dashboard_access_perm = Permission.objects.get(
codename='dashboard_access', content_type__app_label='partner')
user.user_permissions.add(dashboard_access_perm)
return user
class Meta:
model = User
fields = existing_user_fields(
['first_name', 'last_name', 'email']) + ['password1', 'password2']
class ExistingUserForm(forms.ModelForm):
"""
    Slightly different form that
* makes saving password optional
* doesn't regenerate username
* doesn't allow changing email till #668 is resolved
"""
role = forms.ChoiceField(choices=ROLE_CHOICES, widget=forms.RadioSelect,
label=_('User role'))
password1 = forms.CharField(
label=_('Password'),
widget=forms.PasswordInput,
required=False,
validators=password_validators)
password2 = forms.CharField(
required=False,
label=_('Confirm Password'),
widget=forms.PasswordInput)
def clean_password2(self):
password1 = self.cleaned_data.get('password1', '')
password2 = self.cleaned_data.get('password2', '')
if password1 != password2:
raise forms.ValidationError(
_("The two password fields didn't match."))
return password2
def __init__(self, *args, **kwargs):
user = kwargs['instance']
role = 'staff' if user.is_staff else 'limited'
kwargs.get('initial', {}).setdefault('role', role)
super(ExistingUserForm, self).__init__(*args, **kwargs)
def save(self):
role = self.cleaned_data.get('role', 'none')
user = super(ExistingUserForm, self).save(commit=False)
user.is_staff = role == 'staff'
if self.cleaned_data['password1']:
user.set_password(self.cleaned_data['password1'])
user.save()
dashboard_perm = Permission.objects.get(
codename='dashboard_access', content_type__app_label='partner')
user_has_perm = user.user_permissions.filter(
pk=dashboard_perm.pk).exists()
if role == 'limited' and not user_has_perm:
user.user_permissions.add(dashboard_perm)
elif role == 'staff' and user_has_perm:
user.user_permissions.remove(dashboard_perm)
return user
class Meta:
model = User
fields = existing_user_fields(
['first_name', 'last_name']) + ['password1', 'password2']
class UserEmailForm(forms.Form):
# We use a CharField so that a partial email address can be entered
email = forms.CharField(
label=_("Email address"), max_length=100)
class PartnerAddressForm(forms.ModelForm):
name = forms.CharField(
required=False, label=pgettext_lazy(u"Partner's name", u"Name"))
class Meta:
fields = ('name', 'line1', 'line2', 'line3', 'line4',
'state', 'postcode', 'country')
model = PartnerAddress
| bsd-3-clause |
yutiansut/QUANTAXIS | QUANTAXIS/QAAnalysis/QAAnalysis_signal.py | 2 | 22236 | # coding:utf-8
# Author: 阿财(Rgveda@github)(11652964@qq.com)
# Created date: 2020-02-27
#
# The MIT License (MIT)
#
# Copyright (c) 2016-2021 yutiansut/QUANTAXIS
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import numpy as np
import pandas as pd
import numba as nb
import scipy.signal as signal
from scipy.signal import lfilter, lfilter_zi, filtfilt, butter, savgol_filter
from functools import reduce  # needed on Python 3 by the *_with_reduce helpers
from QUANTAXIS.QAIndicator.base import *
from QUANTAXIS.QAData.base_datastruct import *
try:
import peakutils
except:
#print('PLEASE run "pip install peakutils" to call these modules')
pass
try:
from QUANTAXIS.QAIndicator.talib_numpy import *
import QUANTAXIS as QA
from QUANTAXIS.QAIndicator.base import *
from QUANTAXIS.QAUtil.QADate_Adv import (
QA_util_timestamp_to_str,
QA_util_datetime_to_Unix_timestamp,
QA_util_print_timestamp
)
except:
print('PLEASE run "pip install QUANTAXIS" before call QUANTAXIS.QAAnalysis.QAAnalysis_signal modules')
pass
"""
Time-series signal processing: shared helper functions
"""
def time_series_momemtum(price, n=24, rf=0.02):
"""
    Time-series momentum indicator
Time Series Momentum strategy
"""
return (price / price.shift(n) - 1) - rf
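def _demo_time_series_momemtum():
    # Illustrative sketch, not part of the original module: the prices below
    # are hypothetical. Computes 3-period time-series momentum; the first 3
    # values are NaN because of the shift, the rest are the 3-period return
    # minus the risk-free rate.
    price = pd.Series([100.0, 102.0, 101.0, 105.0, 108.0, 107.0])
    return time_series_momemtum(price, n=3, rf=0.02)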
def find_peak_vextors_eagerly(price, offest=0):
"""
    (Eagerly) find extra extreme points while the MACD is on an upslope
"""
xn = price
# pass 0
window_size, poly_order = 5, 1
yy_sg = savgol_filter(xn, window_size, poly_order)
# pass 1
x_tp_min, x_tp_max = signal.argrelextrema(yy_sg, np.less)[0], signal.argrelextrema(yy_sg, np.greater)[0]
n = int(len(price) / (len(x_tp_min) + len(x_tp_max))) * 2
    # peakutils only finds maxima, so invert the curve to locate the minima
mirrors = (yy_sg * -1) + np.mean(price) * 2
    # pass 2: search with peakutils
x_tp_max = peakutils.indexes(yy_sg, thres=0.01 / max(price), min_dist=n)
x_tp_min = peakutils.indexes(mirrors, thres=0.01 / max(price), min_dist=n)
return x_tp_min + offest, x_tp_max + offest
def find_peak_vextors(price, return_ref=False, offest=0):
"""
    Adaptively find the best extreme points of a price series with a
    Butterworth signal filter, which also decides how many average-period
    segments to use; a second statistical peak-detection pass is then run.
    If you meet a Warning message, upgrading to scipy>=1.2 solves it,
    but QUANTAXIS is incompatible with scipy>=1.2.
Parameters
----------
price : (N,) array_like
        The price-time series in which to search for extreme points.
return_ref : bool or None, optional
        Whether to also return the smoothed reference curve; the smoothing
        reduces sawtooth jitter and so the number of extreme points computed.
        Return the smoothed line for reference.
offest : int or None, optional
        Callers often strip the leading np.nan elements (e.g. via .dropna()
        or a slice such as price[t:]), which shifts the array indices. Since
        this function returns indices into the array it was given, a
        compensating offset can be passed in; it is added to every returned
        min/max index.
        The number of elements index offest, for jump np.nan in price's head.
Returns
-------
x_tp_min, x_tp_max : ndarray
        Arrays holding the indices of the local minima and maxima found in
        `price`.
"""
xn = price
# Create an order 3 lowpass butterworth filter.
b, a = butter(3, 0.05)
# Apply the filter to xn. Use lfilter_zi to choose the initial condition
# of the filter.
zi = lfilter_zi(b, a)
z, _ = lfilter(b, a, xn, zi=zi * xn[0])
# Apply the filter again, to have a result filtered at an order
# the same as filtfilt.
z2, _ = lfilter(b, a, z, zi=zi * z[0])
# Use filtfilt to apply the filter. If you meet a Warning need upgrade to
# scipy=>1.2 but QUANTAXIS incompatible scipy=>1.2
y = filtfilt(b, a, xn)
# pass 1
x_tp_min, x_tp_max = signal.argrelextrema(y, np.less)[0], signal.argrelextrema(y, np.greater)[0]
n = int(len(price) / (len(x_tp_min) + len(x_tp_max))) * 2
    # peakutils only finds maxima, so invert the curve to locate the minima
mirrors = (price * -1) + np.mean(price) * 2
    # pass 2: search with peakutils
x_tp_max = peakutils.indexes(price, thres=0.01 / max(price), min_dist=n)
x_tp_min = peakutils.indexes(mirrors, thres=0.01 / max(price), min_dist=n)
if (return_ref):
return x_tp_min + offest, x_tp_max + offest, y
else:
return x_tp_min + offest, x_tp_max + offest
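def _demo_find_peak_vextors():
    # Illustrative sketch, not part of the original module: runs the peak
    # finder on a synthetic sine-wave "price" (hypothetical data). Requires
    # scipy and peakutils, the same dependencies the function itself needs.
    t = np.linspace(0, 6 * np.pi, 300)
    price = np.sin(t) * 10 + 100
    x_tp_min, x_tp_max = find_peak_vextors(price)
    # x_tp_min / x_tp_max hold the array indices of the local minima / maxima.
    return x_tp_min, x_tp_max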
def Timeline_Integral_with_lambda(Tm,):
"""
explanation:
        Cumulative convolution sum of golden/death-cross signals in the time
        domain (a death cross, 1-->0, resets the sum to zero)
params:
* Tm ->:
            meaning: signal data
type: null
optional: [null]
return:
np.array
demonstrate:
Not described
output:
Not described
"""
    T = [Tm[0]]
    # Build the cumulative sum with a side-effecting lambda: each step is
    # y * (T[-1] + y), so a 0 anywhere in Tm resets the accumulator to zero.
    list(map(lambda y: T.append(y * (T[-1] + y)), Tm[1:]))
    return np.array(T)
@nb.jit(nopython=True)
def Timeline_Integral(Tm:np.ndarray,):
"""
explanation:
        Cumulative convolution sum of golden/death-cross signals in the time
        domain (a death cross, 1-->0, resets the sum); a plain for-loop
        benchmarked fastest here, faster than reduce
params:
* Tm ->:
            meaning: signal data
type: null
optional: [null]
return:
np.array
demonstrate:
Not described
output:
Not described
"""
T = np.zeros(len(Tm)).astype(np.int32)
for i, Tmx in enumerate(Tm):
T[i] = Tmx * (T[i - 1] + Tmx)
return T
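def _demo_timeline_integral():
    # Illustrative sketch, not part of the original module: shows how the
    # cumulative count grows through runs of 1s and resets on each 0.
    Tm = np.array([0, 1, 1, 1, 0, 1, 1], dtype=np.int32)
    # Expected output: [0, 1, 2, 3, 0, 1, 2]
    return Timeline_Integral(Tm)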
def Timeline_Integral_with_reduce(Tm,):
"""
explanation:
        Cumulative convolution sum of golden/death-cross signals in the time
        domain (a death cross, 1-->0, resets the sum); a plain for-loop
        benchmarked fastest, faster than reduce
params:
* Tm ->:
            meaning: signal data
type: null
optional: [null]
return:
np.array
demonstrate:
Not described
output:
Not described
"""
T = []
    # range must run to len(Tm) + 1 so the output has one entry per element of Tm
    for i in range(1, len(Tm) + 1):
        T.append(reduce(lambda x, y: int(y * (y + x)), Tm[0:i]))
return np.array(T)
@nb.jit(nopython=True)
def Timeline_Integral_with_cross_before(Tm:np.ndarray,):
"""
explanation:
        Counts bars since the last cross signal in the time domain (a death
        cross, 1-->0, does NOT reset the count; a golden cross, 0-->1, resets
        it to zero). A plain for-loop benchmarked fastest, faster than reduce
        (even without jit; with jit the for-loop is faster still)
params:
* Tm ->:
            meaning: signal data
type: null
optional: [null]
return:
np.array
demonstrate:
Not described
output:
Not described
"""
T = np.zeros(len(Tm)).astype(np.int32)
for i, Tmx in enumerate(Tm):
T[i] = (T[i - 1] + 1) if (Tmx != 1) else 0
return T
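def _demo_timeline_integral_with_cross_before():
    # Illustrative sketch, not part of the original module: counts the bars
    # elapsed since the most recent cross signal (each 1 resets the count).
    Tm = np.array([0, 1, 0, 0, 0, 1, 0], dtype=np.int32)
    # Expected output: [1, 0, 1, 2, 3, 0, 1]
    return Timeline_Integral_with_cross_before(Tm)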
@nb.jit(nopython=True)
def LIS(X):
"""
explanation:
        Compute the longest increasing subsequence
Longest increasing subsequence
params:
* X ->:
            meaning: sequence
type: null
optional: [null]
return:
        (the subsequence values, the indices of those values)
demonstrate:
Not described
output:
Not described
"""
N = len(X)
P = [0] * N
M = [0] * (N + 1)
L = 0
for i in range(N):
lo = 1
hi = L
while lo <= hi:
mid = (lo + hi) // 2
if (X[M[mid]] < X[i]):
lo = mid + 1
else:
hi = mid - 1
newL = lo
P[i] = M[newL - 1]
M[newL] = i
if (newL > L):
L = newL
S = []
pos = []
k = M[L]
for i in range(L - 1, -1, -1):
S.append(X[k])
pos.append(k)
k = P[k]
return S[::-1], pos[::-1]
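def _demo_LIS():
    # Illustrative sketch, not part of the original module, assuming numba
    # can compile LIS as written above. For the toy array below the function
    # returns the values [1, 4, 5, 6] and their positions [1, 2, 4, 5].
    return LIS(np.array([3, 1, 4, 1, 5, 9, 2, 6]))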
@nb.jit(nopython=True)
def LDS(X):
"""
explanation:
        Compute the longest decreasing subsequence
Longest decreasing subsequence
params:
* X ->:
            meaning: sequence
type: null
optional: [null]
return:
        (the subsequence values, the indices of those values)
demonstrate:
Not described
output:
Not described
"""
N = len(X)
P = [0] * N
M = [0] * (N + 1)
L = 0
for i in range(N):
lo = 1
hi = L
while lo <= hi:
mid = (lo + hi) // 2
if (X[M[mid]] > X[i]):
lo = mid + 1
else:
hi = mid - 1
newL = lo
P[i] = M[newL - 1]
M[newL] = i
if (newL > L):
L = newL
S = []
pos = []
k = M[L]
for i in range(L - 1, -1, -1):
S.append(X[k])
pos.append(k)
k = P[k]
return S[::-1], pos[::-1]
def price_predict_with_macd_trend_func(data):
"""
    Price trend prediction based on a Butterworth band-pass filter plus a
    statistical (scipy/peakutils) peak-detection pass.
    It also bundles everything macd_cross_func() does (no way around it;
    computing MACD twice separately would be silly).
"""
MACD = TA_MACD(data.close)
PRICE_PREDICT = pd.DataFrame(columns=['PRICE_PRED_CROSS', 'PRICE_PRED_CROSS_JX', 'PRICE_PRED_CROSS_SX', 'MACD_CROSS', 'MACD_CROSS_JX', 'MACD_CROSS_SX'], index=data.index)
PRICE_PREDICT = PRICE_PREDICT.assign(DIF=MACD[:,0])
PRICE_PREDICT = PRICE_PREDICT.assign(DEA=MACD[:,1])
PRICE_PREDICT = PRICE_PREDICT.assign(MACD=MACD[:,2])
PRICE_PREDICT = PRICE_PREDICT.assign(DELTA=MACD[:,3])
dea_tp_min, dea_tp_max = find_peak_vextors(PRICE_PREDICT['DEA'].values[33:], offest=33)
PRICE_PREDICT.iloc[dea_tp_min, PRICE_PREDICT.columns.get_loc('MACD_CROSS')] = 1
PRICE_PREDICT.iloc[dea_tp_max, PRICE_PREDICT.columns.get_loc('MACD_CROSS')] = -1
MACD_CROSS_JX = CROSS(PRICE_PREDICT['DIF'], PRICE_PREDICT['DEA'])
DEA_CROSS_JX = CROSS(PRICE_PREDICT['DEA'], 0)
MACD_CROSS_SX = CROSS(PRICE_PREDICT['DEA'], PRICE_PREDICT['DIF'])
DEA_CROSS_SX = CROSS(0, PRICE_PREDICT['DEA'])
PRICE_PREDICT.loc[MACD_CROSS_JX == 1, 'MACD_CROSS_JX'] = 1
PRICE_PREDICT.loc[MACD_CROSS_SX == 1, 'MACD_CROSS_SX'] = -1
PRICE_PREDICT.iloc[dea_tp_min, PRICE_PREDICT.columns.get_loc('MACD_CROSS_JX')] = 1
PRICE_PREDICT.iloc[dea_tp_max, PRICE_PREDICT.columns.get_loc('MACD_CROSS_SX')] = 1
PRICE_PREDICT['MACD_CROSS_JX'] = Timeline_Integral_with_cross_before(PRICE_PREDICT['MACD_CROSS_JX'])
PRICE_PREDICT['MACD_CROSS_SX'] = Timeline_Integral_with_cross_before(PRICE_PREDICT['MACD_CROSS_SX'])
# pass 1
x_tp_min, x_tp_max = find_peak_vextors(data.close.values)
PRICE_PREDICT.iloc[x_tp_min, PRICE_PREDICT.columns.get_loc('PRICE_PRED_CROSS')] = x_tp_min
PRICE_PREDICT.iloc[x_tp_max, PRICE_PREDICT.columns.get_loc('PRICE_PRED_CROSS')] = -x_tp_max
PRICE_PREDICT.iloc[x_tp_min, PRICE_PREDICT.columns.get_loc('PRICE_PRED_CROSS_JX')] = 1
PRICE_PREDICT.iloc[x_tp_max, PRICE_PREDICT.columns.get_loc('PRICE_PRED_CROSS_SX')] = 1
    # pass 2: while MACD is in a golden cross, look for extra extreme points to create more buy conditions
x_tp_min, x_tp_max = find_peak_vextors_eagerly(data.close.values)
macd_up_trend_PEAKPOINT_MIN = (PRICE_PREDICT.iloc[x_tp_min, PRICE_PREDICT.columns.get_loc('MACD_CROSS_JX')] < PRICE_PREDICT.iloc[x_tp_min, PRICE_PREDICT.columns.get_loc('MACD_CROSS_SX')])
macd_up_trend_PEAKPOINT_MAX = (PRICE_PREDICT.iloc[x_tp_max, PRICE_PREDICT.columns.get_loc('MACD_CROSS_JX')] < PRICE_PREDICT.iloc[x_tp_max, PRICE_PREDICT.columns.get_loc('MACD_CROSS_SX')])
macd_up_trend_PEAKPOINT_MIN = macd_up_trend_PEAKPOINT_MIN[macd_up_trend_PEAKPOINT_MIN.apply(lambda x: x == True)] # eqv. Trim(x == False)
macd_up_trend_PEAKPOINT_MAX = macd_up_trend_PEAKPOINT_MAX[macd_up_trend_PEAKPOINT_MAX.apply(lambda x: x == True)] # eqv. Trim(x == False)
PRICE_PREDICT.loc[macd_up_trend_PEAKPOINT_MIN.index, 'PRICE_PRED_CROSS_JX'] = 1
PRICE_PREDICT.loc[macd_up_trend_PEAKPOINT_MAX.index, 'PRICE_PRED_CROSS_SX'] = 1
PRICE_PREDICT.loc[macd_up_trend_PEAKPOINT_MIN.index, 'PRICE_PRED_CROSS'] = PRICE_PREDICT.loc[macd_up_trend_PEAKPOINT_MIN.index].apply(lambda x: PRICE_PREDICT.index.get_level_values(level=0).get_loc(x.name[0]), axis=1)
PRICE_PREDICT.loc[macd_up_trend_PEAKPOINT_MAX.index, 'PRICE_PRED_CROSS'] = PRICE_PREDICT.loc[macd_up_trend_PEAKPOINT_MAX.index].apply(lambda x: PRICE_PREDICT.index.get_level_values(level=0).get_loc(x.name[0]) * -1, axis=1)
PRICE_PREDICT['PRICE_PRED_CROSS_JX'] = Timeline_Integral_with_cross_before(PRICE_PREDICT['PRICE_PRED_CROSS_JX'])
PRICE_PREDICT['PRICE_PRED_CROSS_SX'] = Timeline_Integral_with_cross_before(PRICE_PREDICT['PRICE_PRED_CROSS_SX'])
if (len(PRICE_PREDICT.index.names) > 2):
return PRICE_PREDICT.reset_index([1,2])
elif (len(PRICE_PREDICT.index.names) > 1):
return PRICE_PREDICT.reset_index([1])
else:
return PRICE_PREDICT
def macd_cross_func(data):
"""
    The god-tier indicator: MACD
"""
MACD = TA_MACD(data.close)
MACD_CROSS = pd.DataFrame(columns=['MACD_CROSS', 'MACD_CROSS_JX', 'MACD_CROSS_SX'], index=data.index)
MACD_CROSS = MACD_CROSS.assign(DIF=MACD[:,0])
MACD_CROSS = MACD_CROSS.assign(DEA=MACD[:,1])
MACD_CROSS = MACD_CROSS.assign(MACD=MACD[:,2])
MACD_CROSS = MACD_CROSS.assign(DELTA=MACD[:,3])
dea_tp_min, dea_tp_max = find_peak_vextors(MACD_CROSS['DEA'].values[33:], offest=33)
MACD_CROSS.iloc[dea_tp_min, MACD_CROSS.columns.get_loc('MACD_CROSS')] = 1
MACD_CROSS.iloc[dea_tp_max, MACD_CROSS.columns.get_loc('MACD_CROSS')] = -1
MACD_CROSS_JX = CROSS(MACD_CROSS['DIF'], MACD_CROSS['DEA'])
MACD_CROSS_SX = CROSS(MACD_CROSS['DEA'], MACD_CROSS['DIF'])
MACD_CROSS.loc[MACD_CROSS_JX == 1, 'MACD_CROSS_JX'] = 1
MACD_CROSS.loc[MACD_CROSS_SX == 1, 'MACD_CROSS_SX'] = -1
MACD_CROSS.iloc[dea_tp_min, MACD_CROSS.columns.get_loc('MACD_CROSS_JX')] = 1
MACD_CROSS.iloc[dea_tp_max, MACD_CROSS.columns.get_loc('MACD_CROSS_SX')] = 1
MACD_CROSS['MACD_CROSS_JX'] = Timeline_Integral_with_cross_before(MACD_CROSS['MACD_CROSS_JX'])
MACD_CROSS['MACD_CROSS_SX'] = Timeline_Integral_with_cross_before(MACD_CROSS['MACD_CROSS_SX'])
return MACD_CROSS
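def _demo_macd_cross_func():
    # Illustrative sketch, not part of the original module: feeds a toy
    # random-walk close series (hypothetical data; real market bars are the
    # intended input) through macd_cross_func. Assumes QUANTAXIS, talib and
    # peakutils are installed, as the module itself requires.
    idx = pd.date_range('2020-01-01', periods=200, freq='D')
    data = pd.DataFrame({'close': np.cumsum(np.random.randn(200)) + 100}, index=idx)
    cross = macd_cross_func(data)
    # MACD_CROSS_JX / MACD_CROSS_SX count bars since the last golden / death cross.
    return cross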
def maxfactor_cross_func(data):
"""
    Custom home-grown indicator: MAXFACTOR
"""
RSI = QA.TA_RSI(data.close, timeperiod=12)
CCI = QA.TA_CCI(data.high, data.low, data.close)
KDJ = QA.TA_KDJ(data.high, data.low, data.close)
MAX_FACTOR = CCI[:,0] + (RSI[:,0] - 50) * 4 + (KDJ[:,2] - 50) * 4
MAX_FACTOR_delta = np.r_[np.nan, np.diff(MAX_FACTOR)]
REGRESSION_BASELINE = pd.Series((RSI[:,0] - 50) * 4, index=data.index)
MAXFACTOR_CROSS = pd.DataFrame(columns=['MAXFACTOR_CROSS', 'MAXFACTOR_CROSS_JX', 'MAXFACTOR_CROSS_SX'], index=data.index)
MAXFACTOR_CROSS = MAXFACTOR_CROSS.assign(MAXFACTOR=MAX_FACTOR)
MAXFACTOR_CROSS = MAXFACTOR_CROSS.assign(MAXFACTOR_DELTA=MAX_FACTOR_delta)
MAXFACTOR_CROSS = MAXFACTOR_CROSS.assign(REGRESSION_BASELINE=REGRESSION_BASELINE)
MAXFACTOR_CROSS_JX1 = CROSS(MAX_FACTOR + MAX_FACTOR_delta, REGRESSION_BASELINE - 133)
MAXFACTOR_CROSS_JX2 = CROSS(MAX_FACTOR + MAX_FACTOR_delta, REGRESSION_BASELINE)
MAXFACTOR_CROSS_JX3 = CROSS(MAX_FACTOR + MAX_FACTOR_delta, REGRESSION_BASELINE + 133)
MAXFACTOR_CROSS_JX_JUNCTION = (MAXFACTOR_CROSS_JX1 | MAXFACTOR_CROSS_JX2 | MAXFACTOR_CROSS_JX3)
MAXFACTOR_CROSS_SX1 = CROSS(REGRESSION_BASELINE + 133, MAX_FACTOR + MAX_FACTOR_delta)
MAXFACTOR_CROSS_SX2 = CROSS(REGRESSION_BASELINE, MAX_FACTOR + MAX_FACTOR_delta)
MAXFACTOR_CROSS_SX3 = CROSS(REGRESSION_BASELINE - 133, MAX_FACTOR + MAX_FACTOR_delta)
MAXFACTOR_CROSS_SX_JUNCTION = (MAXFACTOR_CROSS_SX1 | MAXFACTOR_CROSS_SX2 | MAXFACTOR_CROSS_SX3)
MAXFACTOR_CROSS.loc[(MAXFACTOR_CROSS_JX1 | MAXFACTOR_CROSS_JX2 | MAXFACTOR_CROSS_JX3) == 1, 'MAXFACTOR_CROSS'] = 1
MAXFACTOR_CROSS.loc[(MAXFACTOR_CROSS_SX1 | MAXFACTOR_CROSS_SX2 | MAXFACTOR_CROSS_SX3) == 1, 'MAXFACTOR_CROSS'] = -1
MAXFACTOR_CROSS['MAXFACTOR_CROSS_JX'] = Timeline_Integral_with_cross_before(MAXFACTOR_CROSS_JX_JUNCTION)
MAXFACTOR_CROSS['MAXFACTOR_CROSS_SX'] = Timeline_Integral_with_cross_before(MAXFACTOR_CROSS_SX_JUNCTION)
return MAXFACTOR_CROSS
def dual_cross_func(data):
"""
    Custom indicator: dual golden cross of CCI/KDJ against a shifted RSI.
    To avoid a Warning, the first 13 NaN bars are skipped during the
    computation and padded back when the final DataFrame is assembled.
"""
RSI = TA_RSI(data.close, timeperiod=12)
CCI = TA_CCI(data.high, data.low, data.close)
KDJ = TA_KDJ(data.high, data.low, data.close)
CCI_CROSS_JX = CROSS_STATUS(CCI[13:,0], (RSI[13:,0] - 50) * 4)
KDJ_J_CROSS_JX = CROSS_STATUS(KDJ[13:,2], RSI[13:,0])
KDJ_J_CROSS_JX_PLUS = CROSS_STATUS(KDJ[13:,2] + KDJ[13:,3], RSI[13:,0])
DUAL_CROSS_JX = np.r_[np.zeros(13), CROSS_STATUS(CCI_CROSS_JX * (CCI_CROSS_JX + KDJ_J_CROSS_JX + KDJ_J_CROSS_JX_PLUS), 1)]
CCI_CROSS_SX = CROSS_STATUS((RSI[13:,0] - 50) * 4, CCI[13:,0])
KDJ_J_CROSS_SX = CROSS_STATUS(RSI[13:,0], KDJ[13:,2])
KDJ_J_CROSS_SX_PLUS = CROSS_STATUS(RSI[13:,0], KDJ[13:,2] + KDJ[13:,3])
DUAL_CROSS_SX = np.r_[np.zeros(13), CROSS_STATUS(CCI_CROSS_SX * (CCI_CROSS_SX + KDJ_J_CROSS_SX + KDJ_J_CROSS_SX_PLUS), 1)]
DUAL_CROSS = pd.DataFrame(columns=['DUAL_CROSS', 'DUAL_CROSS_JX', 'DUAL_CROSS_SX'], index=data.index)
DUAL_CROSS.loc[DUAL_CROSS_JX == 1, 'DUAL_CROSS'] = 1
DUAL_CROSS.loc[DUAL_CROSS_SX == 1, 'DUAL_CROSS'] = -1
DUAL_CROSS['DUAL_CROSS_JX'] = Timeline_Integral(DUAL_CROSS_JX)
DUAL_CROSS['DUAL_CROSS_SX'] = Timeline_Integral(DUAL_CROSS_SX)
return DUAL_CROSS
def ma30_cross_func(data):
"""
    Moving-average (MA) golden cross indicator
"""
MA5 = talib.MA(data.close, 5)
MA30 = talib.MA(data.close, 30)
MA30_CROSS_JX = CROSS(MA5, MA30)
MA30_CROSS_JX_Integral = Timeline_Integral_with_cross_before(MA30_CROSS_JX)
MA30_CROSS_SX = CROSS(MA30, MA5)
MA30_CROSS_SX_Integral = Timeline_Integral_with_cross_before(MA30_CROSS_SX)
MA30_CROSS = pd.DataFrame(columns=['MA30_CROSS', 'MA30_CROSS_JX', 'MA30_CROSS_SX', 'MA30_TP_CROSS_JX', 'MA30_TP_CROSS_SX'], index=data.index)
MA30_CROSS.loc[MA30_CROSS_JX == 1, 'MA30_CROSS'] = 1
MA30_CROSS.loc[MA30_CROSS_SX == 1, 'MA30_CROSS'] = -1
MA30_CROSS['MA30_CROSS_JX'] = Timeline_Integral_with_cross_before(MA30_CROSS_JX)
MA30_CROSS['MA30_CROSS_SX'] = Timeline_Integral_with_cross_before(MA30_CROSS_SX)
    # The first 29 values of MA30 are NaN and would trigger a Warning; slicing with [29:] skips the NaNs, so the returned indices get +29 added back
MA30_tp_min, MA30_tp_max = find_peak_vextors(MA30.values[29:], offest=29)
MA30_TP_CROSS = pd.DataFrame(columns=['MA30_TP_CROSS_JX', 'MA30_TP_CROSS_SX'], index=data.index)
MA30_TP_CROSS['MA30_TP_CROSS_SX'] = MA30_TP_CROSS['MA30_TP_CROSS_JX'] = 0
MA30_TP_CROSS.iloc[MA30_tp_min, MA30_TP_CROSS.columns.get_loc('MA30_TP_CROSS_JX')] = 1
MA30_TP_CROSS.iloc[MA30_tp_max, MA30_TP_CROSS.columns.get_loc('MA30_TP_CROSS_SX')] = 1
MA30_CROSS['MA30_TP_CROSS_JX'] = Timeline_Integral_with_cross_before(MA30_TP_CROSS['MA30_TP_CROSS_JX'])
MA30_CROSS['MA30_TP_CROSS_SX'] = Timeline_Integral_with_cross_before(MA30_TP_CROSS['MA30_TP_CROSS_SX'])
return MA30_CROSS
def boll_cross_func(data):
"""
    State analysis of golden/death crosses between the Bollinger Bands and the candlesticks
"""
BBANDS = TA_BBANDS(data.close, timeperiod=20, nbdevup=2)
BOLL_CROSS = pd.DataFrame(columns=['min_peak', 'max_peak', 'BOLL_CROSS', 'BOLL_CROSS_JX', 'BOLL_CROSS_SX'], index=data.index)
data = data.assign(BOLL_MA=BBANDS[:,1])
    # Smooth the highs/lows so a sudden pin-bar spike does not distort the signal
data['smooth_low'] = talib.MA(data.low, 2)
data['smooth_high'] = talib.MA(data.high, 2)
BOLL_CROSS['min_peak'] = data.apply(lambda x: min(x['open'], x['close'], x['low'] if x['open'] < x['BOLL_MA'] else x['smooth_low']), axis=1)
BOLL_CROSS['max_peak'] = data.apply(lambda x: max(x['open'], x['close'], x['high'] if x['open'] > x['BOLL_MA'] else x['smooth_high']), axis=1)
BOLL_CROSS_JX = CROSS(BOLL_CROSS['min_peak'], BBANDS[:,2])
BOLL_CROSS_SX = CROSS(BBANDS[:,0], BOLL_CROSS['max_peak'])
BOLL_CROSS.loc[BOLL_CROSS_JX == 1, 'BOLL_CROSS'] = 1
BOLL_CROSS.loc[BOLL_CROSS_SX == 1, 'BOLL_CROSS'] = -1
BOLL_TP_CROSS = pd.DataFrame(columns=['BOLL_TP_CROSS_JX', 'BOLL_TP_CROSS_SX'], index=data.index)
BOLL_TP_CROSS['BOLL_TP_CROSS_SX'] = BOLL_TP_CROSS['BOLL_TP_CROSS_JX'] = 0
BOLL_TP_CROSS.loc[BOLL_CROSS_JX == 1, 'BOLL_TP_CROSS_JX'] = 1
BOLL_TP_CROSS.loc[BOLL_CROSS_SX == 1, 'BOLL_TP_CROSS_SX'] = 1
BOLL_CROSS = BOLL_CROSS.assign(BOLL_UB=BBANDS[:,0])
BOLL_CROSS = BOLL_CROSS.assign(BOLL_MA=BBANDS[:,1])
BOLL_CROSS = BOLL_CROSS.assign(BOLL_LB=BBANDS[:,2])
BOLL_CROSS = BOLL_CROSS.assign(BOLL_WIDTH=BBANDS[:,3])
BOLL_CROSS = BOLL_CROSS.assign(BOLL_DELTA=BBANDS[:,4])
BOLL_CROSS = BOLL_CROSS.assign(BBW_MA20=talib.MA(BBANDS[:,3], 20))
BOLL_CROSS['BOLL_CROSS_JX'] = Timeline_Integral_with_cross_before(BOLL_TP_CROSS['BOLL_TP_CROSS_JX'])
BOLL_CROSS['BOLL_CROSS_SX'] = Timeline_Integral_with_cross_before(BOLL_TP_CROSS['BOLL_TP_CROSS_SX'])
return BOLL_CROSS
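def _demo_boll_cross_func():
    # Illustrative sketch, not part of the original module: builds a toy
    # OHLC frame (hypothetical data) and runs the Bollinger cross analysis.
    # Assumes QUANTAXIS and talib are installed, as the module requires.
    idx = pd.date_range('2020-01-01', periods=120, freq='D')
    close = np.cumsum(np.random.randn(120)) + 100
    data = pd.DataFrame({'open': close + np.random.randn(120) * 0.1,
                         'high': close + 0.5,
                         'low': close - 0.5,
                         'close': close}, index=idx)
    cross = boll_cross_func(data)
    # BOLL_CROSS is 1 when the low side crosses back above the lower band
    # and -1 when the high side drops back under the upper band.
    return cross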
| mit |
mrquim/mrquimrepo | script.module.exodus/lib/resources/lib/sources/en/wrzcraft.py | 5 | 6722 | # -*- coding: utf-8 -*-
'''
Exodus Add-on
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
'''
import re,urllib,urlparse
from resources.lib.modules import cleantitle
from resources.lib.modules import client
from resources.lib.modules import debrid
class source:
def __init__(self):
self.priority = 1
self.language = ['en']
self.domains = ['wrzcraft.net']
self.base_link = 'http://wrzcraft.net'
self.search_link = '/search/%s/feed/rss2/'
def movie(self, imdb, title, localtitle, aliases, year):
try:
url = {'imdb': imdb, 'title': title, 'year': year}
url = urllib.urlencode(url)
return url
except:
return
def tvshow(self, imdb, tvdb, tvshowtitle, localtvshowtitle, aliases, year):
try:
url = {'imdb': imdb, 'tvdb': tvdb, 'tvshowtitle': tvshowtitle, 'year': year}
url = urllib.urlencode(url)
return url
except:
return
def episode(self, url, imdb, tvdb, title, premiered, season, episode):
try:
if url == None: return
url = urlparse.parse_qs(url)
url = dict([(i, url[i][0]) if url[i] else (i, '') for i in url])
url['title'], url['premiered'], url['season'], url['episode'] = title, premiered, season, episode
url = urllib.urlencode(url)
return url
except:
return
def sources(self, url, hostDict, hostprDict):
try:
sources = []
if url == None: return sources
if debrid.status() == False: raise Exception()
data = urlparse.parse_qs(url)
data = dict([(i, data[i][0]) if data[i] else (i, '') for i in data])
title = data['tvshowtitle'] if 'tvshowtitle' in data else data['title']
hdlr = 'S%02dE%02d' % (int(data['season']), int(data['episode'])) if 'tvshowtitle' in data else data['year']
query = '%s S%02dE%02d' % (data['tvshowtitle'], int(data['season']), int(data['episode'])) if 'tvshowtitle' in data else '%s %s' % (data['title'], data['year'])
query = re.sub('(\\\|/| -|:|;|\*|\?|"|\'|<|>|\|)', ' ', query)
url = self.search_link % urllib.quote_plus(query)
url = urlparse.urljoin(self.base_link, url)
r = client.request(url)
posts = client.parseDOM(r, 'item')
hostDict = hostprDict + hostDict
items = []
for post in posts:
try:
t = client.parseDOM(post, 'title')[0]
c = client.parseDOM(post, 'content.+?')[0]
u = client.parseDOM(c, 'p')
u = [client.parseDOM(i, 'a', ret='href') for i in u]
u = [i[0] for i in u if len(i) == 1]
if not u: raise Exception()
if 'tvshowtitle' in data:
u = [(re.sub('(720p|1080p)', '', t) + ' ' + [x for x in i.strip('//').split('/')][-1], i) for i in u]
else:
u = [(t, i) for i in u]
items += u
except:
pass
for item in items:
try:
name = item[0]
name = client.replaceHTMLCodes(name)
t = re.sub('(\.|\(|\[|\s)(\d{4}|S\d*E\d*|S\d*|3D)(\.|\)|\]|\s|)(.+|)', '', name)
if not cleantitle.get(t) == cleantitle.get(title): raise Exception()
y = re.findall('[\.|\(|\[|\s](\d{4}|S\d*E\d*|S\d*)[\.|\)|\]|\s]', name)[-1].upper()
if not y == hdlr: raise Exception()
fmt = re.sub('(.+)(\.|\(|\[|\s)(\d{4}|S\d*E\d*|S\d*)(\.|\)|\]|\s)', '', name.upper())
fmt = re.split('\.|\(|\)|\[|\]|\s|\-', fmt)
fmt = [i.lower() for i in fmt]
if any(i.endswith(('subs', 'sub', 'dubbed', 'dub')) for i in fmt): raise Exception()
if any(i in ['extras'] for i in fmt): raise Exception()
if '1080p' in fmt: quality = '1080p'
elif '720p' in fmt: quality = 'HD'
else: quality = 'SD'
if any(i in ['dvdscr', 'r5', 'r6'] for i in fmt): quality = 'SCR'
elif any(i in ['camrip', 'tsrip', 'hdcam', 'hdts', 'dvdcam', 'dvdts', 'cam', 'telesync', 'ts'] for i in fmt): quality = 'CAM'
info = []
if '3d' in fmt: info.append('3D')
try:
                        # items are (name, url) 2-tuples, so look for the size in the name
                        size = re.findall('((?:\d+\.\d+|\d+\,\d+|\d+) (?:GB|GiB|MB|MiB))', item[0])[-1]
div = 1 if size.endswith(('GB', 'GiB')) else 1024
size = float(re.sub('[^0-9|/.|/,]', '', size))/div
size = '%.2f GB' % size
info.append(size)
except:
pass
if any(i in ['hevc', 'h265', 'x265'] for i in fmt): info.append('HEVC')
info = ' | '.join(info)
url = item[1]
if any(x in url for x in ['.rar', '.zip', '.iso']): raise Exception()
url = client.replaceHTMLCodes(url)
url = url.encode('utf-8')
host = re.findall('([\w]+[.][\w]+)$', urlparse.urlparse(url.strip().lower()).netloc)[0]
if not host in hostDict: raise Exception()
host = client.replaceHTMLCodes(host)
host = host.encode('utf-8')
sources.append({'source': host, 'quality': quality, 'language': 'en', 'url': url, 'info': info, 'direct': False, 'debridonly': True})
except:
pass
check = [i for i in sources if not i['quality'] == 'CAM']
if check: sources = check
return sources
except:
return sources
def resolve(self, url):
return url
| gpl-2.0 |