| repo_name | path | copies | size | content | license |
|---|---|---|---|---|---|
YangChihWei/2015cdb_g1_0623 | static/Brython3.1.1-20150328-091302/Lib/optparse.py | 728 | 60616 | """A powerful, extensible, and easy-to-use option parser.
By Greg Ward <gward@python.net>
Originally distributed as Optik.
For support, use the optik-users@lists.sourceforge.net mailing list
(http://lists.sourceforge.net/lists/listinfo/optik-users).
Simple usage example:
from optparse import OptionParser
parser = OptionParser()
parser.add_option("-f", "--file", dest="filename",
help="write report to FILE", metavar="FILE")
parser.add_option("-q", "--quiet",
action="store_false", dest="verbose", default=True,
help="don't print status messages to stdout")
(options, args) = parser.parse_args()
"""
__version__ = "1.5.3"
__all__ = ['Option',
'make_option',
'SUPPRESS_HELP',
'SUPPRESS_USAGE',
'Values',
'OptionContainer',
'OptionGroup',
'OptionParser',
'HelpFormatter',
'IndentedHelpFormatter',
'TitledHelpFormatter',
'OptParseError',
'OptionError',
'OptionConflictError',
'OptionValueError',
'BadOptionError']
__copyright__ = """
Copyright (c) 2001-2006 Gregory P. Ward. All rights reserved.
Copyright (c) 2002-2006 Python Software Foundation. All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
* Neither the name of the author nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR
CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""
import sys, os
import textwrap
def _repr(self):
return "<%s at 0x%x: %s>" % (self.__class__.__name__, id(self), self)
# This file was generated from:
# Id: option_parser.py 527 2006-07-23 15:21:30Z greg
# Id: option.py 522 2006-06-11 16:22:03Z gward
# Id: help.py 527 2006-07-23 15:21:30Z greg
# Id: errors.py 509 2006-04-20 00:58:24Z gward
try:
from gettext import gettext, ngettext
except ImportError:
def gettext(message):
return message
def ngettext(singular, plural, n):
if n == 1:
return singular
return plural
_ = gettext
class OptParseError (Exception):
def __init__(self, msg):
self.msg = msg
def __str__(self):
return self.msg
class OptionError (OptParseError):
"""
Raised if an Option instance is created with invalid or
inconsistent arguments.
"""
def __init__(self, msg, option):
self.msg = msg
self.option_id = str(option)
def __str__(self):
if self.option_id:
return "option %s: %s" % (self.option_id, self.msg)
else:
return self.msg
class OptionConflictError (OptionError):
"""
Raised if conflicting options are added to an OptionParser.
"""
class OptionValueError (OptParseError):
"""
Raised if an invalid option value is encountered on the command
line.
"""
class BadOptionError (OptParseError):
"""
Raised if an invalid option is seen on the command line.
"""
def __init__(self, opt_str):
self.opt_str = opt_str
def __str__(self):
return _("no such option: %s") % self.opt_str
class AmbiguousOptionError (BadOptionError):
"""
Raised if an ambiguous option is seen on the command line.
"""
def __init__(self, opt_str, possibilities):
BadOptionError.__init__(self, opt_str)
self.possibilities = possibilities
def __str__(self):
return (_("ambiguous option: %s (%s?)")
% (self.opt_str, ", ".join(self.possibilities)))
class HelpFormatter:
"""
Abstract base class for formatting option help. OptionParser
instances should use one of the HelpFormatter subclasses for
formatting help; by default IndentedHelpFormatter is used.
Instance attributes:
parser : OptionParser
the controlling OptionParser instance
indent_increment : int
the number of columns to indent per nesting level
max_help_position : int
the maximum starting column for option help text
help_position : int
the calculated starting column for option help text;
initially the same as the maximum
width : int
total number of columns for output (pass None to constructor for
this value to be taken from the $COLUMNS environment variable)
level : int
current indentation level
current_indent : int
current indentation level (in columns)
help_width : int
number of columns available for option help text (calculated)
default_tag : str
text to replace with each option's default value, "%default"
by default. Set to false value to disable default value expansion.
option_strings : { Option : str }
maps Option instances to the snippet of help text explaining
the syntax of that option, e.g. "-h, --help" or
"-fFILE, --file=FILE"
_short_opt_fmt : str
format string controlling how short options with values are
printed in help text. Must be either "%s%s" ("-fFILE") or
"%s %s" ("-f FILE"), because those are the two syntaxes that
Optik supports.
_long_opt_fmt : str
similar but for long options; must be either "%s %s" ("--file FILE")
or "%s=%s" ("--file=FILE").
"""
NO_DEFAULT_VALUE = "none"
def __init__(self,
indent_increment,
max_help_position,
width,
short_first):
self.parser = None
self.indent_increment = indent_increment
self.help_position = self.max_help_position = max_help_position
if width is None:
try:
width = int(os.environ['COLUMNS'])
except (KeyError, ValueError):
width = 80
width -= 2
self.width = width
self.current_indent = 0
self.level = 0
self.help_width = None # computed later
self.short_first = short_first
self.default_tag = "%default"
self.option_strings = {}
self._short_opt_fmt = "%s %s"
self._long_opt_fmt = "%s=%s"
def set_parser(self, parser):
self.parser = parser
def set_short_opt_delimiter(self, delim):
if delim not in ("", " "):
raise ValueError(
"invalid metavar delimiter for short options: %r" % delim)
self._short_opt_fmt = "%s" + delim + "%s"
def set_long_opt_delimiter(self, delim):
if delim not in ("=", " "):
raise ValueError(
"invalid metavar delimiter for long options: %r" % delim)
self._long_opt_fmt = "%s" + delim + "%s"
def indent(self):
self.current_indent += self.indent_increment
self.level += 1
def dedent(self):
self.current_indent -= self.indent_increment
assert self.current_indent >= 0, "Indent decreased below 0."
self.level -= 1
def format_usage(self, usage):
raise NotImplementedError("subclasses must implement")
def format_heading(self, heading):
raise NotImplementedError("subclasses must implement")
def _format_text(self, text):
"""
Format a paragraph of free-form text for inclusion in the
help output at the current indentation level.
"""
text_width = self.width - self.current_indent
indent = " "*self.current_indent
return textwrap.fill(text,
text_width,
initial_indent=indent,
subsequent_indent=indent)
def format_description(self, description):
if description:
return self._format_text(description) + "\n"
else:
return ""
def format_epilog(self, epilog):
if epilog:
return "\n" + self._format_text(epilog) + "\n"
else:
return ""
def expand_default(self, option):
if self.parser is None or not self.default_tag:
return option.help
default_value = self.parser.defaults.get(option.dest)
if default_value is NO_DEFAULT or default_value is None:
default_value = self.NO_DEFAULT_VALUE
return option.help.replace(self.default_tag, str(default_value))
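# Example (illustrative, assuming a parser built with this module): given
#     parser.add_option("-n", dest="num", default=3,
#                       help="number of runs [default: %default]")
# expand_default() renders the help as "number of runs [default: 3]"; an
# option whose default is NO_DEFAULT or None renders as "[default: none]"
# (see NO_DEFAULT_VALUE above).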
def format_option(self, option):
# The help for each option consists of two parts:
# * the opt strings and metavars
# eg. ("-x", or "-fFILENAME, --file=FILENAME")
# * the user-supplied help string
# eg. ("turn on expert mode", "read data from FILENAME")
#
# If possible, we write both of these on the same line:
# -x turn on expert mode
#
# But if the opt string list is too long, we put the help
# string on a second line, indented to the same column it would
# start in if it fit on the first line.
# -fFILENAME, --file=FILENAME
# read data from FILENAME
result = []
opts = self.option_strings[option]
opt_width = self.help_position - self.current_indent - 2
if len(opts) > opt_width:
opts = "%*s%s\n" % (self.current_indent, "", opts)
indent_first = self.help_position
else: # start help on same line as opts
opts = "%*s%-*s " % (self.current_indent, "", opt_width, opts)
indent_first = 0
result.append(opts)
if option.help:
help_text = self.expand_default(option)
help_lines = textwrap.wrap(help_text, self.help_width)
result.append("%*s%s\n" % (indent_first, "", help_lines[0]))
result.extend(["%*s%s\n" % (self.help_position, "", line)
for line in help_lines[1:]])
elif opts[-1] != "\n":
result.append("\n")
return "".join(result)
def store_option_strings(self, parser):
self.indent()
max_len = 0
for opt in parser.option_list:
strings = self.format_option_strings(opt)
self.option_strings[opt] = strings
max_len = max(max_len, len(strings) + self.current_indent)
self.indent()
for group in parser.option_groups:
for opt in group.option_list:
strings = self.format_option_strings(opt)
self.option_strings[opt] = strings
max_len = max(max_len, len(strings) + self.current_indent)
self.dedent()
self.dedent()
self.help_position = min(max_len + 2, self.max_help_position)
self.help_width = self.width - self.help_position
def format_option_strings(self, option):
"""Return a comma-separated list of option strings & metavariables."""
if option.takes_value():
metavar = option.metavar or option.dest.upper()
short_opts = [self._short_opt_fmt % (sopt, metavar)
for sopt in option._short_opts]
long_opts = [self._long_opt_fmt % (lopt, metavar)
for lopt in option._long_opts]
else:
short_opts = option._short_opts
long_opts = option._long_opts
if self.short_first:
opts = short_opts + long_opts
else:
opts = long_opts + short_opts
return ", ".join(opts)
class IndentedHelpFormatter (HelpFormatter):
"""Format help with indented section bodies.
"""
def __init__(self,
indent_increment=2,
max_help_position=24,
width=None,
short_first=1):
HelpFormatter.__init__(
self, indent_increment, max_help_position, width, short_first)
def format_usage(self, usage):
return _("Usage: %s\n") % usage
def format_heading(self, heading):
return "%*s%s:\n" % (self.current_indent, "", heading)
class TitledHelpFormatter (HelpFormatter):
"""Format help with underlined section headers.
"""
def __init__(self,
indent_increment=0,
max_help_position=24,
width=None,
short_first=0):
HelpFormatter.__init__ (
self, indent_increment, max_help_position, width, short_first)
def format_usage(self, usage):
return "%s %s\n" % (self.format_heading(_("Usage")), usage)
def format_heading(self, heading):
return "%s\n%s\n" % (heading, "=-"[self.level] * len(heading))
def _parse_num(val, type):
if val[:2].lower() == "0x": # hexadecimal
radix = 16
elif val[:2].lower() == "0b": # binary
radix = 2
val = val[2:] or "0" # have to remove "0b" prefix
elif val[:1] == "0": # octal
radix = 8
else: # decimal
radix = 10
return type(val, radix)
def _parse_int(val):
return _parse_num(val, int)
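# Worked examples for _parse_num()/_parse_int() (illustrative):
#     _parse_int("0x1A")  -> 26  (hexadecimal; int() accepts the "0x" prefix)
#     _parse_int("0b101") -> 5   (binary; the "0b" prefix is stripped first)
#     _parse_int("017")   -> 15  (a leading "0" selects octal)
#     _parse_int("42")    -> 42  (decimal)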
_builtin_cvt = { "int" : (_parse_int, _("integer")),
"long" : (_parse_int, _("integer")),
"float" : (float, _("floating-point")),
"complex" : (complex, _("complex")) }
def check_builtin(option, opt, value):
(cvt, what) = _builtin_cvt[option.type]
try:
return cvt(value)
except ValueError:
raise OptionValueError(
_("option %s: invalid %s value: %r") % (opt, what, value))
def check_choice(option, opt, value):
if value in option.choices:
return value
else:
choices = ", ".join(map(repr, option.choices))
raise OptionValueError(
_("option %s: invalid choice: %r (choose from %s)")
% (opt, value, choices))
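# Example (illustrative): these checkers are wired up via TYPE_CHECKER below.
# An "int" option given "--count=0x10" stores 16 (check_builtin uses
# _parse_int), while a "choice" option with choices=["slow", "fast"] accepts
# "--mode=fast" unchanged and raises OptionValueError for anything else.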
# Not supplying a default is different from a default of None,
# so we need an explicit "not supplied" value.
NO_DEFAULT = ("NO", "DEFAULT")
class Option:
"""
Instance attributes:
_short_opts : [string]
_long_opts : [string]
action : string
type : string
dest : string
default : any
nargs : int
const : any
choices : [string]
callback : function
callback_args : (any*)
callback_kwargs : { string : any }
help : string
metavar : string
"""
# The list of instance attributes that may be set through
# keyword args to the constructor.
ATTRS = ['action',
'type',
'dest',
'default',
'nargs',
'const',
'choices',
'callback',
'callback_args',
'callback_kwargs',
'help',
'metavar']
# The set of actions allowed by option parsers. Explicitly listed
# here so the constructor can validate its arguments.
ACTIONS = ("store",
"store_const",
"store_true",
"store_false",
"append",
"append_const",
"count",
"callback",
"help",
"version")
# The set of actions that involve storing a value somewhere;
# also listed just for constructor argument validation. (If
# the action is one of these, there must be a destination.)
STORE_ACTIONS = ("store",
"store_const",
"store_true",
"store_false",
"append",
"append_const",
"count")
# The set of actions for which it makes sense to supply a value
# type, ie. which may consume an argument from the command line.
TYPED_ACTIONS = ("store",
"append",
"callback")
# The set of actions which *require* a value type, ie. that
# always consume an argument from the command line.
ALWAYS_TYPED_ACTIONS = ("store",
"append")
# The set of actions which take a 'const' attribute.
CONST_ACTIONS = ("store_const",
"append_const")
# The set of known types for option parsers. Again, listed here for
# constructor argument validation.
TYPES = ("string", "int", "long", "float", "complex", "choice")
# Dictionary of argument checking functions, which convert and
# validate option arguments according to the option type.
#
# Signature of checking functions is:
# check(option : Option, opt : string, value : string) -> any
# where
# option is the Option instance calling the checker
# opt is the actual option seen on the command-line
# (eg. "-a", "--file")
# value is the option argument seen on the command-line
#
# The return value should be in the appropriate Python type
# for option.type -- eg. an integer if option.type == "int".
#
# If no checker is defined for a type, arguments will be
# unchecked and remain strings.
TYPE_CHECKER = { "int" : check_builtin,
"long" : check_builtin,
"float" : check_builtin,
"complex": check_builtin,
"choice" : check_choice,
}
# CHECK_METHODS is a list of unbound method objects; they are called
# by the constructor, in order, after all attributes are
# initialized. The list is created and filled in later, after all
# the methods are actually defined. (I just put it here because I
# like to define and document all class attributes in the same
# place.) Subclasses that add another _check_*() method should
# define their own CHECK_METHODS list that adds their check method
# to those from this class.
CHECK_METHODS = None
# -- Constructor/initialization methods ----------------------------
def __init__(self, *opts, **attrs):
# Set _short_opts, _long_opts attrs from 'opts' tuple.
# Have to be set now, in case no option strings are supplied.
self._short_opts = []
self._long_opts = []
opts = self._check_opt_strings(opts)
self._set_opt_strings(opts)
# Set all other attrs (action, type, etc.) from 'attrs' dict
self._set_attrs(attrs)
# Check all the attributes we just set. There are lots of
# complicated interdependencies, but luckily they can be farmed
# out to the _check_*() methods listed in CHECK_METHODS -- which
# could be handy for subclasses! The one thing these all share
# is that they raise OptionError if they discover a problem.
for checker in self.CHECK_METHODS:
checker(self)
def _check_opt_strings(self, opts):
# Filter out None because early versions of Optik had exactly
# one short option and one long option, either of which
# could be None.
opts = [opt for opt in opts if opt]
if not opts:
raise TypeError("at least one option string must be supplied")
return opts
def _set_opt_strings(self, opts):
for opt in opts:
if len(opt) < 2:
raise OptionError(
"invalid option string %r: "
"must be at least two characters long" % opt, self)
elif len(opt) == 2:
if not (opt[0] == "-" and opt[1] != "-"):
raise OptionError(
"invalid short option string %r: "
"must be of the form -x, (x any non-dash char)" % opt,
self)
self._short_opts.append(opt)
else:
if not (opt[0:2] == "--" and opt[2] != "-"):
raise OptionError(
"invalid long option string %r: "
"must start with --, followed by non-dash" % opt,
self)
self._long_opts.append(opt)
def _set_attrs(self, attrs):
for attr in self.ATTRS:
if attr in attrs:
setattr(self, attr, attrs[attr])
del attrs[attr]
else:
if attr == 'default':
setattr(self, attr, NO_DEFAULT)
else:
setattr(self, attr, None)
if attrs:
attrs = sorted(attrs.keys())
raise OptionError(
"invalid keyword arguments: %s" % ", ".join(attrs),
self)
# -- Constructor validation methods --------------------------------
def _check_action(self):
if self.action is None:
self.action = "store"
elif self.action not in self.ACTIONS:
raise OptionError("invalid action: %r" % self.action, self)
def _check_type(self):
if self.type is None:
if self.action in self.ALWAYS_TYPED_ACTIONS:
if self.choices is not None:
# The "choices" attribute implies "choice" type.
self.type = "choice"
else:
# No type given? "string" is the most sensible default.
self.type = "string"
else:
# Allow type objects or builtin type conversion functions
# (int, str, etc.) as an alternative to their names. (The
# complicated check of builtins is only necessary for
# Python 2.1 and earlier, and is short-circuited by the
# first check on modern Pythons.)
import builtins
if ( isinstance(self.type, type) or
(hasattr(self.type, "__name__") and
getattr(builtins, self.type.__name__, None) is self.type) ):
self.type = self.type.__name__
if self.type == "str":
self.type = "string"
if self.type not in self.TYPES:
raise OptionError("invalid option type: %r" % self.type, self)
if self.action not in self.TYPED_ACTIONS:
raise OptionError(
"must not supply a type for action %r" % self.action, self)
def _check_choice(self):
if self.type == "choice":
if self.choices is None:
raise OptionError(
"must supply a list of choices for type 'choice'", self)
elif not isinstance(self.choices, (tuple, list)):
raise OptionError(
"choices must be a list of strings ('%s' supplied)"
% str(type(self.choices)).split("'")[1], self)
elif self.choices is not None:
raise OptionError(
"must not supply choices for type %r" % self.type, self)
def _check_dest(self):
# No destination given, and we need one for this action. The
# self.type check is for callbacks that take a value.
takes_value = (self.action in self.STORE_ACTIONS or
self.type is not None)
if self.dest is None and takes_value:
# Glean a destination from the first long option string,
# or from the first short option string if no long options.
if self._long_opts:
# eg. "--foo-bar" -> "foo_bar"
self.dest = self._long_opts[0][2:].replace('-', '_')
else:
self.dest = self._short_opts[0][1]
def _check_const(self):
if self.action not in self.CONST_ACTIONS and self.const is not None:
raise OptionError(
"'const' must not be supplied for action %r" % self.action,
self)
def _check_nargs(self):
if self.action in self.TYPED_ACTIONS:
if self.nargs is None:
self.nargs = 1
elif self.nargs is not None:
raise OptionError(
"'nargs' must not be supplied for action %r" % self.action,
self)
def _check_callback(self):
if self.action == "callback":
if not callable(self.callback):
raise OptionError(
"callback not callable: %r" % self.callback, self)
if (self.callback_args is not None and
not isinstance(self.callback_args, tuple)):
raise OptionError(
"callback_args, if supplied, must be a tuple: not %r"
% self.callback_args, self)
if (self.callback_kwargs is not None and
not isinstance(self.callback_kwargs, dict)):
raise OptionError(
"callback_kwargs, if supplied, must be a dict: not %r"
% self.callback_kwargs, self)
else:
if self.callback is not None:
raise OptionError(
"callback supplied (%r) for non-callback option"
% self.callback, self)
if self.callback_args is not None:
raise OptionError(
"callback_args supplied for non-callback option", self)
if self.callback_kwargs is not None:
raise OptionError(
"callback_kwargs supplied for non-callback option", self)
CHECK_METHODS = [_check_action,
_check_type,
_check_choice,
_check_dest,
_check_const,
_check_nargs,
_check_callback]
# -- Miscellaneous methods -----------------------------------------
def __str__(self):
return "/".join(self._short_opts + self._long_opts)
__repr__ = _repr
def takes_value(self):
return self.type is not None
def get_opt_string(self):
if self._long_opts:
return self._long_opts[0]
else:
return self._short_opts[0]
# -- Processing methods --------------------------------------------
def check_value(self, opt, value):
checker = self.TYPE_CHECKER.get(self.type)
if checker is None:
return value
else:
return checker(self, opt, value)
def convert_value(self, opt, value):
if value is not None:
if self.nargs == 1:
return self.check_value(opt, value)
else:
return tuple([self.check_value(opt, v) for v in value])
def process(self, opt, value, values, parser):
# First, convert the value(s) to the right type. Howl if any
# value(s) are bogus.
value = self.convert_value(opt, value)
# And then take whatever action is expected of us.
# This is a separate method to make life easier for
# subclasses to add new actions.
return self.take_action(
self.action, self.dest, opt, value, values, parser)
def take_action(self, action, dest, opt, value, values, parser):
if action == "store":
setattr(values, dest, value)
elif action == "store_const":
setattr(values, dest, self.const)
elif action == "store_true":
setattr(values, dest, True)
elif action == "store_false":
setattr(values, dest, False)
elif action == "append":
values.ensure_value(dest, []).append(value)
elif action == "append_const":
values.ensure_value(dest, []).append(self.const)
elif action == "count":
setattr(values, dest, values.ensure_value(dest, 0) + 1)
elif action == "callback":
args = self.callback_args or ()
kwargs = self.callback_kwargs or {}
self.callback(self, opt, value, parser, *args, **kwargs)
elif action == "help":
parser.print_help()
parser.exit()
elif action == "version":
parser.print_version()
parser.exit()
else:
raise ValueError("unknown action %r" % self.action)
return 1
# class Option
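# Subclassing sketch (illustrative; not part of this module). As the ACTIONS
# and CHECK_METHODS comments above suggest, a new action is added by extending
# the class-level tuples and overriding take_action():
#
#     class MyOption(Option):
#         ACTIONS = Option.ACTIONS + ("extend",)
#         STORE_ACTIONS = Option.STORE_ACTIONS + ("extend",)
#         TYPED_ACTIONS = Option.TYPED_ACTIONS + ("extend",)
#         ALWAYS_TYPED_ACTIONS = Option.ALWAYS_TYPED_ACTIONS + ("extend",)
#
#         def take_action(self, action, dest, opt, value, values, parser):
#             if action == "extend":
#                 values.ensure_value(dest, []).extend(value.split(","))
#             else:
#                 Option.take_action(self, action, dest, opt, value,
#                                    values, parser)
#
# Pass it to the parser with OptionParser(option_class=MyOption).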
SUPPRESS_HELP = "SUPPRESS"+"HELP"
SUPPRESS_USAGE = "SUPPRESS"+"USAGE"
class Values:
def __init__(self, defaults=None):
if defaults:
for (attr, val) in defaults.items():
setattr(self, attr, val)
def __str__(self):
return str(self.__dict__)
__repr__ = _repr
def __eq__(self, other):
if isinstance(other, Values):
return self.__dict__ == other.__dict__
elif isinstance(other, dict):
return self.__dict__ == other
else:
return NotImplemented
def _update_careful(self, dict):
"""
Update the option values from an arbitrary dictionary, but only
use keys from dict that already have a corresponding attribute
in self. Any keys in dict without a corresponding attribute
are silently ignored.
"""
for attr in dir(self):
if attr in dict:
dval = dict[attr]
if dval is not None:
setattr(self, attr, dval)
def _update_loose(self, dict):
"""
Update the option values from an arbitrary dictionary,
using all keys from the dictionary regardless of whether
they have a corresponding attribute in self or not.
"""
self.__dict__.update(dict)
def _update(self, dict, mode):
if mode == "careful":
self._update_careful(dict)
elif mode == "loose":
self._update_loose(dict)
else:
raise ValueError("invalid update mode: %r" % mode)
def read_module(self, modname, mode="careful"):
__import__(modname)
mod = sys.modules[modname]
self._update(vars(mod), mode)
def read_file(self, filename, mode="careful"):
vars = {}
exec(open(filename).read(), vars)
self._update(vars, mode)
def ensure_value(self, attr, value):
if not hasattr(self, attr) or getattr(self, attr) is None:
setattr(self, attr, value)
return getattr(self, attr)
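# Example (illustrative): Values is a plain attribute bag.
#     v = Values({"verbose": True})
#     v.ensure_value("names", []).append("x")  # creates the attribute on demand
#     v.verbose                                # -> True
#     v == {"verbose": True, "names": ["x"]}   # -> True (dict comparison above)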
class OptionContainer:
"""
Abstract base class.
Class attributes:
standard_option_list : [Option]
list of standard options that will be accepted by all instances
of this parser class (intended to be overridden by subclasses).
Instance attributes:
option_list : [Option]
the list of Option objects contained by this OptionContainer
_short_opt : { string : Option }
dictionary mapping short option strings, eg. "-f" or "-X",
to the Option instances that implement them. If an Option
has multiple short option strings, it will appear in this
dictionary multiple times. [1]
_long_opt : { string : Option }
dictionary mapping long option strings, eg. "--file" or
"--exclude", to the Option instances that implement them.
Again, a given Option can occur multiple times in this
dictionary. [1]
defaults : { string : any }
dictionary mapping option destination names to default
values for each destination [1]
[1] These mappings are common to (shared by) all components of the
controlling OptionParser, where they are initially created.
"""
def __init__(self, option_class, conflict_handler, description):
# Initialize the option list and related data structures.
# This method must be provided by subclasses, and it must
# initialize at least the following instance attributes:
# option_list, _short_opt, _long_opt, defaults.
self._create_option_list()
self.option_class = option_class
self.set_conflict_handler(conflict_handler)
self.set_description(description)
def _create_option_mappings(self):
# For use by OptionParser constructor -- create the master
# option mappings used by this OptionParser and all
# OptionGroups that it owns.
self._short_opt = {} # single letter -> Option instance
self._long_opt = {} # long option -> Option instance
self.defaults = {} # maps option dest -> default value
def _share_option_mappings(self, parser):
# For use by OptionGroup constructor -- use shared option
# mappings from the OptionParser that owns this OptionGroup.
self._short_opt = parser._short_opt
self._long_opt = parser._long_opt
self.defaults = parser.defaults
def set_conflict_handler(self, handler):
if handler not in ("error", "resolve"):
raise ValueError("invalid conflict_resolution value %r" % handler)
self.conflict_handler = handler
def set_description(self, description):
self.description = description
def get_description(self):
return self.description
def destroy(self):
"""see OptionParser.destroy()."""
del self._short_opt
del self._long_opt
del self.defaults
# -- Option-adding methods -----------------------------------------
def _check_conflict(self, option):
conflict_opts = []
for opt in option._short_opts:
if opt in self._short_opt:
conflict_opts.append((opt, self._short_opt[opt]))
for opt in option._long_opts:
if opt in self._long_opt:
conflict_opts.append((opt, self._long_opt[opt]))
if conflict_opts:
handler = self.conflict_handler
if handler == "error":
raise OptionConflictError(
"conflicting option string(s): %s"
% ", ".join([co[0] for co in conflict_opts]),
option)
elif handler == "resolve":
for (opt, c_option) in conflict_opts:
if opt.startswith("--"):
c_option._long_opts.remove(opt)
del self._long_opt[opt]
else:
c_option._short_opts.remove(opt)
del self._short_opt[opt]
if not (c_option._short_opts or c_option._long_opts):
c_option.container.option_list.remove(c_option)
def add_option(self, *args, **kwargs):
"""add_option(Option)
add_option(opt_str, ..., kwarg=val, ...)
"""
if isinstance(args[0], str):
option = self.option_class(*args, **kwargs)
elif len(args) == 1 and not kwargs:
option = args[0]
if not isinstance(option, Option):
raise TypeError("not an Option instance: %r" % option)
else:
raise TypeError("invalid arguments")
self._check_conflict(option)
self.option_list.append(option)
option.container = self
for opt in option._short_opts:
self._short_opt[opt] = option
for opt in option._long_opts:
self._long_opt[opt] = option
if option.dest is not None: # option has a dest, we need a default
if option.default is not NO_DEFAULT:
self.defaults[option.dest] = option.default
elif option.dest not in self.defaults:
self.defaults[option.dest] = None
return option
def add_options(self, option_list):
for option in option_list:
self.add_option(option)
# -- Option query/removal methods ----------------------------------
def get_option(self, opt_str):
return (self._short_opt.get(opt_str) or
self._long_opt.get(opt_str))
def has_option(self, opt_str):
return (opt_str in self._short_opt or
opt_str in self._long_opt)
def remove_option(self, opt_str):
option = self._short_opt.get(opt_str)
if option is None:
option = self._long_opt.get(opt_str)
if option is None:
raise ValueError("no such option %r" % opt_str)
for opt in option._short_opts:
del self._short_opt[opt]
for opt in option._long_opts:
del self._long_opt[opt]
option.container.option_list.remove(option)
# -- Help-formatting methods ---------------------------------------
def format_option_help(self, formatter):
if not self.option_list:
return ""
result = []
for option in self.option_list:
if option.help is not SUPPRESS_HELP:
result.append(formatter.format_option(option))
return "".join(result)
def format_description(self, formatter):
return formatter.format_description(self.get_description())
def format_help(self, formatter):
result = []
if self.description:
result.append(self.format_description(formatter))
if self.option_list:
result.append(self.format_option_help(formatter))
return "\n".join(result)
class OptionGroup (OptionContainer):
def __init__(self, parser, title, description=None):
self.parser = parser
OptionContainer.__init__(
self, parser.option_class, parser.conflict_handler, description)
self.title = title
def _create_option_list(self):
self.option_list = []
self._share_option_mappings(self.parser)
def set_title(self, title):
self.title = title
def destroy(self):
"""see OptionParser.destroy()."""
OptionContainer.destroy(self)
del self.option_list
# -- Help-formatting methods ---------------------------------------
def format_help(self, formatter):
result = formatter.format_heading(self.title)
formatter.indent()
result += OptionContainer.format_help(self, formatter)
formatter.dedent()
return result
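# Usage sketch (illustrative; OptionParser is defined just below):
#     group = OptionGroup(parser, "Debug Options",
#                         "Caution: use at your own risk.")
#     group.add_option("-g", action="store_true", dest="debug")
#     parser.add_option_group(group)
# Groups share the parser's option mappings, so "-g" parses normally but is
# rendered under its own heading by format_help() above.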
class OptionParser (OptionContainer):
"""
Class attributes:
standard_option_list : [Option]
list of standard options that will be accepted by all instances
of this parser class (intended to be overridden by subclasses).
Instance attributes:
usage : string
a usage string for your program. Before it is displayed
to the user, "%prog" will be expanded to the name of
your program (self.prog or os.path.basename(sys.argv[0])).
prog : string
the name of the current program (to override
os.path.basename(sys.argv[0])).
description : string
A paragraph of text giving a brief overview of your program.
optparse reformats this paragraph to fit the current terminal
width and prints it when the user requests help (after usage,
but before the list of options).
epilog : string
paragraph of help text to print after option help
option_groups : [OptionGroup]
list of option groups in this parser (option groups are
irrelevant for parsing the command-line, but very useful
for generating help)
allow_interspersed_args : bool = true
if true, positional arguments may be interspersed with options.
Assuming -a and -b each take a single argument, the command-line
-ablah foo bar -bboo baz
will be interpreted the same as
-ablah -bboo -- foo bar baz
If this flag were false, that command line would be interpreted as
-ablah -- foo bar -bboo baz
-- ie. we stop processing options as soon as we see the first
non-option argument. (This is the tradition followed by
Python's getopt module, Perl's Getopt::Std, and other argument-
parsing libraries, but it is generally annoying to users.)
process_default_values : bool = true
if true, option default values are processed similarly to option
values from the command line: that is, they are passed to the
type-checking function for the option's type (as long as the
default value is a string). (This really only matters if you
have defined custom types; see SF bug #955889.) Set it to false
to restore the behaviour of Optik 1.4.1 and earlier.
rargs : [string]
the argument list currently being parsed. Only set when
parse_args() is active, and continually trimmed down as
we consume arguments. Mainly there for the benefit of
callback options.
largs : [string]
the list of leftover arguments that we have skipped while
parsing options. If allow_interspersed_args is false, this
list is always empty.
values : Values
the set of option values currently being accumulated. Only
set when parse_args() is active. Also mainly for callbacks.
Because of the 'rargs', 'largs', and 'values' attributes,
OptionParser is not thread-safe. If, for some perverse reason, you
need to parse command-line arguments simultaneously in different
threads, use different OptionParser instances.
"""
standard_option_list = []
def __init__(self,
usage=None,
option_list=None,
option_class=Option,
version=None,
conflict_handler="error",
description=None,
formatter=None,
add_help_option=True,
prog=None,
epilog=None):
OptionContainer.__init__(
self, option_class, conflict_handler, description)
self.set_usage(usage)
self.prog = prog
self.version = version
self.allow_interspersed_args = True
self.process_default_values = True
if formatter is None:
formatter = IndentedHelpFormatter()
self.formatter = formatter
self.formatter.set_parser(self)
self.epilog = epilog
# Populate the option list; initial sources are the
# standard_option_list class attribute, the 'option_list'
# argument, and (if applicable) the _add_version_option() and
# _add_help_option() methods.
self._populate_option_list(option_list,
add_help=add_help_option)
self._init_parsing_state()
def destroy(self):
"""
Declare that you are done with this OptionParser. This cleans up
reference cycles so the OptionParser (and all objects referenced by
it) can be garbage-collected promptly. After calling destroy(), the
OptionParser is unusable.
"""
OptionContainer.destroy(self)
for group in self.option_groups:
group.destroy()
del self.option_list
del self.option_groups
del self.formatter
# -- Private methods -----------------------------------------------
# (used by our or OptionContainer's constructor)
def _create_option_list(self):
self.option_list = []
self.option_groups = []
self._create_option_mappings()
def _add_help_option(self):
self.add_option("-h", "--help",
action="help",
help=_("show this help message and exit"))
def _add_version_option(self):
self.add_option("--version",
action="version",
help=_("show program's version number and exit"))
def _populate_option_list(self, option_list, add_help=True):
if self.standard_option_list:
self.add_options(self.standard_option_list)
if option_list:
self.add_options(option_list)
if self.version:
self._add_version_option()
if add_help:
self._add_help_option()
def _init_parsing_state(self):
# These are set in parse_args() for the convenience of callbacks.
self.rargs = None
self.largs = None
self.values = None
# -- Simple modifier methods ---------------------------------------
def set_usage(self, usage):
if usage is None:
self.usage = _("%prog [options]")
elif usage is SUPPRESS_USAGE:
self.usage = None
# For backwards compatibility with Optik 1.3 and earlier.
elif usage.lower().startswith("usage: "):
self.usage = usage[7:]
else:
self.usage = usage
def enable_interspersed_args(self):
"""Set parsing to not stop on the first non-option, allowing
interspersing switches with command arguments. This is the
default behavior. See also disable_interspersed_args() and the
class documentation description of the attribute
allow_interspersed_args."""
self.allow_interspersed_args = True
def disable_interspersed_args(self):
"""Set parsing to stop on the first non-option. Use this if
you have a command processor which runs another command that
has options of its own and you want to make sure these options
don't get confused.
"""
self.allow_interspersed_args = False
def set_process_default_values(self, process):
self.process_default_values = process
def set_default(self, dest, value):
self.defaults[dest] = value
def set_defaults(self, **kwargs):
self.defaults.update(kwargs)
def _get_all_options(self):
options = self.option_list[:]
for group in self.option_groups:
options.extend(group.option_list)
return options
def get_default_values(self):
if not self.process_default_values:
# Old, pre-Optik 1.5 behaviour.
return Values(self.defaults)
defaults = self.defaults.copy()
for option in self._get_all_options():
default = defaults.get(option.dest)
if isinstance(default, str):
opt_str = option.get_opt_string()
defaults[option.dest] = option.check_value(opt_str, default)
return Values(defaults)
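# Example (illustrative): because process_default_values is true by default,
# string defaults are passed through the option's type checker:
#     parser.add_option("-n", type="int", dest="num")
#     parser.set_defaults(num="0x10")
#     parser.parse_args([])   # options.num == 16, not the string "0x10"
# Call set_process_default_values(False) to keep raw defaults (pre-Optik 1.5
# behaviour).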
# -- OptionGroup methods -------------------------------------------
def add_option_group(self, *args, **kwargs):
# XXX lots of overlap with OptionContainer.add_option()
if isinstance(args[0], str):
group = OptionGroup(self, *args, **kwargs)
elif len(args) == 1 and not kwargs:
group = args[0]
if not isinstance(group, OptionGroup):
raise TypeError("not an OptionGroup instance: %r" % group)
if group.parser is not self:
raise ValueError("invalid OptionGroup (wrong parser)")
else:
raise TypeError("invalid arguments")
self.option_groups.append(group)
return group
def get_option_group(self, opt_str):
option = (self._short_opt.get(opt_str) or
self._long_opt.get(opt_str))
if option and option.container is not self:
return option.container
return None
# -- Option-parsing methods ----------------------------------------
def _get_args(self, args):
if args is None:
return sys.argv[1:]
else:
return args[:] # don't modify caller's list
def parse_args(self, args=None, values=None):
"""
parse_args(args : [string] = sys.argv[1:],
values : Values = None)
-> (values : Values, args : [string])
Parse the command-line options found in 'args' (default:
sys.argv[1:]). Any errors result in a call to 'error()', which
by default prints the usage message to stderr and calls
sys.exit() with an error message. On success returns a pair
(values, args) where 'values' is a Values instance (with all
your option values) and 'args' is the list of arguments left
over after parsing options.
"""
rargs = self._get_args(args)
if values is None:
values = self.get_default_values()
# Store the halves of the argument list as attributes for the
# convenience of callbacks:
# rargs
# the rest of the command-line (the "r" stands for
# "remaining" or "right-hand")
# largs
# the leftover arguments -- ie. what's left after removing
# options and their arguments (the "l" stands for "leftover"
# or "left-hand")
self.rargs = rargs
self.largs = largs = []
self.values = values
try:
stop = self._process_args(largs, rargs, values)
except (BadOptionError, OptionValueError) as err:
self.error(str(err))
args = largs + rargs
return self.check_values(values, args)
def check_values(self, values, args):
"""
check_values(values : Values, args : [string])
-> (values : Values, args : [string])
Check that the supplied option values and leftover arguments are
valid. Returns the option values and leftover arguments
(possibly adjusted, possibly completely new -- whatever you
like). Default implementation just returns the passed-in
values; subclasses may override as desired.
"""
return (values, args)
def _process_args(self, largs, rargs, values):
"""_process_args(largs : [string],
rargs : [string],
values : Values)
Process command-line arguments and populate 'values', consuming
options and arguments from 'rargs'. If 'allow_interspersed_args' is
false, stop at the first non-option argument. If true, accumulate any
interspersed non-option arguments in 'largs'.
"""
while rargs:
arg = rargs[0]
# We handle bare "--" explicitly, and bare "-" is handled by the
# standard arg handler since the short arg case ensures that the
# len of the opt string is greater than 1.
if arg == "--":
del rargs[0]
return
elif arg[0:2] == "--":
# process a single long option (possibly with value(s))
self._process_long_opt(rargs, values)
elif arg[:1] == "-" and len(arg) > 1:
# process a cluster of short options (possibly with
# value(s) for the last one only)
self._process_short_opts(rargs, values)
elif self.allow_interspersed_args:
largs.append(arg)
del rargs[0]
else:
return # stop now, leave this arg in rargs
# Say this is the original argument list:
# [arg0, arg1, ..., arg(i-1), arg(i), arg(i+1), ..., arg(N-1)]
# ^
# (we are about to process arg(i)).
#
# Then rargs is [arg(i), ..., arg(N-1)] and largs is a *subset* of
# [arg0, ..., arg(i-1)] (any options and their arguments will have
# been removed from largs).
#
# The while loop will usually consume 1 or more arguments per pass.
# If it consumes 1 (eg. arg is an option that takes no arguments),
# then after _process_arg() is done the situation is:
#
# largs = subset of [arg0, ..., arg(i)]
# rargs = [arg(i+1), ..., arg(N-1)]
#
# If allow_interspersed_args is false, largs will always be
# *empty* -- still a subset of [arg0, ..., arg(i-1)], but
# not a very interesting subset!
def _match_long_opt(self, opt):
"""_match_long_opt(opt : string) -> string
Determine which long option string 'opt' matches, ie. which one
it is an unambiguous abbreviation for. Raises BadOptionError if
'opt' doesn't unambiguously match any long option string.
"""
return _match_abbrev(opt, self._long_opt)
def _process_long_opt(self, rargs, values):
arg = rargs.pop(0)
# Value explicitly attached to arg? Pretend it's the next
# argument.
if "=" in arg:
(opt, next_arg) = arg.split("=", 1)
rargs.insert(0, next_arg)
had_explicit_value = True
else:
opt = arg
had_explicit_value = False
opt = self._match_long_opt(opt)
option = self._long_opt[opt]
if option.takes_value():
nargs = option.nargs
if len(rargs) < nargs:
self.error(ngettext(
"%(option)s option requires %(number)d argument",
"%(option)s option requires %(number)d arguments",
nargs) % {"option": opt, "number": nargs})
elif nargs == 1:
value = rargs.pop(0)
else:
value = tuple(rargs[0:nargs])
del rargs[0:nargs]
elif had_explicit_value:
self.error(_("%s option does not take a value") % opt)
else:
value = None
option.process(opt, value, values, self)
def _process_short_opts(self, rargs, values):
arg = rargs.pop(0)
stop = False
i = 1
for ch in arg[1:]:
opt = "-" + ch
option = self._short_opt.get(opt)
i += 1 # we have consumed a character
if not option:
raise BadOptionError(opt)
if option.takes_value():
# Any characters left in arg? Pretend they're the
# next arg, and stop consuming characters of arg.
if i < len(arg):
rargs.insert(0, arg[i:])
stop = True
nargs = option.nargs
if len(rargs) < nargs:
self.error(ngettext(
"%(option)s option requires %(number)d argument",
"%(option)s option requires %(number)d arguments",
nargs) % {"option": opt, "number": nargs})
elif nargs == 1:
value = rargs.pop(0)
else:
value = tuple(rargs[0:nargs])
del rargs[0:nargs]
else: # option doesn't take a value
value = None
option.process(opt, value, values, self)
if stop:
break
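# Example (illustrative): given -a (a flag) and -f (takes a value), the
# cluster "-afout.txt" is consumed as: "-a" fires first, then "-f" pushes the
# leftover "out.txt" back onto rargs and takes it as its value; stop=True
# ends the loop so no further characters are read as options.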
# -- Feedback methods ----------------------------------------------
def get_prog_name(self):
if self.prog is None:
return os.path.basename(sys.argv[0])
else:
return self.prog
def expand_prog_name(self, s):
return s.replace("%prog", self.get_prog_name())
def get_description(self):
return self.expand_prog_name(self.description)
def exit(self, status=0, msg=None):
if msg:
sys.stderr.write(msg)
sys.exit(status)
def error(self, msg):
"""error(msg : string)
Print a usage message incorporating 'msg' to stderr and exit.
If you override this in a subclass, it should not return -- it
should either exit or raise an exception.
"""
self.print_usage(sys.stderr)
self.exit(2, "%s: error: %s\n" % (self.get_prog_name(), msg))
def get_usage(self):
if self.usage:
return self.formatter.format_usage(
self.expand_prog_name(self.usage))
else:
return ""
def print_usage(self, file=None):
"""print_usage(file : file = stdout)
Print the usage message for the current program (self.usage) to
'file' (default stdout). Any occurrence of the string "%prog" in
self.usage is replaced with the name of the current program
(basename of sys.argv[0]). Does nothing if self.usage is empty
or not defined.
"""
if self.usage:
print(self.get_usage(), file=file)
def get_version(self):
if self.version:
return self.expand_prog_name(self.version)
else:
return ""
def print_version(self, file=None):
"""print_version(file : file = stdout)
Print the version message for this program (self.version) to
'file' (default stdout). As with print_usage(), any occurrence
of "%prog" in self.version is replaced by the current program's
name. Does nothing if self.version is empty or undefined.
"""
if self.version:
print(self.get_version(), file=file)
def format_option_help(self, formatter=None):
if formatter is None:
formatter = self.formatter
formatter.store_option_strings(self)
result = []
result.append(formatter.format_heading(_("Options")))
formatter.indent()
if self.option_list:
result.append(OptionContainer.format_option_help(self, formatter))
result.append("\n")
for group in self.option_groups:
result.append(group.format_help(formatter))
result.append("\n")
formatter.dedent()
# Drop the last "\n", or the header if no options or option groups:
return "".join(result[:-1])
def format_epilog(self, formatter):
return formatter.format_epilog(self.epilog)
def format_help(self, formatter=None):
if formatter is None:
formatter = self.formatter
result = []
if self.usage:
result.append(self.get_usage() + "\n")
if self.description:
result.append(self.format_description(formatter) + "\n")
result.append(self.format_option_help(formatter))
result.append(self.format_epilog(formatter))
return "".join(result)
def print_help(self, file=None):
"""print_help(file : file = stdout)
Print an extended help message, listing all options and any
help text provided with them, to 'file' (default stdout).
"""
if file is None:
file = sys.stdout
file.write(self.format_help())
# class OptionParser
def _match_abbrev(s, wordmap):
"""_match_abbrev(s : string, wordmap : {string : Option}) -> string
Return the string key in 'wordmap' for which 's' is an unambiguous
abbreviation. If 's' is found to be ambiguous or doesn't match any of
'words', raise BadOptionError.
"""
# Is there an exact match?
if s in wordmap:
return s
else:
# Isolate all words with s as a prefix.
possibilities = [word for word in wordmap.keys()
if word.startswith(s)]
# No exact match, so there had better be just one possibility.
if len(possibilities) == 1:
return possibilities[0]
elif not possibilities:
raise BadOptionError(s)
else:
# More than one possible completion: ambiguous prefix.
possibilities.sort()
raise AmbiguousOptionError(s, possibilities)
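# Worked example (illustrative): with long options --file, --filter, --quiet:
#     "--q"    -> "--quiet"             (unique prefix)
#     "--file" -> "--file"              (an exact match always wins)
#     "--fi"   -> AmbiguousOptionError  (--file? --filter?)
#     "--x"    -> BadOptionError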
# Some day, there might be many Option classes. As of Optik 1.3, the
# preferred way to instantiate Options is indirectly, via make_option(),
# which will become a factory function when there are many Option
# classes.
make_option = Option
| gpl-3.0 |
edx/xblock-lti-consumer | lti_consumer/migrations/0002_ltiagslineitem.py | 1 | 1256 | # Generated by Django 2.2.16 on 2020-09-29 21:48
from django.db import migrations, models
import django.db.models.deletion
import opaque_keys.edx.django.models
class Migration(migrations.Migration):
dependencies = [
('lti_consumer', '0001_initial'),
]
operations = [
migrations.CreateModel(
name='LtiAgsLineItem',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('resource_id', models.CharField(blank=True, max_length=100)),
('resource_link_id', opaque_keys.edx.django.models.UsageKeyField(blank=True, db_index=True, max_length=255, null=True)),
('label', models.CharField(max_length=100)),
('score_maximum', models.IntegerField()),
('tag', models.CharField(blank=True, max_length=50)),
('start_date_time', models.DateTimeField(blank=True, null=True)),
('end_date_time', models.DateTimeField(blank=True, null=True)),
('lti_configuration', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='lti_consumer.LtiConfiguration')),
],
),
]
| agpl-3.0 |
Micronaet/micronaet-migration | base_accounting_program/accounting.py | 1 | 14040 | # -*- coding: utf-8 -*-
###############################################################################
#
# Copyright (C) 2001-2014 Micronaet SRL (<http://www.micronaet.it>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
###############################################################################
import os
import sys
import logging
import openerp
import openerp.netsvc as netsvc
import openerp.addons.decimal_precision as dp
from openerp.osv import fields, osv, expression, orm
from datetime import datetime, timedelta
from dateutil.relativedelta import relativedelta
from openerp import SUPERUSER_ID, api
from openerp import tools
from openerp.tools.translate import _
from openerp.tools.float_utils import float_round as round
from openerp.tools import (DEFAULT_SERVER_DATE_FORMAT,
DEFAULT_SERVER_DATETIME_FORMAT,
DATETIME_FORMATS_MAP,
float_compare)
_logger = logging.getLogger(__name__)
class ProductProductExtraFields(orm.Model):
_inherit = 'product.product'
_columns = {
'import': fields.boolean('Imported'),
'mexal_id': fields.char(
'Product mexal ID', size=20),
'q_x_pack': fields.float(
'Qty per pack', digits=(16, 3)),
'linear_length': fields.float(
'Linear length', digits=(16, 3)),
'large_description': fields.text(
'Large Description', translate=True, help="For web publishing"),
}
class ProductPricelistExtraFields(orm.Model):
_inherit = 'product.pricelist'
_columns = {
'import': fields.boolean('Imported', required=False),
'mexal_id': fields.char(
'Mexal Pricelist', size=9, required=False, readonly=False),
}
class PricelistVersionExtraFields(orm.Model):
_inherit = 'product.pricelist.version'
_columns = {
'import': fields.boolean('Imported', required=False),
'mexal_id': fields.char(
'Mexal Pricelist version', size=9, required=False, readonly=False),
}
class PricelistItemExtraFields(orm.Model):
_inherit = 'product.pricelist.item'
_columns = {
'mexal_id': fields.char(
'Mexal Pricelist item', size=9, required=False, readonly=False),
}
"""
# fiam_sale.py
Extra fields for object used in sale orders
Maybe this new objects are not necessary and will be replaced in the future
TODO Maybe discount part is better move in a single module
"""
class SaleOrderBank(orm.Model):
_name = 'sale.order.bank'
_description = 'Sale oder bank'
_columns = {
'name': fields.char('Bank account', size=64),
'information': fields.text(
'Information', translate=True,
help="Account description, IBAN etc. linked in the offer"),
}
class SaleProductReturn(orm.Model):
''' List of text sentences for the return of the product; this list is
shown in the offer modules.
'''
_name = 'sale.product.return'
_description = 'Sale product return'
_columns = {
'name': fields.char('Description', size=64),
'text': fields.text('Text', translate=True),
}
class SaleOrderExtraFields(orm.Model):
_inherit = 'sale.order'
_columns = {
'bank_id': fields.many2one('sale.order.bank', 'Bank account'),
'print_address': fields.boolean('Use extra address'),
'print_only_prices': fields.boolean('Only price offer'),
'has_master_header': fields.boolean(
'Header master table',
help="In 'only comunication offer' doesn't add header"),
'return_id': fields.many2one('sale.product.return', 'Product return'),
}
_defaults={
'has_master_header': lambda *a: True,
}
class SaleOrderLineExtraFields(orm.Model):
_inherit = 'sale.order.line'
# TODO remove (put in correct module mx_discount_scale_order
# vvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvv
def create(self, cr, uid, vals, context=None):
""" Multi discount rate
"""
if not vals.get('discount', 0.0) and vals.get(
'multi_discount_rates', False):
res = self.on_change_multi_discount(
cr, uid, 0, vals.get('multi_discount_rates'))['value']
vals['discount'] = res.get('discount', '')
return super(SaleOrderLineExtraFields, self).create(
cr, uid, vals, context=context)
def write(self, cr, uid, ids, vals, context=None):
""" Multi discount rate
"""
if vals.get('multi_discount_rates', False):
res = self.on_change_multi_discount(
cr, uid, 0, vals.get('multi_discount_rates'))['value']
vals['discount'] = res.get('discount', '')
# TODO: raises an error on update (server restart needed)
return super(SaleOrderLineExtraFields, self).write(
cr, uid, ids, vals, context=context)
def on_change_multi_discount(self, cr, uid, ids, multi_discount_rates,
context=None):
''' Parse the multi-discount scale: return the computed discount and a
normalized format of the multi rates.
'''
res = {}
if multi_discount_rates:
disc = multi_discount_rates.replace(' ', '')
disc = disc.replace(',', '.')
discount_list = disc.split('+')
if discount_list:
base_discount = float(100)
for aliquota in discount_list:
try:
i = float(eval(aliquota))
except:
i = 0.00
base_discount -= base_discount * i / 100.00
res['discount'] = 100 - base_discount
res['multi_discount_rates'] = '+ '.join(discount_list)
else:
res['discount'] = 0.0
res['multi_discount_rates'] = ''
else:
res['discount'] = 0.00
res['multi_discount_rates'] = ''
return {'value': res}
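# Worked example (illustrative): a scale of "30+10" is applied in sequence:
#     base = 100 -> 100 - 100*30/100 = 70 -> 70 - 70*10/100 = 63
# so res['discount'] = 100 - 63 = 37.0: 30%+10% compounds to 37%, not 40%.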
def _discount_rates_get(self, cr, uid, context=None):
if context is None:
context = {}
if context.get('partner_id'):
cr.execute("""
SELECT discount_rates, id
FROM res_partner
WHERE id = %d
""" % context['partner_id'])
res = cr.fetchall()
if res[0][0]:
return res[0][0]
else:
return False
else:
return False
def _discount_value_get(self, cr, uid, context=None):
if context is None:
context = {}
if context.get('partner_id', False):
cr.execute("""
SELECT discount_value, id
FROM res_partner
WHERE id = %d""" % context['partner_id'])
res = cr.fetchall()
if res[0][0]:
return res[0][0]
else:
return False
else:
return False
# ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
_columns = {
# TODO remove (put in correct module mx_discount_scale_order
# vvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvv
'multi_discount_rates': fields.char('Discount scale', size=30),
'price_use_manual': fields.boolean('Use manual net price',
help="If specificed use manual net price instead of "
"lord price - discount"),
'price_unit_manual': fields.float(
'Manual net price', digits_compute=dp.get_precision('Sale Price')),
# ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
'image_http': fields.boolean('Has image',
help="Has link for image on the web"),
'image_replace_name': fields.char('Override name',
size=30,
help="Usually the name is art. code + '.PNG', e.g. 400.PNG; "
"if you want to change it, write the name in this field!"),
}
# TODO remove (put in correct module mx_discount_scale_order
# vvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvv
_defaults = {
'multi_discount_rates': _discount_rates_get,
'discount': _discount_value_get,
}
# ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
"""
# fiam_partner.py
Add zone manage TODO maybe better put in a single module
Add extra fields populated from accounting > maybe better in a single module
"""
# TODO move in new module!!!!
class ResPartnerZone(orm.Model):
_name = 'res.partner.zone'
_description = 'Partner Zone'
_order = 'type,name'
_columns = {
'name':fields.char('Zone', size=64, required=True),
'mexal_id': fields.integer('Mexal ID'),
'type': fields.selection([
('region', 'Region'),
('state', 'State'),
('area', 'Area'),
], 'Type', required=True),
}
_defaults = {
'type': lambda *a: 'state',
}
class ResPartnerExtraFields(orm.Model):
_inherit = 'res.partner'
def _function_statistics_invoice(
self, cr, uid, ids, args, field_list, context=None):
'''
Calculate whether invoicing went up or down:
@param cr: the current row, from the database cursor,
@param uid: the current user’s ID for security checks,
@param context: a standard dictionary for contextual values
@return: dictionary mapping partner id to invoice trend and trend %
'''
if context is None:
context = {}
res = {}
for partner in self.browse(cr, uid, ids, context=context):
if partner.invoiced_current_year == partner.invoiced_last_year:
segno = 'equal'
valore = 0.0
else:
if partner.invoiced_last_year:
valore = 100.0 * (
partner.invoiced_current_year -
partner.invoiced_last_year) / partner.invoiced_last_year
else:
valore = 100.0
if partner.invoiced_current_year < partner.invoiced_last_year:
segno = 'down'
else:
segno = 'up'
res[partner.id] = {}
res[partner.id]['invoice_trend'] = segno
res[partner.id]['invoice_trend_perc'] = valore
return res
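    # Worked example (illustrative): invoiced_current_year=120.0 and
    # invoiced_last_year=100.0 give valore = 100*(120-100)/100 = 20.0 and
    # segno='up', i.e. {'invoice_trend': 'up', 'invoice_trend_perc': 20.0}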
_columns = {
'zone_id': fields.many2one('res.partner.zone', 'Zone'),
'mexal_province': fields.char('MX province', size=9),
# vvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvv
# TODO MOVE IN fido_management:
'fido_date': fields.date('FIDO Date'),
'fido_ko': fields.boolean('No FIDO'),
        'fido_total': fields.float('Total FIDO', digits=(16, 2)),
# ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
'mexal_note': fields.text('Mexal Note'),
'import': fields.char('ID import', size=10),
# vvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvv
# NO MORE USED:
        'mexal_c': fields.char('Mexal customer', size=9),
        'mexal_s': fields.char('Mexal supplier', size=9),
# ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
'fiscal_id_code': fields.char('Fiscal code', size=16),
'private': fields.boolean('Private'),
'type_cei': fields.char('Type CEI', size=1),
        # TODO remove (put in the correct module, mx_discount_scale_order)
# vvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvv
'discount_value': fields.float('Discount value', digits=(16, 2)),
'discount_rates':fields.char('Discount scale', size=30),
# ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
# Statistics values:
# TODO Override fields and calculate with internal data not MX data
# vvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvv
'date_last_ddt': fields.datetime('Date last DDT'),
'day_left_ddt': fields.integer('Day left last DDT'),
'invoiced_current_year': fields.float(
'Current invoiced', digits=(16, 2)),
'invoiced_last_year': fields.float('Last invoiced', digits=(16, 2)),
'order_current_year': fields.float('Current order', digits=(16, 2)),
'order_last_year': fields.float('Last order', digits=(16, 2)),
'invoice_trend': fields.function(
_function_statistics_invoice, method=True, type='selection',
selection=[
('down','<'),
('equal','='),
('up','>'), ],
string='Invoice status', store=True, readonly=True,
multi='invoice_stat'),
'invoice_trend_perc': fields.function(
_function_statistics_invoice, method=True, type='float',
digits=(16,2), string='Invoice diff. %', store=True, readonly=True,
multi='invoice_stat'),
# ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
'type_id': fields.many2one(
'crm.tracking.campaign',
# NOTE ex: 'crm.case.resource.type',
'Campaign'),
}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
Parrot-Developers/ardupilot | Tools/scripts/frame_sizes.py | 351 | 1117 | #!/usr/bin/env python
import re, sys, operator, os
code_line = re.compile(r"^\s*\d+:/")
frame_line = re.compile(r"^\s*\d+\s+/\* frame size = (\d+) \*/")
class frame(object):
def __init__(self, code, frame_size):
self.code = code
self.frame_size = int(frame_size)
frames = []
def process_lst(filename):
'''process one lst file'''
last_code = ''
h = open(filename, mode='r')
for line in h:
if code_line.match(line):
last_code = line.strip()
elif frame_line.match(line):
frames.append(frame(last_code, frame_line.match(line).group(1)))
h.close()
if len(sys.argv) > 1:
dname = sys.argv[1]
else:
dname = '.'
for root, dirs, files in os.walk(dname):
for f in files:
if f.endswith(".lst"):
process_lst(os.path.join(root, f))
sorted_frames = sorted(frames,
key=operator.attrgetter('frame_size'),
reverse=True)
print("FrameSize Code")
for frame in sorted_frames:
if frame.frame_size > 0:
print("%9u %s" % (frame.frame_size, frame.code))
| gpl-3.0 |
inertialsense/InertialSenseSDK | python/pylib/ISToolsDataSorted.py | 1 | 51887 | '''
Created on Feb 22, 2014
@author: waltj
'''
from numbers import Number
import numpy as np
import os
import glob
import sys
import simplekml
import ctypes as ct
import pylib.pose as pose
import pylib.filterTools as ft
# Profiling code
import time as systime
from numpy import uint8 as u8
from numpy import uint16 as u16
from numpy import uint32 as u32
from numpy import int32 as i32
from numpy import float32 as f32
from numpy import int64 as i64
from numpy import float64 as f64
import datetime
# Set Reference LLA (deg, deg, m) used for NED - Salem, UT
refLla = np.r_[40.0557114, -111.6585476, 1426.77]
gpsWeek = 0
showUtcTime = 0
# Set Reference latitude, longitude, height above ellipsoid (deg, deg, m) used for NED calculations
def setRefLla(lla):
global refLla
refLla = lla
def setShowUtcTime(show):
global showUtcTime
showUtcTime = show
WEEK_TIME = []
def setGpsWeek(week):
global gpsWeek
global WEEK_TIME
# Search for a valid GPS week
size = np.shape(week)
if size and size[0] > 1:
# if week[0]:
# week = week[0]
# else:
# week = week[-1]
week = np.max(week)
if week > gpsWeek:
gpsWeek = week
GPS_start_Time = datetime.datetime.strptime('6/Jan/1980', "%d/%b/%Y")
WEEK_TIME = GPS_start_Time + (datetime.timedelta(weeks=int(week)))
def getTimeFromTowMs(ms):
global WEEK_TIME
return [WEEK_TIME + datetime.timedelta(milliseconds=int(i)) for i in ms]
def getTimeFromTow(s):
global WEEK_TIME
return [WEEK_TIME + datetime.timedelta(seconds=float(i)) for i in s]
def getTimeFromGTime(gtime):
GPS_start_Time = datetime.datetime.strptime('1/Jan/1970', "%d/%b/%Y")
return [GPS_start_Time + datetime.timedelta(seconds=float(t['time'] + t['sec'])) for t in gtime]
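# Worked example (illustrative): after setGpsWeek(2000), getTimeFromTow([345600.0])
# returns [WEEK_TIME + 4 days], i.e. the Thursday of GPS week 2000 (GPS weeks
# start on Sunday); a gtime of time=1, sec=0.5 maps to
# 1970-01-01 00:00:01.500000 via getTimeFromGTime.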
# import time
# Default run behavior
# execfile("..\INS_logger\IsParseLoggerDat.py")
# def getdict(self):
# dict((f, getattr(self, f)) for f, _ in self._fields_)
# Empty class/dictionary
class cObj:
def __init__(self):
# self.res = []
return
class cDataType:
def __init__(self, name='', dtype=0):
self.name = name
self.dtype = dtype
def set(self, name, dtype):
self.name = name
self.dtype = dtype
# def nameID(self, did, name ):
# self.id = did
# self.name = name
#
# def dType(self, dtype):
# self.dtype = dtype
def vector3(_v, name):
return np.c_[_v[name + '[0]'].T, _v[name + '[1]'].T, _v[name + '[2]'].T]
def vector4(_v, name):
return np.c_[_v[name + '[0]'].T, _v[name + '[1]'].T, _v[name + '[2]'].T, _v[name + '[3]'].T]
RAW_DATA_OBS = 1
RAW_DATA_EPH = 2
RAW_DATA_GEPH = 3
RAW_DATA_SBAS = 4
RAW_DATA_STA = 5
RAW_DATA_RTK_SOL = 123
dtypeGpsRaw = np.dtype([
('dataSerNum', u32),
('receiverIndex', u8),
('type', u8),
('count', u8),
('reserved', u8)
])
dtypeGtime = np.dtype([
('time', i64),
('sec', f64)])
dtypeEph = np.dtype([
('sat', i32),
('iode', i32),
('iodc', i32),
('sva', i32),
('svh', i32),
('week', i32),
('code', i32),
('flag', i32),
('toe', dtypeGtime),
('toc', dtypeGtime),
('ttr', dtypeGtime),
('A', f64),
('e', f64),
('i0', f64),
('OMG0', f64),
('omg', f64),
('M0', f64),
('deln', f64),
('OMGd', f64),
('idot', f64),
('crc', f64),
('crs', f64),
('cuc', f64),
('cus', f64),
('cic', f64),
('cis', f64),
('toes', f64),
('fit', f64),
('f0', f64),
('f1', f64),
('f2', f64),
('tgd', (f64, 4)),
('Adot', f64),
('ndot', f64),
])
dtypeGEph = np.dtype([
('sat', i32),
('iode', i32),
('frq', i32),
('svh', i32),
('sva', i32),
('age', i32),
('toe', dtypeGtime),
('tof', dtypeGtime),
('pos', (f64, 3)),
('vel', (f64, 3)),
('acc', (f64, 3)),
('taun', f64),
('gamn', f64),
('dtaun', f64)
])
dtypeSbas = np.dtype([
('week', i32),
('tow', i32),
('prn', i32),
('msg', (u8, 29)),
('reserved', (u8, 3)),
])
dtypeSta = np.dtype([
('deltype', i32),
('pos', (f32, 3)),
('delta', (f32, 3)),
('hgt', f32),
('stationId', i32),
])
dtypeObsD = np.dtype([
('time', dtypeGtime),
('sat', u8),
('rcv', u8),
('SNR', u8),
('LLI', u8),
('code', u8),
('qualL', u8),
('qualP', u8),
('reserved', u8),
('L', f64),
('P', f64),
('D', f32)
])
class cDevice:
def __init__(self, index, directory, serialNumber, refIns=None):
global refLla
self.unknownDidDisplayed = {}
self.serialNumber = serialNumber
self.dtCnkHdr = np.dtype([
('marker', u32),
('version', u16),
('classification', u16),
('name', np.dtype((str, 4))),
('invName', np.dtype((str, 4))),
('dataSize', u32),
('invDataSize', u32),
('grpNum', u32),
('devSerialNum', u32),
('pHandle', u32),
('reserved', u32),
])
self.dtCnkSubHdr = np.dtype([
('dHdr', [('id', u32),
('size', u32),
('offset', u32), ]),
('dCount', u32),
])
# Data info
        self.DID_COUNT = 92  # must exceed the highest DID populated below (di[91])
self.di = [cDataType() for i in range(self.DID_COUNT)]
self.di[1].set('devInfo', np.dtype([
('dataSerNum', u32), # Indicates serial order in time
('reserved', u32),
('serialNumber', u32),
('hardwareVer', (u8, 4)),
('firmwareVer', (u8, 4)),
('build', u32),
('commVer', (u8, 4)),
('repoRevision', f32),
('manufacturer', np.dtype((str, 24))),
('buildDate', (u8, 4)),
('buildTime', (u8, 4)),
('addInfo', np.dtype((str, 24))),
]))
dtypeImu = np.dtype([
('pqr', (f32, 3)),
('acc', (f32, 3)),
])
# 2 'crashInfo'
self.di[3].set('preintegratedImu', np.dtype([
('dataSerNum', u32), # Indicates serial order in time
('time', f64),
('theta1', (f32, 3)),
('theta2', (f32, 3)),
('vel1', (f32, 3)),
('vel2', (f32, 3)),
('dt', f32),
]))
self.di[4].set('ins1', np.dtype([
('dataSerNum', u32), # Indicates serial order in time
('week', u32),
('tow', f64),
('iStatus', u32),
('hStatus', u32),
('euler', (f32, 3)),
('uvw', (f32, 3)),
('lla', (f64, 3)),
('ned', (f32, 3)),
]))
self.di[5].set('ins2', np.dtype([
('dataSerNum', u32), # Indicates serial order in time
('week', u32),
('tow', f64),
('iStatus', u32),
('hStatus', u32),
('q', (f32, 4)),
('uvw', (f32, 3)),
('lla', (f64, 3)),
]))
dtypeGpsPos = np.dtype([
('dataSerNum', u32), # Indicates serial order in time
('week', u32),
('timeOfWeekMs', u32),
('status', u32),
('ecef', (f64, 3)),
('lla', (f64, 3)),
('hMSL', f32),
('hAcc', f32),
('vAcc', f32),
('pDop', f32),
('cnoMean', f32),
('towOffset', f64)
])
self.di[6].set('gps1UbxPos', dtypeGpsPos)
# 7 'config'
# 8 'asciiBCastPeriod'
dtStartVars = np.dtype([
('lla', (f64, 3)),
('uvw', (f32, 3)),
('q', (f32, 4)),
])
self.di[9].set('insMisc', np.dtype([
('dataSerNum', u32), # Indicates serial order in time
('tow', f64),
('towMs', u32),
('x', dtStartVars),
('theta', (f32, 3)),
('ned', (f32, 3)),
('dcm', (f32, 9)),
('pqr', (f32, 3)),
('acc', (f32, 3)),
('mag', (f32, 3)),
('mslBar', f32),
]))
self.di[10].set('sysParams', np.dtype([
('dataSerNum', u32), # Indicates serial order in time
('towMs', u32),
('iStatus', u32),
('hStatus', u32),
('imuTemp', f32),
('baroTemp', f32),
('mcuTemp', f32),
('reserved1', f32),
('sampleDtMs', u32),
('insDtMs', u32),
('reserved2', (f32, 4)),
('genFaultcode', u32),
]))
# 11 'sysSensors'
self.di[12].set('flashConfig', np.dtype([
('dataSerNum', u32), # Indicates serial order in ti
('size', u32),
('checksum', u32),
('key', u32),
('startupSampleDtMs', u32),
('startupNavDtMs', u32),
('ser0BaudRate', u32),
('ser1BaudRate', u32),
('insRotation', (f32, 3)),
('insOffset', (f32, 3)),
('gps1AntOffset', (f32, 3)),
('insDynModel', u32),
('sysCfgBits', u32),
('refLla', (f64, 3)),
('lastLla', (f64, 3)),
('lastLlaTimeOfWeekMs', u32),
('lastLlaWeek', u32),
('lastLlaUpdateDistance', f32),
('ioConfig', u32),
('cBrdConfig', u32),
('gps2AntOffset', (f32, 3)),
('zeroVelRotation', (f32, 3)),
('zeroVelOffset', (f32, 3)),
('magInclination', f32),
('magDeclination', f32),
('gpsTimeSyncPulsePeriodMs', u32),
('startupGPSDtMs', u32),
('RTKCfgBits', u32),
('reserved', u32),
('ser2BaudRate', u32),
]))
self.di[13].set('gps1Pos', dtypeGpsPos)
self.di[14].set('gps2Pos', dtypeGpsPos)
# 15 'gps1Cno'
# 16 'gps2Cno'
# 17 'gps2Version'
# 18 'gps2Version'
# 19 'magCal'
self.di[20].set('insResources', np.dtype([
('dataSerNum', u32), # Indicates serial order in time
('towMs', u32),
('x_dot', dtStartVars),
('magYawOffset', f32),
]))
self.di[21].set('gps1RtkPosRel', np.dtype([
('dataSerNum', u32), # Indicates serial order in time
('timeOfWeekMs', u32 ),
('differentialAge', f32 ),
('arRatio', f32 ),
('vectorToBase', (f32, 3)),
('distanceToBase', f32 ),
('headingToBase', f32 ),
]))
self.di[22].set('gps1RtkPosMisc', np.dtype([
('dataSerNum', u32), # Indicates serial order in time
('timeOfWeekMs', u32),
('accuracyPos', (f32, 3)),
('accuracyCov', (f32, 3)),
('arThreshold', f32),
('gDop', f32),
('hDop', f32),
('vDop', f32),
('baseLla', (f64, 3)),
('cycleSlipCount', u32),
('roverGpsObservationCount', u32),
('baseGpsObservationCount', u32),
('roverGlonassObservationCount', u32),
('baseGlonassObservationCount', u32),
('roverGalileoObservationCount', u32),
('baseGalileoObservationCount', u32),
('roverBeidouObservationCount', u32),
('baseBeidouObservationCount', u32),
('roverQzsObservationCount', u32),
('baseQzsObservationCount', u32),
('roverGpsEphemerisCount', u32),
('baseGpsEphemerisCount', u32),
('roverGlonassEphemerisCount', u32),
('baseGlonassEphemerisCount', u32),
('roverGalileoEphemerisCount', u32),
('baseGalileoEphemerisCount', u32),
('roverBeidouEphemerisCount', u32),
('baseBeidouEphemerisCount', u32),
('roverQzsEphemerisCount', u32),
('baseQzsEphemerisCount', u32),
('roverSbasCount', u32),
('baseSbasCount', u32),
('baseAntennaCount', u32),
('ionUtcAlmCount', u32)
]))
# 23 'Feature Bits'
dtypeSensorsMpuWTemp = np.dtype([
('pqr', (f32, 3)),
('acc', (f32, 3)),
('mag', (f32, 3)),
('temp', f32),
])
self.di[24].set('sensorsIs1', np.dtype([
('dataSerNum', u32), # Indicates serial order in time
('mpu', (dtypeSensorsMpuWTemp, 2)),
]))
# 25 'Sensor IS2'
# 26 'Sensor TC Bias'
self.di[27].set('sensorBias', np.dtype([
('dataSerNum', u32), # Indicates serial order in time
('towMs', u32),
('pqr', (f32, 3)),
('acc', (f32, 3)),
('mslBar', f32),
('magI', (f32, 3)),
('magB', (f32, 3)),
]))
# 28 'Sensor ADC'
# 29 'SCOMP'
dtypeGpsVel = np.dtype([
('dataSerNum', u32), # Indicates serial order in time
('timeOfWeekMs', u32),
('velEcef', (f32, 3)),
('sAcc', f32)
])
self.di[30].set('gps1Vel', dtypeGpsVel)
self.di[31].set('gps2Vel', dtypeGpsVel)
# 32 'HDW params'
# 33-37 Flash
# 38 'RTOS Info'
self.di[39].set('debugArray', np.dtype([
('dataSerNum', u32), # Indicates serial order in time
('i', (i32, 9)),
('f', (f32, 9)),
('lf', (f64, 3)),
]))
self.di[47].set('insDev1', np.dtype([
('dataSerNum', u32), # Indicates serial order in time
('week', u32),
('tow', f64),
('iStatus', u32),
('hStatus', u32),
('euler', (f32, 3)),
('uvw', (f32, 3)),
('lla', (f64, 3)),
('ned', (f32, 3)),
('eulerErr', (f32, 3)),
('uvwErr', (f32, 3)),
('nedErr', (f32, 3)),
]))
self.di[48].set('ekfStates', np.dtype([
('dataSerNum', u32), # Indicates serial order in time
('time', f64),
('qe2b', (f32, 4)),
('ve', (f32, 3)),
('ecef', (f64, 3)),
('biasPqr', (f32, 3)),
('biasAcc', (f32, 3)),
('biasBaro', f32),
('magDec', f32),
('magInc', f32),
]))
# 49 'EKF Covariance'
# 50 'EKF Innovations'
# 51 'EKF Innovations Var'
self.di[52].set('magnetometer1', np.dtype([
('dataSerNum', u32), # Indicates serial order in time
('time', f64),
('mag', (f32, 3)),
]))
self.di[53].set('barometer', np.dtype([
('dataSerNum', u32), # Indicates serial order in time
('time', f64),
('bar', f32),
('mslBar', f32),
('barTemp', f32),
('humidity', f32),
]))
self.di[54].set('gps1RtkPos', dtypeGpsPos)
self.di[55].set('gps1RtkCmpRel', np.dtype([
('dataSerNum', u32), # Indicates serial order in time
('timeOfWeekMs', u32 ),
('differentialAge', f32 ),
('arRatio', f32 ),
('vectorToBase', (f32, 3)),
('distanceToBase', f32 ),
('headingToBase', f32 ),
]))
self.di[56].set('gpsVersion', np.dtype([
('dataSerNum', u32), # Indicates serial order in time
('swVersion', np.dtype((str, 30))),
('hwVersion', np.dtype((str, 10))),
('extension', np.dtype((str, 30))),
('reserved', (u32, 2)),
]))
# 57 'Communications Loopback'
self.di[58].set('dualImu', np.dtype([
('dataSerNum', u32), # Indicates serial order in time
('time', f64),
('I', (dtypeImu, 2)),
]))
self.di[59].set('inl2MagObs', np.dtype([
('dataSerNum', u32), # Indicates serial order in time
('towMs', u32),
('Ncal_samples', u32),
('ready', u32),
('calibrated', u32),
('auto-recal', u32),
('outlier', u32),
('magHeading', f32),
('insHeading', f32),
('magInsHdgDelta', f32),
('nis', f32),
('nis_threshold', f32),
('Wcal', (f32, 9)),
('activeCalSet', u32),
('magHeadingOffset', f32),
]))
# 60 - Raw GPS Ephemeris and Observation from Base
self.di[60].set('GPSBaseRaw', dtypeGpsRaw)
# 61 - RTK Options
# 62 - Internal User page Info
# 63 - Manufacturing Info
# 64 - Self Test
# 65 - INS - 3 - ECEF Position & Quaternions NED
# 66 - INS - 4 - ECEF Position & Quaternions ECEF
self.di[67].set('inl2Variance', np.dtype([
('dataSerNum', u32), # Indicates serial order in time
('towMs', u32),
('PxyxNED', (f32, 3)),
('PvelNED', (f32, 3)),
('PattNED', (f32, 3)),
('PABias', (f32, 3)),
('PWBias', (f32, 3)),
('PBaroBias', f32),
('PDeclination', f32),
]))
# 68 - Strobe input time
self.di[69].set('GPS1Raw', dtypeGpsRaw)
self.di[70].set('GPS2Raw', dtypeGpsRaw)
self.di[91].set('gps1RtkCmpMisc', np.dtype([
('dataSerNum', u32), # Indicates serial order in time
('timeOfWeekMs', u32),
('accuracyPos', (f32, 3)),
('accuracyCov', (f32, 3)),
('arThreshold', f32),
('gDop', f32),
('hDop', f32),
('vDop', f32),
('baseLla', (f64, 3)),
('cycleSlipCount', u32),
('roverGpsObservationCount', u32),
('baseGpsObservationCount', u32),
('roverGlonassObservationCount', u32),
('baseGlonassObservationCount', u32),
('roverGalileoObservationCount', u32),
('baseGalileoObservationCount', u32),
('roverBeidouObservationCount', u32),
('baseBeidouObservationCount', u32),
('roverQzsObservationCount', u32),
('baseQzsObservationCount', u32),
('roverGpsEphemerisCount', u32),
('baseGpsEphemerisCount', u32),
('roverGlonassEphemerisCount', u32),
('baseGlonassEphemerisCount', u32),
('roverGalileoEphemerisCount', u32),
('baseGalileoEphemerisCount', u32),
('roverBeidouEphemerisCount', u32),
('baseBeidouEphemerisCount', u32),
('roverQzsEphemerisCount', u32),
('baseQzsEphemerisCount', u32),
('roverSbasCount', u32),
('baseSbasCount', u32),
('baseAntennaCount', u32),
('ionUtcAlmCount', u32)
]))
# Profiling
timeStart = systime.time()
self.loadTime = 0
self.unknownId = {}
self.directory = directory
self.serialNumber = serialNumber
self.rdat = {} # Raw data in python list format
self.data = {} # data in numpy format
self.index = index # index in all serial numbers
self.refLla = refLla
# self.version = []
# self.units = []
if refIns is not None:
print("#%2d Opening: Ref INS %s" % (index, directory))
fileMask = "LOG_REF_INS*.dat"
# Use first file in directory if not defined
else:
print("#%2d Opening: %s %s" % (index, serialNumber, directory))
fileMask = "LOG_" + serialNumber + "*.sdat"
if not os.path.isdir(directory):
print("Directory doesn't exist!")
sys.exit()
os.chdir(directory)
self.fileNames = glob.glob(fileMask)
if not self.fileNames:
# print(" ***** Files not found! Check directory name and serial number. ***** ")
raise Exception('Load Error: .sdat files not found.')
self.parse()
self.clean()
# Profiling
self.loadTime = systime.time() - timeStart
print("Load time: %.2fs" % (self.loadTime))
def clean(self):
for key, item in self.data.iteritems():
if not isinstance(item, np.ndarray):
continue
for field in ['towMs', 'timeOfWeekMs', 'tow']:
if field in item.dtype.names:
if (np.diff(item[field].astype(np.int64)) < 0).any():
idx = np.argmin(np.diff(item[field].astype(np.int64)))
print("\033[93m" + "Time went backwards in ", key, r"!!!, removing all data " + ("before" if idx < len(item[field])/2.0 else "after") + "\033[0m")
if idx < len(item[field])/2.0:
self.data[key] = item[idx +1:]
else:
self.data[key] = item[:idx]
ms_multiplier = 1000.0 if 'Ms' in field else 1.0
if (np.diff(item[field]) > 3600 * ms_multiplier).any():
print("\033[93m" + "greater than 1 minute gap in ", key, " data, assuming GPS fix was acquired during data set, and chopping data"+ "\033[0m")
idx = np.argmax(np.diff(item[field])) + 1
self.data[key] = item[idx:]
def parse(self):
self.curTime = np.r_[0]
self.raw_gps_keys = []
# Iterate over files to concatenate data
self.fileNames.sort()
for fileName in self.fileNames:
print(fileName)
self.__parseFile(fileName)
# set the raw GPS dictionary as a datatype
for name in self.raw_gps_keys:
for key, item in self.data[name].iteritems():
self.data[name][key] = np.array(item)
if 'ins2' in self.data.keys():
setGpsWeek(self.data['ins2']['week'][0])
def parse_raw_gps(self, f, did, dati, sHdr, cHdr):
valid_types = [1, 2, 3, 4, 5, 6, 123]
valid_receiver_indexes = [1, 2, 3, 4]
if dati.name not in self.raw_gps_keys:
self.raw_gps_keys.append(dati.name)
        buf = np.fromfile(f, np.uint8, count=int(cHdr['dataSize'][0]))
for i in range(sHdr['dCount']):
pointer = 0
hdr_size = np.dtype(dtypeGpsRaw).itemsize
gps_raw_header = buf[pointer:pointer + hdr_size].view(dtypeGpsRaw)
pointer += hdr_size
# Pull in the header data
try:
type = gps_raw_header['type'][0]
count = gps_raw_header['count'][0]
receiverIndex = gps_raw_header['receiverIndex'][0]
assert (type in valid_types and receiverIndex in valid_receiver_indexes)
if dati.name not in self.data.keys():
self.data[dati.name] = {'dataSerNum': [gps_raw_header['dataSerNum'][0]],
'receiverIndex': [receiverIndex],
'type': [type],
'count': [count],
'corrupt_data': 0}
else:
self.data[dati.name]['dataSerNum'].append(gps_raw_header['dataSerNum'][0])
self.data[dati.name]['receiverIndex'].append(gps_raw_header['receiverIndex'][0])
self.data[dati.name]['type'].append(type)
self.data[dati.name]['count'].append(count)
except:
print("invalid raw gps header: type=", type, "count = ", count, "receiverIndex = ", receiverIndex)
self.data[dati.name]['corrupt_data'] += 1
continue
if type == RAW_DATA_OBS:
try:
bytes_in_payload = np.dtype(dtypeObsD).itemsize * count
obs = buf[pointer:pointer + bytes_in_payload].view(dtypeObsD)
pointer += bytes_in_payload
if 'obs' not in self.data[dati.name]:
self.data[dati.name]['obs'] = np.rec.array(obs)
else:
self.data[dati.name]['obs'] = np.hstack((self.data[dati.name]['obs'], np.rec.array(obs)))
except:
print("badly formed raw gps data - DID: %d type: Obs, count: %d, actual: %f" %
(did, count, (len(buf) - 8) / (float(np.dtype(dtypeObsD).itemsize))))
self.data[dati.name]['corrupt_data'] += 1
continue
def __parseFile(self, filename):
with open(filename, 'rb') as f:
while 1:
# Read and validate chunk header
cHdr = np.fromfile(f, dtype=self.dtCnkHdr, count=1)
count = cHdr['dataSize']
if np.shape(cHdr)[0] == 0 or cHdr['marker'][0] != 0xFC05EA32:
# print( "Done parsing data!" )
break
# Read chunk sub header
sHdr = np.fromfile(f, dtype=self.dtCnkSubHdr, count=1)
# Find ID
did = sHdr['dHdr']['id'][0]
dsize = sHdr['dHdr']['size'][0]
# if did == 6:
# print( "DID: ",did )
if did >= self.DID_COUNT:
if did not in self.unknownDidDisplayed.keys():
self.unknownDidDisplayed[did] = True
print("==============================================================================")
print(" - ERROR - Data ID " + str(did) + " out of range " + str(
self.DID_COUNT) + ". Please add missing DID definitions to ISToolsDataSorted.pyx.")
print("==============================================================================")
did = 0
self.unknownDidDisplayed[did] = True
systime.sleep(0.5)
dati = self.di[did]
if dati.dtype:
if dsize == (dati.dtype.itemsize - 4):
# Known data type
# print("Found id: ", did)
cDat = np.fromfile(f, dati.dtype, count=sHdr['dCount'])
if dati.name in self.data.keys():
# Append
# self.data[dati.name].append(cDat)
self.data[dati.name] = np.concatenate([self.data[dati.name], cDat])
else:
# Create
self.data[dati.name] = cDat
# Handle Raw data differently (because it changes sizes and carries multiple messages)
elif dati.dtype == dtypeGpsRaw:
self.parse_raw_gps(f, did, dati, sHdr, cHdr)
else:
# Mismatched data size
print("==============================================================================")
print(" - ERROR - Data ID", did, "(" + dati.name + ") mismatched size. Read", dsize, "expected", dati.dtype.itemsize - 4)
print("==============================================================================")
# systime.sleep(0.5)
# sys.exit()
cDat = np.fromfile(f, np.uint8, count=cHdr['dataSize'][0])
else:
# Unknown data type
if did not in self.unknownDidDisplayed.keys():
self.unknownDidDisplayed[did] = True
print("Undefined DID: ", did)
cDat = np.fromfile(f, np.uint8, count=cHdr['dataSize'][0])
class cDevices:
def __init__(self):
self.devices = []
self.loadTime = 0 # Profiling
# Load data to be viewed. If the "selection.txt" file is found, the line by line contents
# of selection.txt specify an additional subdirectory and list of serial numbers to be loaded.
# If serial numbers are not specified, either in selection.txt or in the loadData() parameter,
# then all serial numbers and files are read.
# directory Directory data is loaded from. If not specified, the current directory is used. If no data found, use latest data sub directory.
# serialNumbers Device serial numbers to load. If not specified, all serial numbers and files found are loaded.
# startDev First index of found devices (serial numbers) to load.
# devCount Number of devices (serial numbers) to load.
def loadData(self, directory=None, serialNumbers=None, refIns=None, startDev=0, devCount=-1):
# Profiling
self.loadTime = 0
timeLoadStart = systime.time()
# We don't support reference INS right now
if refIns != None:
raise Exception('refIns not supported right now.')
if directory is not None:
# Convert backslash to forward slash (Windows to Linux)
directory = directory.replace('\\', '/')
if '~' in directory:
pass
# Automatically open logs specified in "selection.txt"
os.chdir(directory)
# Use selection file if it exists
selectionFileName = 'selection.txt'
if os.path.exists(selectionFileName):
with open(selectionFileName) as f:
lines = f.read().splitlines()
# Convert backslash to forward slash (Windows to Linux)
directory += lines[0].replace('\\', '/')
# Read serial numbers from selection.txt
serialNumbers = []
for serNum in lines[1:]:
# Stop if we find a blank line
if serNum == '':
break
serialNumbers.append(serNum)
# If current directory has NO data, use newest sub directory containing data.
files = os.listdir(directory)
if not any(".sdat" in s for s in files):
dirName = None
dirTime = 0
for fname in files:
# Has data log directory name format
if len(fname) >= 15 and fname[0:2] == '20' and fname[8:9] == '_':
dTime = int(fname[0:8] + fname[9:15])
# Is latest
if dTime > dirTime:
dirTime = dTime
dirName = fname
if dirName != None:
directory += dirName
# Print directory
print("Loading Data: ", directory)
# Add all devices in directory
if serialNumbers is None or serialNumbers == []:
# Find list of serial numbers from files in directory
files = os.listdir(directory)
serNums = []
            for fname in files:
                if fname.find('.sdat') != -1:
                    fname = fname.replace('.sdat', '')
                    if fname.find('LOG_SN') != -1:
                        fname = fname[4:11]
                        if fname not in serNums:
                            serNums.append(fname)
                    elif fname.find('LOG_PR') != -1:
                        fname = fname.replace('LOG_', '')
                        fname = fname[:fname.find('_')]
                        if fname not in serNums:
                            serNums.append(fname)
serialNumbers = serNums
count = len(serialNumbers)
# Validate serial numbers
if count <= 0:
raise Exception('Load Error: .sdat files not found.')
# Find size and last index
if devCount > 0 and devCount < count:
count = devCount
endIndex = min(startDev + count, len(serialNumbers))
# print ("Start Index: ", startDev, " End Index: ", endIndex)
# Add devices
for i in range(startDev, endIndex):
device = cDevice(i, directory, serialNumbers[i], refIns)
self.devices.append(device)
# Profiling
self.loadTime = systime.time() - timeLoadStart
print("Total load time: %.2fs" % (self.loadTime))
def gpsTimeToUTC(gpsWeek, gpsSOW, leapSecs=14):
global showUtcTime
if showUtcTime == 0:
return gpsSOW
# Search for a valid GPS week
size = np.shape(gpsWeek)
if size and size[0] > 1:
# if gpsWeek[0] == 0:
# gpsWeek = gpsWeek[-1]
# Use the largest value for the week
gpsWeek = np.max(gpsWeek)
if gpsWeek == 0:
return gpsSOW
secsInWeek = 604800
# secsInDay = 86400
gpsEpoch = (1980, 1, 6, 0, 0, 0) # (year, month, day, hh, mm, ss)
# secFract = gpsSOW % 1
epochTuple = gpsEpoch + (-1, -1, 0)
t0 = systime.mktime(epochTuple) - systime.timezone # mktime is localtime, correct for UTC
tdiff = (gpsWeek * secsInWeek) + gpsSOW - leapSecs
t = t0 + tdiff
return t
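# Worked example (illustrative): with showUtcTime enabled, gpsWeek=1800 and
# gpsSOW=0.0 return t0 + 1800*604800 - 14, i.e. Unix-epoch seconds for the
# start of GPS week 1800 minus the assumed 14 s leap-second offset.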
def join_struct_arrays(arrays):
sizes = np.array([a.itemsize for a in arrays])
offsets = np.r_[0, sizes.cumsum()]
n = len(arrays[0])
joint = np.empty((n, offsets[-1]), dtype=np.uint8)
for a, size, offset in zip(arrays, sizes, offsets):
joint[:, offset:offset + size] = a.view(np.uint8).reshape(n, size)
dtype = sum((a.dtype.descr for a in arrays), [])
return joint.ravel().view(dtype)
# Join list of structured numpy arrays into one
def join_struct_arrays2(arrays):
newdtype = sum((a.dtype.descr for a in arrays), [])
newrecarray = np.empty(len(arrays[0]), dtype=newdtype)
for a in arrays:
for name in a.dtype.names:
newrecarray[name] = a[name]
return newrecarray
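# Example usage (illustrative):
#   a = np.zeros(3, dtype=[('x', np.float64)])
#   b = np.zeros(3, dtype=[('y', np.float64)])
#   ab = join_struct_arrays2([a, b])  # ab.dtype.names == ('x', 'y')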
class cSIMPLE:
def __init__(self, _v):
self.v = _v
class cIMU:
def __init__(self, _v):
global gpsWeek
self.v = _v
self.__flt = cObj()
self.__flt.pqr = None
self.__flt.acc = None
self.__flt.pqrNoBias = None
self.__flt.accNoBias = None
self.__flt.barNoBias = None
self.cornerFreqHz = 60
# self.cornerFreqHz = 30
# self.cornerFreqHz = 15
self.time = gpsTimeToUTC(gpsWeek, self.v['time'])
self.i = [cObj(), cObj()]
for j in range(0, 2):
self.i[j].pqr = None
self.i[j].acc = None
# Dual IMU
if 'I' in self.v.dtype.names:
self.i[0].pqr = self.v['I']['pqr'][:, 0, :]
self.i[1].pqr = self.v['I']['pqr'][:, 1, :]
self.i[0].acc = self.v['I']['acc'][:, 0, :]
self.i[1].acc = self.v['I']['acc'][:, 1, :]
# Preintegrated IMU
if 'theta1' in self.v.dtype.names and 'theta2' in self.v.dtype.names:
divDt = 1.0 / self.v['dt']
self.i[0].pqr = self.v['theta1']
self.i[1].pqr = self.v['theta2']
self.i[0].acc = self.v['vel1']
self.i[1].acc = self.v['vel2']
for i in range(0, 2):
for a in range(0, 3):
self.i[i].pqr[:, a] *= divDt
self.i[i].acc[:, a] *= divDt
def fltAcc(self):
if self.__flt.acc is None:
self.__flt.acc = ft.lpfNoDelay(self.v['acc'], self.cornerFreqHz, time=self.v['time'])
return self.__flt.acc
def fltPqr(self):
if self.__flt.pqr is None:
self.__flt.pqr = ft.lpfNoDelay(self.v['pqr'], self.cornerFreqHz, time=self.v['time'])
return self.__flt.pqr
def fltPqrNoBias(self):
if 'pqrNoBias' in self.v.dtype.names and self.__flt.pqrNoBias is None:
self.__flt.pqrNoBias = ft.lpfNoDelay(self.v['pqrNoBias'], self.cornerFreqHz, time=self.v['time'])
return self.__flt.pqrNoBias
def fltAccNoBias(self):
if 'accNoBias' in self.v.dtype.names and self.__flt.accNoBias is None:
self.__flt.accNoBias = ft.lpfNoDelay(self.v['accNoBias'], self.cornerFreqHz, time=self.v['time'])
return self.__flt.accNoBias
def fltBarNoBias(self):
if 'mslBarNoBias' in self.v.dtype.names and self.__flt.barNoBias is None:
self.__flt.mslBarNoBias = ft.lpfNoDelay(self.v['mslBarNoBias'], self.cornerFreqHz, time=self.v['time'])
return self.__flt.mslBarNoBias
# self.mslBar = ft.smooth(self.v['mslBar']+72, delta=200)
# self.mslBarDot = ft.derivative(self.v['time'], self.mslBar, delta=10)
# self.mslBarDotLpf = ft.lpfNoDelay(self.mslBarDot, cornerFreqHz=0.5, time = self.v['time'])
class cINS:
def __init__(self, _v):
# self.v = _v
self.v = _v[:-1] # Throw out last element
self.__velNED = None
self.__course = None
self.__ecef = None
self.__ned = None
self.__istatus = None
self.__hstatus = None
self.__size = np.shape(self.v['tow'])[0]
self.time = gpsTimeToUTC(self.v['week'], self.v['tow'])
if not 'euler' in self.v.dtype.names and 'q' in self.v.dtype.names:
# self.v['euler'] = pose.quat2eulerArray(self.v['q'])
# self.euler = pose.quat2eulerArray(self.v['q'])
dtypeeuler = np.dtype([('euler', (np.float, 3))])
e = pose.quat2eulerArray(self.v['q'])
euler = np.ndarray(np.shape(e)[0], dtype=dtypeeuler, buffer=e)
self.v = join_struct_arrays2([self.v, euler])
if not 'q' in self.v.dtype.names and 'euler' in self.v.dtype.names:
# self.v['q'] = pose.euler2quatArray(self.v['euler'])
# self.q = pose.euler2quatArray(self.v['euler'])
dtypeq = np.dtype([('q', (np.float, 4))])
q = pose.euler2quatArray(self.v['euler'])
quat = np.ndarray(np.shape(q)[0], dtype=dtypeq, buffer=q)
self.v = join_struct_arrays2([self.v, quat])
# Velocity vector in inertial frame
def velNed(self):
if self.__velNED is None:
self.__velNED = np.zeros(np.shape(self.v['uvw']))
for i in range(0, self.__size):
DCM = pose.eulerDCM(self.v['euler'][i, :])
velNED = np.dot(DCM.T, self.v['uvw'][i, :]) # body to inertial frame
self.__velNED[i, :] = velNED
return self.__velNED
    def course(self):
        if self.__course is None:
            velNed = self.velNed()
            self.__course = np.arctan2(velNed[:, 1], velNed[:, 0])
        return self.__course
def ned(self):
global refLla
if self.__ned is None:
self.__ned = pose.lla2ned(refLla, self.v['lla'])
return self.__ned
def ecef(self):
if self.__ecef is None:
self.__ecef = pose.lla2ecef(self.v['lla'])
return self.__ecef
def set(self, time):
self.time = time
def speed2D(self):
return np.sqrt(np.square(self.v['uvw'][:, 0]) +
np.square(self.v['uvw'][:, 1]))
def speed3D(self):
return np.sqrt(np.square(self.v['uvw'][:, 0]) +
np.square(self.v['uvw'][:, 1]) +
np.square(self.v['uvw'][:, 2]))
def iStatus(self):
if self.__istatus is None:
self.__istatus = insStatus(self.v['iStatus'])
return self.__istatus
def hStatus(self):
if self.__hstatus is None:
self.__hstatus = hdwStatus(self.v['hStatus'])
return self.__hstatus
class cRIMU:
def __init__(self, _v,
accBias=np.r_[0, 0, 0],
pqrBias=np.r_[0, 0, 0],
rotate=np.r_[0, 0, 0]):
self.v = _v
self.cornerFreqHz = 30
self.__flt = cObj()
self.__flt.pqr = None
self.__flt.acc = None
if accBias[0] != 0 or accBias[1] != 0 or accBias[2] != 0:
self.v['acc'] += accBias
if pqrBias[0] != 0 or pqrBias[1] != 0 or pqrBias[2] != 0:
self.v['pqr'] += pqrBias
if rotate[0] != 0 or rotate[1] != 0 or rotate[2] != 0:
self.v['acc'] = pose.vectorRotateInertialToBody2(self.v['acc'], rotate)
self.v['pqr'] = pose.vectorRotateInertialToBody2(self.v['pqr'], rotate)
def fltPqr(self):
if self.__flt.pqr is None:
self.__flt.pqr = ft.lpfNoDelay(self.v['pqr'], self.cornerFreqHz, time=self.v['time'])
return self.__flt.pqr
def fltAcc(self):
if self.__flt.acc is None:
self.__flt.acc = ft.lpfNoDelay(self.v['acc'], self.cornerFreqHz, time=self.v['time'])
return self.__flt.acc
class cRINS:
def __init__(self, _v, rotate=np.r_[0, 0, 0]):
global refLla
self.v = _v
self.__ned = None
self.__nedDotDot = None
self.__uvw = None
self.__rotate = rotate
# self.v['nedDot'] = ft.smooth(self.v['nedDot'], delta=10)
# self.v['euler'] = ft.smooth(self.v['euler'], delta=10)
if self.__rotate[0] != 0 or self.__rotate[1] != 0 or self.__rotate[2] != 0:
self.v['euler'][:, 0] += self.__rotate[0]
self.v['euler'][:, 1] += self.__rotate[1]
self.v['euler'][:, 2] += self.__rotate[2]
def ned(self):
if self.__ned is None:
self.__ned = pose.lla2ned(refLla, self.v['lla'])
return self.__ned
def nedDotDot(self):
if self.__nedDotDot is None:
self.__nedDotDot = ft.derivative(self.v['time'], self.v['nedDot'], delta=2)
self.__nedDotDot[:, 2] -= 9.80665
cornerFreqHz = 10
self.__nedDotDot = ft.lpfNoDelay(self.__nedDotDot, cornerFreqHz, time=self.v['time'])
return self.__nedDotDot
def uvw(self):
if self.__uvw is None:
self.__uvw = pose.vectorRotateInertialToBody(self.v['nedDot'], self.v['euler'])
            if self.__rotate[0] != 0 or self.__rotate[1] != 0 or self.__rotate[2] != 0:
                self.__uvw = pose.vectorRotateInertialToBody2(self.__uvw, self.__rotate)
return self.__uvw
class cRGPS:
def __init__(self, _v):
global refLla
self.v = _v
self.__ned = None
self.__acc = cObj()
self.__acc.ned = None
def ned(self):
if self.__ned is None:
self.__ned = pose.lla2ned(refLla, self.v['lla'])
return self.__ned
def accNed(self):
if self.__acc.ned is None:
# Create Accelerations from GPS velocities
# self.__acc.ned = ft.meanDerivative(self.v['time'], self.v['vel.ned'], 5, 3)
self.__acc.ned = ft.meanDerivative(self.v['time'], self.v['vel.ned'], 2, 2)
return self.__acc.ned
class cGPS:
def __init__(self, _v):
global refLla
global gpsWeek
self.v = _v
self.time = gpsTimeToUTC(self.v['week'], (_v['timeOfWeekMs'] * 0.001))
self.ned = pose.lla2ned(refLla, _v['lla'])
self.satsUsed = (_v['status'] >> 0) & 0xFF
self.fixType = (_v['status'] >> 8) & 0xFF
self.rtkMode = (_v['status'] >> 20) & 0x01
# self.vectorToBase = _v['vectorToBase']
# self.distanceToBase = _v['distanceToBase']
class cGPSRaw:
def __init__(self, _v):
self.count = _v['count']
self.type = _v['type']
self.receiverIndex = _v['receiverIndex']
self.corruptCount = int(_v['corrupt_data'])
if 'obs' in _v.keys():
self.obs = _v['obs']
try:
self.obstime = np.array([np.datetime64(int(np.round((t['time'] + t['sec'])*1000000)), 'us') for t in _v['obs']['time']])
except OverflowError as e:
debug = 1
class cRTKMisc:
def __init__(self, _v):
self.v = _v
self.time = gpsTimeToUTC(_v['week'], (_v['timeOfWeekMs'] * 0.001))
self.slipCounter = _v['cycleSlipCount']
self.arThreshold = _v['arThreshold']
self.baseLla = _v['baseLla']
self.heading = _v['rtkCompassHeading']
class cGpsVel:
    def __init__(self, _v):
        global gpsWeek
        self.v = _v
        self.time = gpsTimeToUTC(gpsWeek, _v['timeOfWeekMs'] * 0.001)
        self.__acc = None
def acc(self):
if self.__acc is None:
self.__acc = cObj()
self.__acc.time = self.time
# self.__acc.ned = ft.meanDerivative(self.vel.time, self.v['ned'], 5, 3)
self.__acc.ned = ft.meanDerivative(self.time, self.v['ned'], 2, 2)
return self.__acc
class cGpsAcc:
def __init__(self, _v):
self.v = _v
# self.time = _v['timeMs'] * 0.001
self.time = gpsTimeToUTC(self.v['week'], (_v['timeOfWeekMs'] * 0.001))
class cBias:
def __init__(self, _v):
global gpsWeek
self.v = _v
# self.time = _v['timeMs'] * 0.001
self.time = gpsTimeToUTC(gpsWeek, (_v['towMs'] * 0.001))
class cInsRes:
def __init__(self, _v):
global gpsWeek
self.v = _v
self.time = gpsTimeToUTC(gpsWeek, (_v['towMs'] * 0.001))
class cDevInfo:
def __init__(self, _v):
self.v = _v
class cSysParams:
def __init__(self, _v):
global gpsWeek
self.v = _v
self.__istatus = None
self.__hstatus = None
if 'tow' in _v.dtype.names:
# self.time = _v['time']
self.time = gpsTimeToUTC(gpsWeek, _v['tow'])
if 'towMs' in _v.dtype.names:
# self.time = (_v['timeMs']) * 0.001
self.time = gpsTimeToUTC(gpsWeek, (_v['towMs'] * 0.001))
def iStatus(self):
if self.__istatus is None:
self.__istatus = insStatus(self.v['iStatus'])
return self.__istatus
def hStatus(self):
        if self.__hstatus is None:
self.__hstatus = hdwStatus(self.v['hStatus'])
return self.__hstatus
class cObsParams:
def __init__(self, _v):
global refLla
self.v = _v
self.accNed = cObj()
self.velNed = cObj()
self.lla = cObj()
self.uvw = cObj()
# self.time = _v['timeMs'] * 0.001
# self.accNed.time = _v['accNed.timeMs'] * 0.001
# self.velNed.time = _v['velNed.timeMs'] * 0.001
# self.lla.time = _v['lla.timeMs'] * 0.001
# self.uvw.time = _v['uvw.timeMs'] * 0.001
self.time = gpsTimeToUTC(gpsWeek, (_v['towMs'] * 0.001))
self.accNed.time = gpsTimeToUTC(gpsWeek, (_v['accNed']['towMs'] * 0.001))
self.velNed.time = gpsTimeToUTC(gpsWeek, (_v['velNed']['towMs'] * 0.001))
self.lla.time = gpsTimeToUTC(gpsWeek, (_v['lla']['towMs'] * 0.001))
self.accNed.refHdg = np.arctan2(self.v['accNed']['ref'][:, 1], self.v['accNed']['ref'][:, 0])
self.accNed.insHdg = np.arctan2(self.v['accNed']['ins'][:, 1], self.v['accNed']['ins'][:, 0])
self.lla.refNed = pose.lla2ned(refLla, _v['lla']['ref'])
self.lla.insNed = pose.lla2ned(refLla, _v['lla']['ins'])
# self.v['mslBar'] += 86;
class cInsParams:
def __init__(self, _v):
global gpsWeek
self.v = _v
# self.time = _v['timeMs'] * 0.001
self.time = gpsTimeToUTC(gpsWeek, (_v['towMs'] * 0.001))
if 'magTowMs' in _v.dtype.names:
# self.magTime = (_v['magTowMs']) * 0.001
self.magTime = gpsTimeToUTC(gpsWeek, (_v['magTowMs'] * 0.001))
def lla2kml(time, lla, serialNumber, kmlFileName="log.kml", **kwargs):
kml = simplekml.Kml()
color = kwargs.pop('color', simplekml.Color.yellow)
altitudeMode = kwargs.pop('altitudeMode', simplekml.constants.AltitudeMode.absolute)
timeStep = kwargs.pop('timeStep', 0)
latLon = []
tNext = 0
lNext = 0
for i in range(0, np.shape(lla)[0]):
latLon.append((lla[i, 1], lla[i, 0], lla[i, 2]))
# Add timestamp
if timeStep:
# if timeStep == -1:
# pt = kml.newpoint(name="%.1f" % time[i], coords=[latLon[i]])
# pt.style.iconstyle.color = color
# pt.style.iconstyle.scale = 0.5
# pt.style.labelstyle.scale = 0.7
if time[i] >= tNext:
tNext += timeStep
# round(tNext, timeStep)
if time[i] >= lNext:
if timeStep > lNext:
lNext += timeStep
else:
lNext += 1
pt = kml.newpoint(name="%.2f" % time[i], coords=[latLon[i]])
else:
pt = kml.newpoint(coords=[latLon[i]])
pt.style.iconstyle.color = color
pt.style.iconstyle.scale = 0.4
pt.style.labelstyle.scale = 0.6
pt.altitudemode = altitudeMode
# Add path
ls = kml.newlinestring(name="Tracks", description=serialNumber + " tracks", coords=latLon)
# Style
ls.extrude = 1
ls.altitudemode = altitudeMode
ls.style.linestyle.width = 2
ls.style.linestyle.color = color
kml.save(kmlFileName)
return kmlFileName
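# Example usage (illustrative; `ins` stands for any object exposing matching
# time and lla arrays):
#   lla2kml(ins.time, ins.v['lla'], '30612', 'track.kml', timeStep=5)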
##### INS Status #####
def insStatus(istatus):
result = cObj()
result.align = cObj()
result.align.coarse = cObj()
result.align.good = cObj()
result.align.fine = cObj()
# 0-3
result.align.coarse.att = (istatus >> 0) & 1
result.align.coarse.vel = (istatus >> 1) & 1
result.align.coarse.pos = (istatus >> 2) & 1
# 4-7
result.align.good.att = (istatus >> 4) & 1
result.align.good.vel = (istatus >> 5) & 1
result.align.good.pos = (istatus >> 6) & 1
result.align.fine.att = (istatus >> 7) & 1
# 8-11
result.usingGps = (istatus >> 8) & 1
result.usingMag = (istatus >> 11) & 1
# 12-15
result.navMode = (istatus >> 12) & 1
# 16-23
result.solutionStatus = (istatus >> 16) & 0x7
# 20-23
result.magActiveCalSet = (istatus >> 20) & 1
result.magRecalibrating = (istatus >> 22) & 1
result.magInterOrBadCal = ((istatus >> 23) & 1) != 1
# 24-27
# 28-31
result.rtosTaskPeriodOverrun = (istatus >> 29) & 1
result.generalFault = (istatus >> 31) & 1
return result
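# Worked example (illustrative): insStatus(0x1177) yields coarse and good
# att/vel/pos all set (bits 0-2 and 4-6), usingGps=1 (bit 8) and navMode=1
# (bit 12).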
##### Hardware Status #####
def hdwStatus(hstatus):
result = cObj()
# 0-3
result.motionGyrSig = (hstatus >> 0) & 0x1
result.motionAccSig = (hstatus >> 1) & 0x1
result.motionGyrDev = (hstatus >> 2) & 0x1
result.motionAccDev = (hstatus >> 3) & 0x1
# 4-7
result.satellite_rx = (hstatus >> 4) & 0x1
# 8-11
result.saturationGyr = (hstatus >> 8) & 0x1
result.saturationAcc = (hstatus >> 9) & 0x1
result.saturationMag = (hstatus >> 10) & 0x1
result.saturationBaro = (hstatus >> 11) & 0x1
# 12-15
result.saturationHistory = (hstatus >> 12) & 0x1
# 16-19
result.errComTxLimited = (hstatus >> 16) & 0x1
result.errComRxOverrun = (hstatus >> 17) & 0x1
result.errGpsTxLimited = (hstatus >> 18) & 0x1
result.errGpsRxOverrun = (hstatus >> 19) & 0x1
# 20-23
result.comParseErrCount = (hstatus >> 20) & 0xF
# 24-27
result.selfTestFault = (hstatus >> 24) & 0x1
result.errTemperature = (hstatus >> 25) & 0x1
# 28-31
result.faultWatchdogReset = (hstatus >> 28) & 0x1
result.faultBODReset = (hstatus >> 29) & 0x1
result.faultPORReset = (hstatus >> 30) & 0x1
result.faultCPUErrReset = (hstatus >> 31) & 0x1
return result
| mit |
GPflow/GPflowOpt | doc/source/conf.py | 1 | 5534 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# GPflowOpt documentation build configuration file, created by
# sphinx-quickstart on Sun Apr 30 20:34:41 2017.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
import os
from gpflowopt import __version__
# on_rtd indicates whether we are on readthedocs.org; this line of code was grabbed from docs.readthedocs.org
on_rtd = os.environ.get('READTHEDOCS', None) == 'True'
if not on_rtd: # only import and set the theme if we're building docs locally
import sphinx_rtd_theme
html_theme = 'sphinx_rtd_theme'
html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]
# otherwise, readthedocs.org uses their theme by default, so no need to specify it
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.autosummary',
'sphinx.ext.todo',
'sphinx.ext.mathjax',
'sphinx.ext.viewcode',
'numpydoc',
'nbsphinx',
'IPython.sphinxext.ipython_console_highlighting'
]
numpydoc_show_class_members = True
numpydoc_show_inherited_class_members = True
numpydoc_class_members_toctree = False
#autoclass_content = 'both'
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
#
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = 'GPflowOpt'
copyright = '2017, Joachim van der Herten'
author = 'Joachim van der Herten, Ivo Couckuyt'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = __version__
# The full version, including alpha/beta/rc tags.
release = version
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This patterns also effect to html_static_path and html_extra_path
exclude_patterns = []
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = True
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
#html_theme = 'alabaster'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#
# html_theme_options = {}
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = []
# -- Options for HTMLHelp output ------------------------------------------
# Output file base name for HTML help builder.
htmlhelp_basename = 'GPflowOptdoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#
# 'preamble': '',
# Latex figure (float) alignment
#
# 'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'gpflowopt.tex', 'GPflowOpt Documentation',
'Joachim van der Herten', 'manual'),
]
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'GPflowOpt', 'GPflowOpt Documentation',
[author], 1)
]
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'GPflowOpt', 'GPflowOpt Documentation',
author, 'GPflowOpt', 'One line description of project.',
'Miscellaneous'),
]
| apache-2.0 |
rsip22/101 | Python/OO/cheapest_options/models/establishment.py | 1 | 1339 | import random
from constants import PRICE_TABLE
from enums import client
class Establishment:
"""
Class for an establishment in the chain.
Attributes:
name: (str) name of the establishment
stars: (int) establishment category
is_rewards: (bool) establishment client participates in the rewards program
"""
REGULAR = client.ClientType.REGULAR.value
REWARDS = client.ClientType.REWARDS.value
def __init__(self, stars, is_rewards=False):
self.is_rewards = is_rewards
self.stars = stars
self.name = self._get_establishment_name()
self.weekday_price = self._get_weekday_price()
self.weekend_price = self._get_weekend_price()
def _get_weekday_price(self):
if not self.is_rewards:
return PRICE_TABLE[self.stars]["weekday"][self.REGULAR]
return PRICE_TABLE[self.stars]["weekday"][self.REWARDS]
def _get_weekend_price(self):
if not self.is_rewards:
return PRICE_TABLE[self.stars]["weekend"][self.REGULAR]
return PRICE_TABLE[self.stars]["weekend"][self.REWARDS]
def _get_establishment_name(self):
establishment_for_category = PRICE_TABLE[self.stars]["establishment"]
return random.choice(establishment_for_category)
def __str__(self):
return self.name
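# Example usage (illustrative; assumes PRICE_TABLE defines entries for the
# requested star rating):
#   e = Establishment(stars=3, is_rewards=True)
#   print(e, e.weekday_price, e.weekend_price)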
| gpl-2.0 |
kyrus/crypto-un-locker | CryptoUnLocker.py | 2 | 10737 | #!/usr/bin/env python
import struct
import os
import argparse
import shutil
import sys
from collections import namedtuple
from datetime import datetime
import csv
import re
from Crypto.Cipher import AES
from Crypto.PublicKey import RSA
from Crypto.Cipher import PKCS1_v1_5
from Crypto.Hash import SHA
from Crypto.Util.number import bytes_to_long
"""
CryptoLocker file structure:
0x14 bytes : SHA1 hash of '\x00'*4 + next 0x100 bytes of file.
0x100 bytes : AES key encrypted with RSA PKCS#1 v1.5:
0x2c bytes : AES key blob
remainder : file data encrypted with AES256-CBC with IV of 0x00
Key blob is a Microsoft PUBLICKEYSTRUC:
typedef struct _PUBLICKEYSTRUC {
BYTE bType;
BYTE bVersion;
WORD reserved;
ALG_ID aiKeyAlg;
} BLOBHEADER, PUBLICKEYSTRUC;
where:
bType = 0x08
bVersion = 0x02
reserved = 0
aiKeyAlg = 0x6610 (AES-256)
followed by a DWORD length of 0x20, and finally the 32 byte AES key.
"""
PUBLICKEYSTRUC = namedtuple('PUBLICKEYSTRUC', 'bType bVersion reserved aiKeyAlg')
RSAPUBKEY = namedtuple('RSAPUBKEY', 'magic bitlen pubexp')
PRIVATEKEYBLOB = namedtuple('PRIVATEKEYBLOB', 'modulus prime1 prime2 exponent1 exponent2 coefficient privateExponent')
PUBLICKEYSTRUC_s = struct.Struct('<bbHI')
RSAPUBKEY_s = struct.Struct('<4sII')
key_re = re.compile('-----BEGIN.*KEY-----\n(.*)\n-----END.*KEY-----', re.DOTALL)
def subtract(a, b):
    if a is None or b is None:
        return None
    else:
        return ord(b) - ord(a)
class OutputLevel:
VerboseLevel, InfoLevel, WarnLevel, ErrorLevel = range(4)
class CryptoUnLocker(object):
def __init__(self):
self.keys = []
def loadKeyFromFile(self, fn):
d = open(fn, 'rb').read()
matches = key_re.match(d)
if matches:
self.loadKeyFromString(matches.group(0))
return
# fall through if the file does not contain a PEM encoded RSA key
# try the CryptImportKey Win32 file format
if self.CryptImportKey(d):
return
# Apparently a new version of CryptoLocker is adding what looks
# like a version number to the start of the RSA key format. Try
# skipping over the first four bytes of the file then interpreting
# the rest as an RSA private key.
if self.CryptImportKey(d[4:]):
return
# if we can't import the file, raise an exception
raise Exception("Could not parse a private key from file")
def CryptImportKey(self, d):
publickeystruc = PUBLICKEYSTRUC._make(PUBLICKEYSTRUC_s.unpack_from(d))
if publickeystruc.bType == 7 and publickeystruc.bVersion == 2 and publickeystruc.aiKeyAlg == 41984:
rsapubkey = RSAPUBKEY._make(RSAPUBKEY_s.unpack_from(d[8:]))
if rsapubkey.magic == 'RSA2':
bitlen8 = rsapubkey.bitlen/8
bitlen16 = rsapubkey.bitlen/16
PRIVATEKEYBLOB_s = struct.Struct('%ds%ds%ds%ds%ds%ds%ds' % (bitlen8, bitlen16, bitlen16, bitlen16, bitlen16, bitlen16, bitlen8))
privatekey = PRIVATEKEYBLOB._make(map(bytes_to_long, PRIVATEKEYBLOB_s.unpack_from(d[20:])))
r = RSA.construct((privatekey.modulus, long(rsapubkey.pubexp), privatekey.privateExponent,
privatekey.prime1, privatekey.prime2))
self.keys.append(r)
return True
return False
def loadKeyFromString(self, s):
r = RSA.importKey(s)
self.keys.append(r)
def isCryptoLocker(self, fn):
file_header = open(fn, 'rb').read(0x114)
if len(file_header) != 0x114:
return False
# validate that the header is correct
header_hash = SHA.new('\x00'*4 + file_header[0x14:0x114])
return header_hash.digest() == file_header[:0x14]
def guessIfWiped(self, fn):
file_header = open(fn, 'rb').read(64)
if len(file_header) != 64:
return False
lst = map(subtract, file_header[:32:2], file_header[1:32:2])
return not lst or [lst[0]]*len(lst) == lst
def decryptFile(self, fn):
aes_key = None
with open(fn, 'rb') as fp:
file_header = fp.read(0x114)
if len(file_header) != 0x114:
raise Exception("Not a CryptoLocker file")
for rsa_key in self.keys:
aes_key = self.retrieveAESKey(rsa_key, file_header)
if aes_key:
break
if not aes_key:
raise Exception("Could not find the private key for this CryptoLocker file")
# read the remaining data and decrypt with the AES key
d = fp.read()
a = AES.new(aes_key, mode=AES.MODE_CBC, IV='\x00'*16)
d = a.decrypt(d)
d = d[:-ord(d[-1])]
return d
def retrieveAESKey(self, r, file_header):
# we have to reverse the bytes in the header to conform with the CryptoAPI
# CryptDecrypt function.
file_header = file_header[0x14:0x114]
file_header = file_header[::-1]
# decrypt the AES key blob
c = PKCS1_v1_5.new(r)
sentinel = '\x00' * 16
blob = c.decrypt(file_header, sentinel)
# retrieve key from file_header
(bType, bVersion, reserved, aiKeyAlg, keyLen) = struct.unpack('<BBHII', blob[:0xc])
if bType == 0x08 and bVersion == 0x02 and reserved == 0 and \
aiKeyAlg == 0x6610 and keyLen == 32:
aes_key = blob[0x0c:0x0c+32]
return aes_key
else:
return None
class CryptoUnLockerProcess(object):
def __init__(self, args, unlocker):
self.args = args
self.unlocker = unlocker
self.csvfp = None
self.csv = None
def doit(self):
if self.args.csvfile:
self.csvfp = open(self.args.csvfile,'wb')
self.csv = csv.writer(self.csvfp)
self.csv.writerow(['Timestamp', 'Filename', 'Message'])
keyfiles = []
if self.args.keyfile:
keyfiles = [self.args.keyfile]
elif self.args.keydir:
keyfiles = [os.path.join(self.args.keydir, fn) for fn in os.listdir(self.args.keydir)]
for fn in keyfiles:
try:
self.unlocker.loadKeyFromFile(fn)
self.output(OutputLevel.VerboseLevel, fn, "Successfully loaded key file")
except Exception, e:
self.output(OutputLevel.ErrorLevel, fn, "Unsuccessful loading key file: %s" % e.message)
if not len(self.unlocker.keys) and not self.args.detect:
self.output(OutputLevel.ErrorLevel, '', 'No key files were successfully loaded. Exiting.')
return 1
if self.args.recursive:
for root, dirs, files in os.walk(self.args.encrypted_filenames[0]):
for fn in files:
self.processFile(root, fn)
else:
for fn in self.args.encrypted_filenames:
self.processFile('', fn)
return 0
def processFile(self, pathname, fn):
if fn.endswith('.bak'):
# skip backup files
return
fullpath = os.path.join(pathname, fn)
try:
if self.unlocker.guessIfWiped(fullpath):
self.output(OutputLevel.VerboseLevel, fullpath, "File appears wiped")
return
elif not self.unlocker.isCryptoLocker(fullpath):
self.output(OutputLevel.VerboseLevel, fullpath, "Not a CryptoLocker file")
return
else:
if self.args.detect:
self.output(OutputLevel.InfoLevel, fullpath, "Potential CryptoLocker file")
return
except Exception, e:
self.output(OutputLevel.ErrorLevel, fullpath, "Unsuccessful opening file: %s" % e.message)
return
try:
decrypted_file = self.unlocker.decryptFile(fullpath)
self.output(OutputLevel.InfoLevel, fullpath, "Successfully decrypted file")
if not self.args.dry_run:
if self.args.destdir:
destdir = os.path.join(self.args.destdir, pathname)
if not os.path.exists(destdir):
os.makedirs(destdir)
open(os.path.join(destdir, fn), 'wb').write(decrypted_file)
else:
shutil.copy2(fullpath, fullpath + ".bak")
open(os.path.join(pathname, fn), 'wb').write(decrypted_file)
except Exception, e:
self.output(OutputLevel.ErrorLevel, fullpath, "Unsuccessful decrypting file: %s" % e.message)
def output(self, level, fn, msg):
if level == OutputLevel.VerboseLevel and not self.args.verbose:
return
if self.csv:
self.csv.writerow([datetime.now(), fn, msg])
icon = '[.]'
if level == OutputLevel.InfoLevel:
icon = '[+]'
elif level > OutputLevel.InfoLevel:
icon = '[-]'
if fn:
sys.stderr.write('%s %s: %s\n' % (icon, msg, fn))
else:
sys.stderr.write('%s %s\n' % (icon, msg))
sys.stderr.flush()
def main():
parser = argparse.ArgumentParser(description='Decrypt CryptoLocker encrypted files.')
group = parser.add_mutually_exclusive_group(required=True)
group.add_argument('--keyfile', action='store', dest='keyfile',
help='File containing the private key, or the EXE file provided for decryption')
group.add_argument('--keydir', action='store', dest='keydir',
help='Directory containing any number of private keys; the appropriate private key will be used during the decryption process')
group.add_argument('--detect', action='store_true', dest='detect', help="Don't try to decrypt; just find files that may be CryptoLockered")
parser.add_argument('-r', action='store_true', dest='recursive', help="Recursively search subdirectories")
parser.add_argument('-v', action='store_true', dest='verbose', help="Verbose output")
parser.add_argument('--dry-run', action='store_true', dest='dry_run', help="Don't actually write decrypted files")
parser.add_argument('-o', action='store', dest='destdir', help='Copy all decrypted files to an output directory, mirroring the source path')
parser.add_argument('--csv', action='store', dest='csvfile', help='Output to a CSV file')
parser.add_argument('encrypted_filenames', nargs="+")
results = parser.parse_args()
unlocker = CryptoUnLocker()
processor = CryptoUnLockerProcess(results, unlocker)
return processor.doit()
if __name__ == '__main__':
sys.exit(main())
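# Example invocations (illustrative; the script filename is hypothetical):
#
#   python decryptolocker.py --keyfile privkey.pem -r -o decrypted/ ~/Documents
#   python decryptolocker.py --detect -r -v --csv report.csv ~/Documents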
| mit |
anthgur/servo | components/script/dom/bindings/codegen/parser/tests/test_cereactions.py | 32 | 3904 | def WebIDLTest(parser, harness):
threw = False
try:
parser.parse("""
interface Foo {
[CEReactions(DOMString a)] void foo(boolean arg2);
};
""")
results = parser.finish()
except:
threw = True
harness.ok(threw, "Should have thrown for [CEReactions] with an argument")
parser = parser.reset()
threw = False
try:
parser.parse("""
interface Foo {
[CEReactions(DOMString b)] readonly attribute boolean bar;
};
""")
results = parser.finish()
except:
threw = True
harness.ok(threw, "Should have thrown for [CEReactions] with an argument")
parser = parser.reset()
threw = False
try:
parser.parse("""
interface Foo {
[CEReactions] attribute boolean bar;
};
""")
results = parser.finish()
except Exception as e:
harness.ok(False, "Shouldn't have thrown for [CEReactions] used on writable attribute. %s" % e)
threw = True
parser = parser.reset()
threw = False
try:
parser.parse("""
interface Foo {
[CEReactions] void foo(boolean arg2);
};
""")
results = parser.finish()
except Exception as e:
harness.ok(False, "Shouldn't have thrown for [CEReactions] used on regular operations. %s" % e)
threw = True
parser = parser.reset()
threw = False
try:
parser.parse("""
interface Foo {
[CEReactions] readonly attribute boolean A;
};
""")
results = parser.finish()
except:
threw = True
harness.ok(threw, "Should have thrown for [CEReactions] used on a readonly attribute")
parser = parser.reset()
threw = False
try:
parser.parse("""
[CEReactions]
interface Foo {
};
""")
results = parser.finish()
except:
threw = True
harness.ok(threw, "Should have thrown for [CEReactions] used on an interface")
parser = parser.reset()
threw = False
try:
parser.parse("""
interface Foo {
[CEReactions] getter any(DOMString name);
};
""")
results = parser.finish()
except:
threw = True
harness.ok(threw,
"Should have thrown for [CEReactions] used on a named getter")
parser = parser.reset()
threw = False
try:
parser.parse("""
interface Foo {
[CEReactions] creator boolean (DOMString name, boolean value);
};
""")
results = parser.finish()
except:
threw = True
harness.ok(threw,
"Should have thrown for [CEReactions] used on a named creator")
parser = parser.reset()
threw = False
try:
parser.parse("""
interface Foo {
[CEReactions] legacycaller double compute(double x);
};
""")
results = parser.finish()
except:
threw = True
harness.ok(threw,
"Should have thrown for [CEReactions] used on a legacycaller")
parser = parser.reset()
threw = False
try:
parser.parse("""
interface Foo {
[CEReactions] stringifier DOMString ();
};
""")
results = parser.finish()
except:
threw = True
harness.ok(threw,
"Should have thrown for [CEReactions] used on a stringifier")
parser = parser.reset()
threw = False
try:
parser.parse("""
interface Foo {
[CEReactions] jsonifier;
};
""")
results = parser.finish()
except:
threw = True
harness.ok(threw, "Should have thrown for [CEReactions] used on a jsonifier")
| mpl-2.0 |
kejkz/holmium.core | holmium/core/enhancers.py | 3 | 2484 | """
implementation of element enhancer base
"""
from selenium.webdriver.support.ui import Select
class ElementEnhancer(object):
"""
base class for implementing custom element enhancers to
add functionality to located webelements based on the
element type (tag name)
"""
# pylint: disable=too-few-public-methods
__TAG__ = ""
def __init__(self, element):
self.element = element
super(ElementEnhancer, self).__init__()
@classmethod
def matches(cls, element):
"""
class method to verify that this enhancer is appropriate
for the provided webelement
"""
return cls.__TAG__.lower() == element.tag_name.lower()
def __getattribute__(self, key):
# look the attribute up on the enhancer itself first, then fall back
# to the wrapped webelement; any failure is reported as a single
# AttributeError naming both classes.
element = object.__getattribute__(self, "element")
try:
try:
return super(ElementEnhancer, self).__getattribute__(key)
except AttributeError:
return element.__getattribute__(key)
except:
raise AttributeError(
"neither %s nor %s object has an attribute %s" % (
self.__class__.__name__, element.__class__.__name__, key))
class _SelectMixin(Select, object):
"""
cooperative super version of Select
"""
def __init__(self):
super(_SelectMixin, self).__init__(self.element)
Select.__init__(self, self.element)
class SelectEnhancer(ElementEnhancer, _SelectMixin):
"""
Enhancer for the select tag
"""
__TAG__ = "select"
REGISTERED_ENHANCERS = [SelectEnhancer]
def register_enhancer(enhancer):
"""
registers a :class:`ElementEnhancer` with the internal
lookup
"""
if not issubclass(enhancer, ElementEnhancer):
raise TypeError(
"Only subclasses of holmium.core.ElementEnhancer can be registered")
if not hasattr(enhancer, "__TAG__") or not enhancer.__TAG__:
raise AttributeError(
"ElementEnhancer implementations must declare a __TAG__"
"property to match against"
)
REGISTERED_ENHANCERS.insert(0, enhancer)
def reset_enhancers():
"""
resets the state so that any :class:`ElementEnhancer` that was registered
via a call to :func:`register_enhancer` is removed.
"""
global REGISTERED_ENHANCERS
REGISTERED_ENHANCERS = [SelectEnhancer]
def get_enhancers():
"""
returns the global registered enhancers
"""
return REGISTERED_ENHANCERS
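# Illustrative sketch (not part of the module): defining and registering a
# custom enhancer; the class and method names below are hypothetical.
#
#     class InputEnhancer(ElementEnhancer):
#         __TAG__ = "input"
#
#         def value(self):
#             return self.element.get_attribute("value")
#
#     register_enhancer(InputEnhancer)
#     # reset_enhancers() later restores the default lookup state.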
| mit |
boyuegame/kbengine | kbe/res/scripts/common/Lib/distutils/command/sdist.py | 96 | 18221 | """distutils.command.sdist
Implements the Distutils 'sdist' command (create a source distribution)."""
import os
import sys
from glob import glob
from warnings import warn
from distutils.core import Command
from distutils import dir_util, dep_util, file_util, archive_util
from distutils.text_file import TextFile
from distutils.errors import *
from distutils.filelist import FileList
from distutils import log
from distutils.util import convert_path
def show_formats():
"""Print all possible values for the 'formats' option (used by
the "--help-formats" command-line option).
"""
from distutils.fancy_getopt import FancyGetopt
from distutils.archive_util import ARCHIVE_FORMATS
formats = []
for format in ARCHIVE_FORMATS.keys():
formats.append(("formats=" + format, None,
ARCHIVE_FORMATS[format][2]))
formats.sort()
FancyGetopt(formats).print_help(
"List of available source distribution formats:")
class sdist(Command):
description = "create a source distribution (tarball, zip file, etc.)"
def checking_metadata(self):
"""Callable used for the check sub-command.
Placed here so user_options can view it"""
return self.metadata_check
user_options = [
('template=', 't',
"name of manifest template file [default: MANIFEST.in]"),
('manifest=', 'm',
"name of manifest file [default: MANIFEST]"),
('use-defaults', None,
"include the default file set in the manifest "
"[default; disable with --no-defaults]"),
('no-defaults', None,
"don't include the default file set"),
('prune', None,
"specifically exclude files/directories that should not be "
"distributed (build tree, RCS/CVS dirs, etc.) "
"[default; disable with --no-prune]"),
('no-prune', None,
"don't automatically exclude anything"),
('manifest-only', 'o',
"just regenerate the manifest and then stop "
"(implies --force-manifest)"),
('force-manifest', 'f',
"forcibly regenerate the manifest and carry on as usual. "
"Deprecated: now the manifest is always regenerated."),
('formats=', None,
"formats for source distribution (comma-separated list)"),
('keep-temp', 'k',
"keep the distribution tree around after creating " +
"archive file(s)"),
('dist-dir=', 'd',
"directory to put the source distribution archive(s) in "
"[default: dist]"),
('metadata-check', None,
"Ensure that all required elements of meta-data "
"are supplied. Warn if any missing. [default]"),
('owner=', 'u',
"Owner name used when creating a tar file [default: current user]"),
('group=', 'g',
"Group name used when creating a tar file [default: current group]"),
]
boolean_options = ['use-defaults', 'prune',
'manifest-only', 'force-manifest',
'keep-temp', 'metadata-check']
help_options = [
('help-formats', None,
"list available distribution formats", show_formats),
]
negative_opt = {'no-defaults': 'use-defaults',
'no-prune': 'prune' }
default_format = {'posix': 'gztar',
'nt': 'zip' }
sub_commands = [('check', checking_metadata)]
def initialize_options(self):
# 'template' and 'manifest' are, respectively, the names of
# the manifest template and manifest file.
self.template = None
self.manifest = None
# 'use_defaults': if true, we will include the default file set
# in the manifest
self.use_defaults = 1
self.prune = 1
self.manifest_only = 0
self.force_manifest = 0
self.formats = None
self.keep_temp = 0
self.dist_dir = None
self.archive_files = None
self.metadata_check = 1
self.owner = None
self.group = None
def finalize_options(self):
if self.manifest is None:
self.manifest = "MANIFEST"
if self.template is None:
self.template = "MANIFEST.in"
self.ensure_string_list('formats')
if self.formats is None:
try:
self.formats = [self.default_format[os.name]]
except KeyError:
raise DistutilsPlatformError(
"don't know how to create source distributions "
"on platform %s" % os.name)
bad_format = archive_util.check_archive_formats(self.formats)
if bad_format:
raise DistutilsOptionError(
"unknown archive format '%s'" % bad_format)
if self.dist_dir is None:
self.dist_dir = "dist"
def run(self):
# 'filelist' contains the list of files that will make up the
# manifest
self.filelist = FileList()
# Run sub commands
for cmd_name in self.get_sub_commands():
self.run_command(cmd_name)
# Do whatever it takes to get the list of files to process
# (process the manifest template, read an existing manifest,
# whatever). File list is accumulated in 'self.filelist'.
self.get_file_list()
# If user just wanted us to regenerate the manifest, stop now.
if self.manifest_only:
return
# Otherwise, go ahead and create the source distribution tarball,
# or zipfile, or whatever.
self.make_distribution()
def check_metadata(self):
"""Deprecated API."""
warn("distutils.command.sdist.check_metadata is deprecated, \
use the check command instead", PendingDeprecationWarning)
check = self.distribution.get_command_obj('check')
check.ensure_finalized()
check.run()
def get_file_list(self):
"""Figure out the list of files to include in the source
distribution, and put it in 'self.filelist'. This might involve
reading the manifest template (and writing the manifest), or just
reading the manifest, or just using the default file set -- it all
depends on the user's options.
"""
# new behavior when using a template:
# the file list is recalculated every time because
# even if MANIFEST.in or setup.py are not changed
# the user might have added some files in the tree that
# need to be included.
#
# This makes --force the default and only behavior with templates.
template_exists = os.path.isfile(self.template)
if not template_exists and self._manifest_is_not_generated():
self.read_manifest()
self.filelist.sort()
self.filelist.remove_duplicates()
return
if not template_exists:
self.warn(("manifest template '%s' does not exist " +
"(using default file list)") %
self.template)
self.filelist.findall()
if self.use_defaults:
self.add_defaults()
if template_exists:
self.read_template()
if self.prune:
self.prune_file_list()
self.filelist.sort()
self.filelist.remove_duplicates()
self.write_manifest()
def add_defaults(self):
"""Add all the default files to self.filelist:
- README or README.txt
- setup.py
- test/test*.py
- all pure Python modules mentioned in setup script
- all files pointed by package_data (build_py)
- all files defined in data_files.
- all files defined as scripts.
- all C sources listed as part of extensions or C libraries
in the setup script (doesn't catch C headers!)
Warns if (README or README.txt) or setup.py are missing; everything
else is optional.
"""
standards = [('README', 'README.txt'), self.distribution.script_name]
for fn in standards:
if isinstance(fn, tuple):
alts = fn
got_it = False
for fn in alts:
if os.path.exists(fn):
got_it = True
self.filelist.append(fn)
break
if not got_it:
self.warn("standard file not found: should have one of " +
', '.join(alts))
else:
if os.path.exists(fn):
self.filelist.append(fn)
else:
self.warn("standard file '%s' not found" % fn)
optional = ['test/test*.py', 'setup.cfg']
for pattern in optional:
files = filter(os.path.isfile, glob(pattern))
self.filelist.extend(files)
# build_py is used to get:
# - python modules
# - files defined in package_data
build_py = self.get_finalized_command('build_py')
# getting python files
if self.distribution.has_pure_modules():
self.filelist.extend(build_py.get_source_files())
# getting package_data files
# (computed in build_py.data_files by build_py.finalize_options)
for pkg, src_dir, build_dir, filenames in build_py.data_files:
for filename in filenames:
self.filelist.append(os.path.join(src_dir, filename))
# getting distribution.data_files
if self.distribution.has_data_files():
for item in self.distribution.data_files:
if isinstance(item, str): # plain file
item = convert_path(item)
if os.path.isfile(item):
self.filelist.append(item)
else: # a (dirname, filenames) tuple
dirname, filenames = item
for f in filenames:
f = convert_path(f)
if os.path.isfile(f):
self.filelist.append(f)
if self.distribution.has_ext_modules():
build_ext = self.get_finalized_command('build_ext')
self.filelist.extend(build_ext.get_source_files())
if self.distribution.has_c_libraries():
build_clib = self.get_finalized_command('build_clib')
self.filelist.extend(build_clib.get_source_files())
if self.distribution.has_scripts():
build_scripts = self.get_finalized_command('build_scripts')
self.filelist.extend(build_scripts.get_source_files())
def read_template(self):
"""Read and parse manifest template file named by self.template.
(usually "MANIFEST.in") The parsing and processing is done by
'self.filelist', which updates itself accordingly.
"""
log.info("reading manifest template '%s'", self.template)
template = TextFile(self.template, strip_comments=1, skip_blanks=1,
join_lines=1, lstrip_ws=1, rstrip_ws=1,
collapse_join=1)
try:
while True:
line = template.readline()
if line is None: # end of file
break
try:
self.filelist.process_template_line(line)
# the call above can raise a DistutilsTemplateError for
# malformed lines, or a ValueError from the lower-level
# convert_path function
except (DistutilsTemplateError, ValueError) as msg:
self.warn("%s, line %d: %s" % (template.filename,
template.current_line,
msg))
finally:
template.close()
def prune_file_list(self):
"""Prune off branches that might slip into the file list as created
by 'read_template()', but really don't belong there:
* the build tree (typically "build")
* the release tree itself (only an issue if we ran "sdist"
previously with --keep-temp, or it aborted)
* any RCS, CVS, .svn, .hg, .git, .bzr, _darcs directories
"""
build = self.get_finalized_command('build')
base_dir = self.distribution.get_fullname()
self.filelist.exclude_pattern(None, prefix=build.build_base)
self.filelist.exclude_pattern(None, prefix=base_dir)
if sys.platform == 'win32':
seps = r'/|\\'
else:
seps = '/'
vcs_dirs = ['RCS', 'CVS', r'\.svn', r'\.hg', r'\.git', r'\.bzr',
'_darcs']
vcs_ptrn = r'(^|%s)(%s)(%s).*' % (seps, '|'.join(vcs_dirs), seps)
self.filelist.exclude_pattern(vcs_ptrn, is_regex=1)
def write_manifest(self):
"""Write the file list in 'self.filelist' (presumably as filled in
by 'add_defaults()' and 'read_template()') to the manifest file
named by 'self.manifest'.
"""
if self._manifest_is_not_generated():
log.info("not writing to manually maintained "
"manifest file '%s'" % self.manifest)
return
content = self.filelist.files[:]
content.insert(0, '# file GENERATED by distutils, do NOT edit')
self.execute(file_util.write_file, (self.manifest, content),
"writing manifest file '%s'" % self.manifest)
def _manifest_is_not_generated(self):
# check for special comment used in 3.1.3 and higher
if not os.path.isfile(self.manifest):
return False
fp = open(self.manifest)
try:
first_line = fp.readline()
finally:
fp.close()
return first_line != '# file GENERATED by distutils, do NOT edit\n'
def read_manifest(self):
"""Read the manifest file (named by 'self.manifest') and use it to
fill in 'self.filelist', the list of files to include in the source
distribution.
"""
log.info("reading manifest file '%s'", self.manifest)
manifest = open(self.manifest)
for line in manifest:
# ignore comments and blank lines
line = line.strip()
if line.startswith('#') or not line:
continue
self.filelist.append(line)
manifest.close()
def make_release_tree(self, base_dir, files):
"""Create the directory tree that will become the source
distribution archive. All directories implied by the filenames in
'files' are created under 'base_dir', and then we hard link or copy
(if hard linking is unavailable) those files into place.
Essentially, this duplicates the developer's source tree, but in a
directory named after the distribution, containing only the files
to be distributed.
"""
# Create all the directories under 'base_dir' necessary to
# put 'files' there; the 'mkpath()' is just so we don't die
# if the manifest happens to be empty.
self.mkpath(base_dir)
dir_util.create_tree(base_dir, files, dry_run=self.dry_run)
# And walk over the list of files, either making a hard link (if
# os.link exists) to each one that doesn't already exist in its
# corresponding location under 'base_dir', or copying each file
# that's out-of-date in 'base_dir'. (Usually, all files will be
# out-of-date, because by default we blow away 'base_dir' when
# we're done making the distribution archives.)
if hasattr(os, 'link'): # can make hard links on this system
link = 'hard'
msg = "making hard links in %s..." % base_dir
else: # nope, have to copy
link = None
msg = "copying files to %s..." % base_dir
if not files:
log.warn("no files to distribute -- empty manifest?")
else:
log.info(msg)
for file in files:
if not os.path.isfile(file):
log.warn("'%s' not a regular file -- skipping" % file)
else:
dest = os.path.join(base_dir, file)
self.copy_file(file, dest, link=link)
self.distribution.metadata.write_pkg_info(base_dir)
def make_distribution(self):
"""Create the source distribution(s). First, we create the release
tree with 'make_release_tree()'; then, we create all required
archive files (according to 'self.formats') from the release tree.
Finally, we clean up by blowing away the release tree (unless
'self.keep_temp' is true). The list of archive files created is
stored so it can be retrieved later by 'get_archive_files()'.
"""
# Don't warn about missing meta-data here -- should be (and is!)
# done elsewhere.
base_dir = self.distribution.get_fullname()
base_name = os.path.join(self.dist_dir, base_dir)
self.make_release_tree(base_dir, self.filelist.files)
archive_files = [] # remember names of files we create
# tar archive must be created last to avoid overwrite and remove
if 'tar' in self.formats:
self.formats.append(self.formats.pop(self.formats.index('tar')))
for fmt in self.formats:
file = self.make_archive(base_name, fmt, base_dir=base_dir,
owner=self.owner, group=self.group)
archive_files.append(file)
self.distribution.dist_files.append(('sdist', '', file))
self.archive_files = archive_files
if not self.keep_temp:
dir_util.remove_tree(base_dir, dry_run=self.dry_run)
def get_archive_files(self):
"""Return the list of archive files created when the command
was run, or None if the command hasn't run yet.
"""
return self.archive_files
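# Example usage (illustrative): from a project directory containing setup.py,
#
#   python setup.py sdist --formats=gztar,zip --dist-dir=release
#
# regenerates the manifest (honouring MANIFEST.in if present) and writes the
# archives returned by get_archive_files() into ./release.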
| lgpl-3.0 |
piotroxp/scibibscan | scib/lib/python3.6/site-packages/setuptools/command/develop.py | 106 | 7384 | from distutils.util import convert_path
from distutils import log
from distutils.errors import DistutilsError, DistutilsOptionError
import os
import glob
import io
from setuptools.extern import six
from pkg_resources import Distribution, PathMetadata, normalize_path
from setuptools.command.easy_install import easy_install
import setuptools
class develop(easy_install):
"""Set up package for development"""
description = "install package in 'development mode'"
user_options = easy_install.user_options + [
("uninstall", "u", "Uninstall this source package"),
("egg-path=", None, "Set the path to be used in the .egg-link file"),
]
boolean_options = easy_install.boolean_options + ['uninstall']
command_consumes_arguments = False # override base
def run(self):
if self.uninstall:
self.multi_version = True
self.uninstall_link()
else:
self.install_for_development()
self.warn_deprecated_options()
def initialize_options(self):
self.uninstall = None
self.egg_path = None
easy_install.initialize_options(self)
self.setup_path = None
self.always_copy_from = '.' # always copy eggs installed in curdir
def finalize_options(self):
ei = self.get_finalized_command("egg_info")
if ei.broken_egg_info:
template = "Please rename %r to %r before using 'develop'"
args = ei.egg_info, ei.broken_egg_info
raise DistutilsError(template % args)
self.args = [ei.egg_name]
easy_install.finalize_options(self)
self.expand_basedirs()
self.expand_dirs()
# pick up setup-dir .egg files only: no .egg-info
self.package_index.scan(glob.glob('*.egg'))
egg_link_fn = ei.egg_name + '.egg-link'
self.egg_link = os.path.join(self.install_dir, egg_link_fn)
self.egg_base = ei.egg_base
if self.egg_path is None:
self.egg_path = os.path.abspath(ei.egg_base)
target = normalize_path(self.egg_base)
egg_path = normalize_path(os.path.join(self.install_dir,
self.egg_path))
if egg_path != target:
raise DistutilsOptionError(
"--egg-path must be a relative path from the install"
" directory to " + target
)
# Make a distribution for the package's source
self.dist = Distribution(
target,
PathMetadata(target, os.path.abspath(ei.egg_info)),
project_name=ei.egg_name
)
p = self.egg_base.replace(os.sep, '/')
if p != os.curdir:
p = '../' * (p.count('/') + 1)
self.setup_path = p
p = normalize_path(os.path.join(self.install_dir, self.egg_path, p))
if p != normalize_path(os.curdir):
raise DistutilsOptionError(
"Can't get a consistent path to setup script from"
" installation directory", p, normalize_path(os.curdir))
def install_for_development(self):
if six.PY3 and getattr(self.distribution, 'use_2to3', False):
# If we run 2to3 we can not do this inplace:
# Ensure metadata is up-to-date
self.reinitialize_command('build_py', inplace=0)
self.run_command('build_py')
bpy_cmd = self.get_finalized_command("build_py")
build_path = normalize_path(bpy_cmd.build_lib)
# Build extensions
self.reinitialize_command('egg_info', egg_base=build_path)
self.run_command('egg_info')
self.reinitialize_command('build_ext', inplace=0)
self.run_command('build_ext')
# Fixup egg-link and easy-install.pth
ei_cmd = self.get_finalized_command("egg_info")
self.egg_path = build_path
self.dist.location = build_path
# XXX
self.dist._provider = PathMetadata(build_path, ei_cmd.egg_info)
else:
# Without 2to3 inplace works fine:
self.run_command('egg_info')
# Build extensions in-place
self.reinitialize_command('build_ext', inplace=1)
self.run_command('build_ext')
self.install_site_py() # ensure that target dir is site-safe
if setuptools.bootstrap_install_from:
self.easy_install(setuptools.bootstrap_install_from)
setuptools.bootstrap_install_from = None
# create an .egg-link in the installation dir, pointing to our egg
log.info("Creating %s (link to %s)", self.egg_link, self.egg_base)
if not self.dry_run:
with open(self.egg_link, "w") as f:
f.write(self.egg_path + "\n" + self.setup_path)
# postprocess the installed distro, fixing up .pth, installing scripts,
# and handling requirements
self.process_distribution(None, self.dist, not self.no_deps)
def uninstall_link(self):
if os.path.exists(self.egg_link):
log.info("Removing %s (link to %s)", self.egg_link, self.egg_base)
egg_link_file = open(self.egg_link)
contents = [line.rstrip() for line in egg_link_file]
egg_link_file.close()
if contents not in ([self.egg_path],
[self.egg_path, self.setup_path]):
log.warn("Link points to %s: uninstall aborted", contents)
return
if not self.dry_run:
os.unlink(self.egg_link)
if not self.dry_run:
self.update_pth(self.dist) # remove any .pth link to us
if self.distribution.scripts:
# XXX should also check for entry point scripts!
log.warn("Note: you must uninstall or replace scripts manually!")
def install_egg_scripts(self, dist):
if dist is not self.dist:
# Installing a dependency, so fall back to normal behavior
return easy_install.install_egg_scripts(self, dist)
# create wrapper scripts in the script dir, pointing to dist.scripts
# new-style...
self.install_wrapper_scripts(dist)
# ...and old-style
for script_name in self.distribution.scripts or []:
script_path = os.path.abspath(convert_path(script_name))
script_name = os.path.basename(script_path)
with io.open(script_path) as strm:
script_text = strm.read()
self.install_script(dist, script_name, script_text, script_path)
def install_wrapper_scripts(self, dist):
dist = VersionlessRequirement(dist)
return easy_install.install_wrapper_scripts(self, dist)
class VersionlessRequirement(object):
"""
Adapt a pkg_resources.Distribution to simply return the project
name as the 'requirement' so that scripts will work across
multiple versions.
>>> dist = Distribution(project_name='foo', version='1.0')
>>> str(dist.as_requirement())
'foo==1.0'
>>> adapted_dist = VersionlessRequirement(dist)
>>> str(adapted_dist.as_requirement())
'foo'
"""
def __init__(self, dist):
self.__dist = dist
def __getattr__(self, name):
return getattr(self.__dist, name)
def as_requirement(self):
return self.project_name
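# Example usage (illustrative):
#
#   python setup.py develop       # link the working copy into site-packages
#   python setup.py develop -u    # --uninstall: remove the .egg-link again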
| mit |
BenDz/amphtml | validator/build.py | 12 | 21559 | #!/usr/bin/env python2.7
#
# Copyright 2015 The AMP HTML Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the license.
#
"""A build script which (thus far) works on Ubuntu 14."""
import argparse
import glob
import logging
import os
import platform
import re
import shutil
import subprocess
import sys
import tempfile
def Die(msg):
"""Prints error and exits with status 1.
Args:
msg: The error message to emit
"""
print >> sys.stderr, msg
sys.exit(1)
def EnsureNodeJsIsInstalled():
"""Ensure Node.js is installed and that 'node' is the command to run."""
logging.info('entering ...')
try:
output = subprocess.check_output(['node', '--eval', 'console.log("42")'])
if output.strip() == '42':
return
except (subprocess.CalledProcessError, OSError):
pass
Die('Node.js not found. Try "apt-get install nodejs" or follow the install '
'instructions at '
'https://github.com/ampproject/amphtml/blob/master/validator/README.md#installation')
def CheckPrereqs():
"""Checks that various prerequisites for this script are satisfied."""
logging.info('entering ...')
if platform.system() != 'Linux' and platform.system() != 'Darwin':
Die('Sorry, this script assumes Linux or Mac OS X thus far. '
'Please feel free to edit the source and fix it to your needs.')
# Ensure source files are available.
for f in [
'validator-main.protoascii', 'validator.proto', 'validator_gen_js.py',
'package.json', 'engine/validator.js', 'engine/validator_test.js',
'engine/validator-in-browser.js', 'engine/tokenize-css.js',
'engine/definitions.js', 'engine/parse-css.js', 'engine/parse-srcset.js',
'engine/parse-url.js'
]:
if not os.path.exists(f):
Die('%s not found. Must run in amp_validator source directory.' % f)
# Ensure protoc is available.
try:
libprotoc_version = subprocess.check_output(['protoc', '--version'])
except (subprocess.CalledProcessError, OSError):
Die('Protobuf compiler not found. Try "apt-get install protobuf-compiler" '
'or follow the install instructions at '
'https://github.com/ampproject/amphtml/blob/master/validator/README.md#installation.')
# Ensure 'libprotoc 2.5.0' or newer.
m = re.search('^(\\w+) (\\d+)\\.(\\d+)\\.(\\d+)', libprotoc_version)
if (m.group(1) != 'libprotoc' or
(int(m.group(2)), int(m.group(3)), int(m.group(4))) < (2, 5, 0)):
Die('Expected libprotoc 2.5.0 or newer, saw: %s' % libprotoc_version)
# Ensure that the Python protobuf package is installed.
for m in ['descriptor', 'text_format']:
module = 'google.protobuf.%s' % m
try:
__import__(module)
except ImportError:
Die('%s not found. Try "apt-get install python-protobuf" or follow the '
'install instructions at '
'https://github.com/ampproject/amphtml/blob/master/validator/README.md#installation'
% module)
# Ensure that yarn is installed.
try:
subprocess.check_output(['yarn', '--version'])
except (subprocess.CalledProcessError, OSError):
Die('Yarn package manager not found. Run '
'"curl -o- -L https://yarnpkg.com/install.sh | bash" '
'or see https://yarnpkg.com/docs/install.')
# Ensure JVM installed. TODO: Check for version?
try:
subprocess.check_output(['java', '-version'], stderr=subprocess.STDOUT)
except (subprocess.CalledProcessError, OSError):
Die('Java missing. Try "apt-get install openjdk-7-jre" or follow the install '
'instructions at '
'https://github.com/ampproject/amphtml/blob/master/validator/README.md#installation')
logging.info('... done')
def SetupOutDir(out_dir):
"""Sets up a clean output directory.
Args:
out_dir: directory name of the output directory. Must not have slashes,
dots, etc.
"""
logging.info('entering ...')
assert re.match(r'^[a-zA-Z_\-0-9]+$', out_dir), 'bad out_dir: %s' % out_dir
if os.path.exists(out_dir):
subprocess.check_call(['rm', '-rf', out_dir])
os.mkdir(out_dir)
logging.info('... done')
def InstallNodeDependencies():
"""Installs the dependencies using yarn."""
logging.info('entering ...')
# Install the project dependencies specified in package.json into
# node_modules.
logging.info('installing AMP Validator engine dependencies ...')
subprocess.check_call(
['yarn', 'install'],
stdout=(open(os.devnull, 'wb') if os.environ.get('TRAVIS') else sys.stdout))
logging.info('installing AMP Validator nodejs dependencies ...')
subprocess.check_call(
['yarn', 'install'],
cwd='nodejs',
stdout=(open(os.devnull, 'wb') if os.environ.get('TRAVIS') else sys.stdout))
logging.info('... done')
def GenValidatorPb2Py(out_dir):
"""Calls the proto compiler to generate validator_pb2.py.
Args:
out_dir: directory name of the output directory. Must not have slashes,
dots, etc.
"""
logging.info('entering ...')
assert re.match(r'^[a-zA-Z_\-0-9]+$', out_dir), 'bad out_dir: %s' % out_dir
subprocess.check_call(
['protoc', 'validator.proto', '--python_out=%s' % out_dir])
open('%s/__init__.py' % out_dir, 'w').close()
logging.info('... done')
def GenValidatorProtoascii(out_dir):
"""Assembles the validator protoascii file from the main and extensions.
Args:
out_dir: directory name of the output directory. Must not have slashes,
dots, etc.
"""
logging.info('entering ...')
assert re.match(r'^[a-zA-Z_\-0-9]+$', out_dir), 'bad out_dir: %s' % out_dir
protoascii_segments = [open('validator-main.protoascii').read()]
extensions = glob.glob('extensions/*/validator-*.protoascii')
# In the Github project, the extensions are located in a sibling directory
# to the validator rather than a child directory.
if not extensions:
extensions = glob.glob('../extensions/*/validator-*.protoascii')
extensions.sort()
for extension in extensions:
protoascii_segments.append(open(extension).read())
f = open('%s/validator.protoascii' % out_dir, 'w')
f.write(''.join(protoascii_segments))
f.close()
logging.info('... done')
def GenValidatorProtoGeneratedJs(out_dir):
"""Calls validator_gen_js to generate validator-proto-generated.js.
Args:
out_dir: directory name of the output directory. Must not have slashes,
dots, etc.
"""
logging.info('entering ...')
assert re.match(r'^[a-zA-Z_\-0-9]+$', out_dir), 'bad out_dir: %s' % out_dir
# These imports happen late, within this method because they don't necessarily
# exist when the module starts running, and the ones that probably do
# are checked by CheckPrereqs.
# pylint: disable=g-import-not-at-top
from google.protobuf import text_format
from google.protobuf import descriptor
from dist import validator_pb2
import validator_gen_js
# pylint: enable=g-import-not-at-top
out = []
validator_gen_js.GenerateValidatorGeneratedJs(
specfile=None,
validator_pb2=validator_pb2,
generate_proto_only=True,
generate_spec_only=False,
text_format=text_format,
html_format=None,
descriptor=descriptor,
out=out)
out.append('')
f = open('%s/validator-proto-generated.js' % out_dir, 'w')
f.write('\n'.join(out))
f.close()
logging.info('... done')
def GenValidatorGeneratedJs(out_dir):
"""Calls validator_gen_js to generate validator-generated.js.
Args:
out_dir: directory name of the output directory. Must not have slashes,
dots, etc.
"""
logging.info('entering ...')
assert re.match(r'^[a-zA-Z_\-0-9]+$', out_dir), 'bad out_dir: %s' % out_dir
# These imports happen late, within this method because they don't necessarily
# exist when the module starts running, and the ones that probably do
# are checked by CheckPrereqs.
# pylint: disable=g-import-not-at-top
from google.protobuf import text_format
from google.protobuf import descriptor
from dist import validator_pb2
import validator_gen_js
# pylint: enable=g-import-not-at-top
out = []
validator_gen_js.GenerateValidatorGeneratedJs(
specfile='%s/validator.protoascii' % out_dir,
validator_pb2=validator_pb2,
generate_proto_only=False,
generate_spec_only=True,
text_format=text_format,
html_format=None,
descriptor=descriptor,
out=out)
out.append('')
f = open('%s/validator-generated.js' % out_dir, 'w')
f.write('\n'.join(out))
f.close()
logging.info('... done')
def CompileWithClosure(js_files, definitions, entry_points, output_file):
"""Compiles the arguments with the Closure compiler for transpilation to ES5.
Args:
js_files: list of files to compile
definitions: list of definitions flags to closure compiler
entry_points: entry points (these won't be minimized)
output_file: name of the Javascript output file
"""
cmd = [
'java', '-jar', 'node_modules/google-closure-compiler/compiler.jar',
'--language_out=ES5_STRICT', '--dependency_mode=STRICT',
'--js_output_file=%s' % output_file
]
cmd += ['--entry_point=%s' % e for e in entry_points]
cmd += ['--output_manifest=%s' % ('%s.manifest' % output_file)]
cmd += [
'node_modules/google-closure-library/closure/**.js',
'!node_modules/google-closure-library/closure/**_test.js',
'node_modules/google-closure-library/third_party/closure/**.js',
'!node_modules/google-closure-library/third_party/closure/**_test.js'
]
cmd += js_files
cmd += definitions
subprocess.check_call(cmd)
def CompileValidatorMinified(out_dir):
"""Generates a minified validator script, which can be imported to validate.
Args:
out_dir: output directory
"""
logging.info('entering ...')
CompileWithClosure(
js_files=[
'engine/definitions.js', 'engine/htmlparser.js',
'engine/parse-css.js', 'engine/parse-srcset.js',
'engine/parse-url.js', 'engine/tokenize-css.js',
'%s/validator-generated.js' % out_dir,
'%s/validator-proto-generated.js' % out_dir,
'engine/validator-in-browser.js', 'engine/validator.js',
'engine/amp4ads-parse-css.js', 'engine/keyframes-parse-css.js',
'engine/htmlparser-interface.js'
],
definitions=[],
entry_points=[
'amp.validator.validateString',
'amp.validator.renderValidationResult',
'amp.validator.renderErrorMessage'
],
output_file='%s/validator_minified.js' % out_dir)
logging.info('... done')
def RunSmokeTest(out_dir):
"""Runs a smoke test (minimum valid AMP and empty html file).
Args:
out_dir: output directory
"""
logging.info('entering ...')
# Run index.js on the minimum valid amp and observe that it passes.
p = subprocess.Popen(
[
'node', 'nodejs/index.js', '--validator_js',
'%s/validator_minified.js' % out_dir,
'testdata/feature_tests/minimum_valid_amp.html', '--format=text'
],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
(stdout, stderr) = p.communicate()
# expect exactly the PASS line on stdout, nothing on stderr, and exit code 0
if ('testdata/feature_tests/minimum_valid_amp.html: PASS\n', '', p.returncode
) != (stdout, stderr, 0):
Die('Smoke test failed. returncode=%d stdout="%s" stderr="%s"' %
(p.returncode, stdout, stderr))
# Run index.js on an empty file and observe that it fails.
p = subprocess.Popen(
[
'node', 'nodejs/index.js', '--validator_js',
'%s/validator_minified.js' % out_dir,
'testdata/feature_tests/empty.html', '--format=text'
],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
(stdout, stderr) = p.communicate()
if p.returncode != 1:
Die('smoke test failed. Expected p.returncode==1, saw: %s' % p.returncode)
if not stderr.startswith('testdata/feature_tests/empty.html:1:0 '
'The mandatory tag \'html'):
Die('smoke test failed; stderr was: "%s"' % stderr)
logging.info('... done')
def RunIndexTest():
"""Runs the index_test.js, which tests the NodeJS API.
"""
logging.info('entering ...')
p = subprocess.Popen(
['node', './index_test.js'],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
cwd='nodejs')
(stdout, stderr) = p.communicate()
if p.returncode != 0:
Die('index_test.js failed. returncode=%d stdout="%s" stderr="%s"' %
(p.returncode, stdout, stderr))
logging.info('... done')
def CompileValidatorTestMinified(out_dir):
"""Runs closure compiler for validator_test.js.
Args:
out_dir: directory name of the output directory. Must not have slashes,
dots, etc.
"""
logging.info('entering ...')
CompileWithClosure(
js_files=[
'engine/definitions.js', 'engine/htmlparser.js',
'engine/parse-css.js', 'engine/parse-srcset.js',
'engine/parse-url.js', 'engine/tokenize-css.js',
'%s/validator-generated.js' % out_dir,
'%s/validator-proto-generated.js' % out_dir,
'engine/validator-in-browser.js', 'engine/validator.js',
'engine/amp4ads-parse-css.js', 'engine/keyframes-parse-css.js',
'engine/htmlparser-interface.js', 'engine/validator_test.js'
],
definitions=[],
entry_points=['amp.validator.ValidatorTest'],
output_file='%s/validator_test_minified.js' % out_dir)
logging.info('... success')
def CompileHtmlparserTestMinified(out_dir):
"""Runs closure compiler for htmlparser_test.js.
Args:
out_dir: directory name of the output directory. Must not have slashes,
dots, etc.
"""
logging.info('entering ...')
CompileWithClosure(
js_files=[
'engine/htmlparser.js', 'engine/htmlparser-interface.js',
'engine/htmlparser_test.js'
],
definitions=[],
entry_points=['amp.htmlparser.HtmlParserTest'],
output_file='%s/htmlparser_test_minified.js' % out_dir)
logging.info('... success')
def CompileParseCssTestMinified(out_dir):
"""Runs closure compiler for parse-css_test.js.
Args:
out_dir: directory name of the output directory. Must not have slashes,
dots, etc.
"""
logging.info('entering ...')
CompileWithClosure(
js_files=[
'engine/definitions.js', 'engine/parse-css.js', 'engine/parse-url.js',
'engine/tokenize-css.js', 'engine/css-selectors.js',
'engine/json-testutil.js', 'engine/parse-css_test.js',
'%s/validator-generated.js' % out_dir,
'%s/validator-proto-generated.js' % out_dir
],
definitions=[],
entry_points=['parse_css.ParseCssTest'],
output_file='%s/parse-css_test_minified.js' % out_dir)
logging.info('... success')
def CompileParseUrlTestMinified(out_dir):
"""Runs closure compiler for parse-url_test.js.
Args:
out_dir: directory name of the output directory. Must not have slashes,
dots, etc.
"""
logging.info('entering ...')
CompileWithClosure(
js_files=[
'engine/definitions.js', 'engine/parse-url.js', 'engine/parse-css.js',
'engine/tokenize-css.js', 'engine/css-selectors.js',
'engine/json-testutil.js', 'engine/parse-url_test.js',
'%s/validator-generated.js' % out_dir,
'%s/validator-proto-generated.js' % out_dir
],
definitions=[],
entry_points=['parse_url.ParseURLTest'],
output_file='%s/parse-url_test_minified.js' % out_dir)
logging.info('... success')
def CompileAmp4AdsParseCssTestMinified(out_dir):
"""Runs closure compiler for amp4ads-parse-css_test.js.
Args:
out_dir: directory name of the output directory. Must not have slashes,
dots, etc.
"""
logging.info('entering ...')
CompileWithClosure(
js_files=[
'engine/definitions.js', 'engine/amp4ads-parse-css_test.js',
'engine/parse-css.js', 'engine/parse-url.js',
'engine/amp4ads-parse-css.js', 'engine/tokenize-css.js',
'engine/css-selectors.js', 'engine/json-testutil.js',
'%s/validator-generated.js' % out_dir,
'%s/validator-proto-generated.js' % out_dir
],
definitions=[],
entry_points=['parse_css.Amp4AdsParseCssTest'],
output_file='%s/amp4ads-parse-css_test_minified.js' % out_dir)
logging.info('... success')
def CompileKeyframesParseCssTestMinified(out_dir):
"""Runs closure compiler for keyframes-parse-css_test.js.
Args:
out_dir: directory name of the output directory. Must not have slashes,
dots, etc.
"""
logging.info('entering ...')
CompileWithClosure(
js_files=[
'engine/definitions.js', 'engine/keyframes-parse-css_test.js',
'engine/parse-css.js', 'engine/parse-url.js',
'engine/keyframes-parse-css.js', 'engine/tokenize-css.js',
'engine/css-selectors.js', 'engine/json-testutil.js',
'%s/validator-generated.js' % out_dir,
'%s/validator-proto-generated.js' % out_dir
],
definitions=[],
entry_points=['parse_css.KeyframesParseCssTest'],
output_file='%s/keyframes-parse-css_test_minified.js' % out_dir)
logging.info('... success')
def CompileParseSrcsetTestMinified(out_dir):
"""Runs closure compiler for parse-srcset_test.js.
Args:
out_dir: directory name of the output directory. Must not have slashes,
dots, etc.
"""
logging.info('entering ...')
CompileWithClosure(
js_files=[
'engine/definitions.js', 'engine/parse-srcset.js',
'engine/json-testutil.js', 'engine/parse-srcset_test.js',
'%s/validator-generated.js' % out_dir,
'%s/validator-proto-generated.js' % out_dir
],
definitions=[],
entry_points=['parse_srcset.ParseSrcsetTest'],
output_file='%s/parse-srcset_test_minified.js' % out_dir)
logging.info('... success')
def GenerateTestRunner(out_dir):
"""Generates a test runner: a nodejs script that runs our minified tests.
Args:
out_dir: directory name of the output directory. Must not have slashes,
dots, etc.
"""
logging.info('entering ...')
f = open('%s/test_runner' % out_dir, 'w')
extensions_dir = 'extensions'
# In the Github project, the extensions are located in a sibling directory
# to the validator rather than a child directory.
if not os.path.isdir(extensions_dir):
extensions_dir = '../extensions'
f.write("""#!/usr/bin/env node
global.assert = require('assert');
global.fs = require('fs');
global.path = require('path');
var JasmineRunner = require('jasmine');
var jasmine = new JasmineRunner();
process.env.TESTDATA_ROOTS = 'testdata:%s'
require('./validator_test_minified');
require('./htmlparser_test_minified');
require('./parse-css_test_minified');
require('./parse-url_test_minified');
require('./amp4ads-parse-css_test_minified');
require('./keyframes-parse-css_test_minified');
require('./parse-srcset_test_minified');
jasmine.onComplete(function (passed) {
process.exit(passed ? 0 : 1);
});
jasmine.execute();
""" % extensions_dir)
os.chmod('%s/test_runner' % out_dir, 0o750)
logging.info('... success')
def RunTests(update_tests, out_dir):
"""Runs all the minified tests.
Args:
update_tests: a boolean indicating whether or not to update the test
output files.
out_dir: directory name of the output directory. Must not have slashes,
dots, etc.
"""
logging.info('entering ...')
env = os.environ.copy()
if update_tests:
env['UPDATE_VALIDATOR_TEST'] = '1'
subprocess.check_call(['node', '%s/test_runner' % out_dir], env=env)
logging.info('... success')
def Main(parsed_args):
"""The main method, which executes all build steps and runs the tests."""
logging.basicConfig(
format='[[%(filename)s %(funcName)s]] - %(message)s',
level=(logging.ERROR if os.environ.get('TRAVIS') else logging.INFO))
EnsureNodeJsIsInstalled()
CheckPrereqs()
InstallNodeDependencies()
SetupOutDir(out_dir='dist')
GenValidatorProtoascii(out_dir='dist')
GenValidatorPb2Py(out_dir='dist')
GenValidatorProtoGeneratedJs(out_dir='dist')
GenValidatorGeneratedJs(out_dir='dist')
CompileValidatorMinified(out_dir='dist')
RunSmokeTest(out_dir='dist')
RunIndexTest()
CompileValidatorTestMinified(out_dir='dist')
CompileHtmlparserTestMinified(out_dir='dist')
CompileParseCssTestMinified(out_dir='dist')
CompileParseUrlTestMinified(out_dir='dist')
CompileAmp4AdsParseCssTestMinified(out_dir='dist')
CompileKeyframesParseCssTestMinified(out_dir='dist')
CompileParseSrcsetTestMinified(out_dir='dist')
GenerateTestRunner(out_dir='dist')
RunTests(update_tests=parsed_args.update_tests, out_dir='dist')
if __name__ == '__main__':
parser = argparse.ArgumentParser(
description='Build script for the AMP Validator.')
parser.add_argument(
'--update_tests',
action='store_true',
help=('If True, validator_test will overwrite the .out test files with '
'the encountered test output.'))
Main(parser.parse_args())
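# Example usage (illustrative): from the validator source directory,
#
#   python build.py                 # full build, smoke test and unit tests
#   python build.py --update_tests  # also rewrite the .out expectation files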
| apache-2.0 |
jobscore/sync-engine | inbox/test/general/test_provider_resolution.py | 3 | 2672 | import pytest
from inbox.util.url import provider_from_address
from inbox.util.url import InvalidEmailAddressError
from inbox.auth.base import handler_from_provider
from inbox.auth.generic import GenericAuthHandler
from inbox.auth.gmail import GmailAuthHandler
from inbox.basicauth import NotSupportedError
def test_provider_resolution(mock_dns_resolver):
mock_dns_resolver._load_records('inbox', 'test/data/general_test_provider_resolution.json')
test_cases = [
('foo@example.com', 'unknown'),
('foo@noresolve.com', 'unknown'),
('foo@gmail.com', 'gmail'),
('foo@postini.com', 'gmail'),
('foo@yahoo.com', 'yahoo'),
('foo@yahoo.se', 'yahoo'),
('foo@hotmail.com', 'outlook'),
('foo@outlook.com', 'outlook'),
('foo@aol.com', 'aol'),
('foo@love.com', 'aol'),
('foo@games.com', 'aol'),
('foo@exchange.mit.edu', 'eas'),
('foo@fastmail.fm', 'fastmail'),
('foo@fastmail.net', 'fastmail'),
('foo@fastmail.com', 'fastmail'),
('foo@hover.com', 'hover'),
('foo@yahoo.com', 'yahoo'),
('foo@yandex.com', 'yandex'),
('foo@mrmail.com', 'zimbra'),
('foo@icloud.com', 'icloud'),
('foo@mac.com', 'icloud'),
('foo@gmx.com', 'gmx'),
('foo@gandi.net', 'gandi'),
('foo@debuggers.co', 'gandi'),
('foo@forumone.com', 'gmail'),
('foo@getbannerman.com', 'gmail'),
('foo@inboxapp.onmicrosoft.com', 'eas'),
('foo@espertech.onmicrosoft.com', 'eas'),
('foo@doesnotexist.nilas.com', 'unknown'),
('foo@autobizbrokers.com', 'bluehost'),
]
for email, expected_provider in test_cases:
assert provider_from_address(email, lambda: mock_dns_resolver) == expected_provider
with pytest.raises(InvalidEmailAddressError):
provider_from_address('notanemail', lambda: mock_dns_resolver)
with pytest.raises(InvalidEmailAddressError):
provider_from_address('not@anemail', lambda: mock_dns_resolver)
with pytest.raises(InvalidEmailAddressError):
provider_from_address('notanemail.com', lambda: mock_dns_resolver)
def test_auth_handler_dispatch():
assert isinstance(handler_from_provider('custom'), GenericAuthHandler)
assert isinstance(handler_from_provider('fastmail'), GenericAuthHandler)
assert isinstance(handler_from_provider('aol'), GenericAuthHandler)
assert isinstance(handler_from_provider('yahoo'), GenericAuthHandler)
assert isinstance(handler_from_provider('gmail'), GmailAuthHandler)
with pytest.raises(NotSupportedError):
handler_from_provider('NOTAREALMAILPROVIDER')
| agpl-3.0 |
Shanec132006/project | server/lib/werkzeug/contrib/jsrouting.py | 318 | 8534 | # -*- coding: utf-8 -*-
"""
werkzeug.contrib.jsrouting
~~~~~~~~~~~~~~~~~~~~~~~~~~
Addon module that allows to create a JavaScript function from a map
that generates rules.
:copyright: (c) 2013 by the Werkzeug Team, see AUTHORS for more details.
:license: BSD, see LICENSE for more details.
"""
try:
from simplejson import dumps
except ImportError:
try:
from json import dumps
except ImportError:
def dumps(*args):
raise RuntimeError('simplejson required for jsrouting')
from inspect import getmro
from werkzeug.routing import NumberConverter
from werkzeug._compat import iteritems
def render_template(name_parts, rules, converters):
result = u''
if name_parts:
for idx in xrange(0, len(name_parts) - 1):
name = u'.'.join(name_parts[:idx + 1])
result += u"if (typeof %s === 'undefined') %s = {}\n" % (name, name)
result += '%s = ' % '.'.join(name_parts)
result += """(function (server_name, script_name, subdomain, url_scheme) {
var converters = [%(converters)s];
var rules = %(rules)s;
function in_array(array, value) {
if (array.indexOf != undefined) {
return array.indexOf(value) != -1;
}
for (var i = 0; i < array.length; i++) {
if (array[i] == value) {
return true;
}
}
return false;
}
function array_diff(array1, array2) {
array1 = array1.slice();
for (var i = array1.length-1; i >= 0; i--) {
if (in_array(array2, array1[i])) {
array1.splice(i, 1);
}
}
return array1;
}
function split_obj(obj) {
var names = [];
var values = [];
for (var name in obj) {
if (typeof(obj[name]) != 'function') {
names.push(name);
values.push(obj[name]);
}
}
return {names: names, values: values, original: obj};
}
function suitable(rule, args) {
var default_args = split_obj(rule.defaults || {});
var diff_arg_names = array_diff(rule.arguments, default_args.names);
for (var i = 0; i < diff_arg_names.length; i++) {
if (!in_array(args.names, diff_arg_names[i])) {
return false;
}
}
if (array_diff(rule.arguments, args.names).length == 0) {
if (rule.defaults == null) {
return true;
}
for (var i = 0; i < default_args.names.length; i++) {
var key = default_args.names[i];
var value = default_args.values[i];
if (value != args.original[key]) {
return false;
}
}
}
return true;
}
function build(rule, args) {
var tmp = [];
var processed = rule.arguments.slice();
for (var i = 0; i < rule.trace.length; i++) {
var part = rule.trace[i];
if (part.is_dynamic) {
var converter = converters[rule.converters[part.data]];
var data = converter(args.original[part.data]);
if (data == null) {
return null;
}
tmp.push(data);
processed.push(part.name);
} else {
tmp.push(part.data);
}
}
tmp = tmp.join('');
var pipe = tmp.indexOf('|');
var subdomain = tmp.substring(0, pipe);
var url = tmp.substring(pipe+1);
var unprocessed = array_diff(args.names, processed);
var first_query_var = true;
for (var i = 0; i < unprocessed.length; i++) {
if (first_query_var) {
url += '?';
} else {
url += '&';
}
first_query_var = false;
url += encodeURIComponent(unprocessed[i]);
url += '=';
url += encodeURIComponent(args.original[unprocessed[i]]);
}
return {subdomain: subdomain, path: url};
}
function lstrip(s, c) {
while (s && s.substring(0, 1) == c) {
s = s.substring(1);
}
return s;
}
function rstrip(s, c) {
while (s && s.substring(s.length-1, s.length) == c) {
s = s.substring(0, s.length-1);
}
return s;
}
return function(endpoint, args, force_external) {
args = split_obj(args);
var rv = null;
for (var i = 0; i < rules.length; i++) {
var rule = rules[i];
if (rule.endpoint != endpoint) continue;
if (suitable(rule, args)) {
rv = build(rule, args);
if (rv != null) {
break;
}
}
}
if (rv == null) {
return null;
}
if (!force_external && rv.subdomain == subdomain) {
return rstrip(script_name, '/') + '/' + lstrip(rv.path, '/');
} else {
return url_scheme + '://'
+ (rv.subdomain ? rv.subdomain + '.' : '')
+ server_name + rstrip(script_name, '/')
+ '/' + lstrip(rv.path, '/');
}
};
})""" % {'converters': u', '.join(converters), 'rules': rules}
return result
def generate_map(map, name='url_map'):
"""
Generates a JavaScript function containing the rules defined in
this map, to be used with a MapAdapter's generate_javascript
method. If you don't pass a name the returned JavaScript code is
an expression that returns a function. Otherwise it's a standalone
script that assigns the function with that name. Dotted names are
resolved (so you can use a name like 'obj.url_for')
In order to use JavaScript generation, simplejson must be installed.
Note that using this feature will expose the rules
defined in your map to users. If your rules contain sensitive
information, don't use JavaScript generation!
"""
from warnings import warn
warn(DeprecationWarning('This module is deprecated'))
map.update()
rules = []
converters = []
for rule in map.iter_rules():
trace = [{
'is_dynamic': is_dynamic,
'data': data
} for is_dynamic, data in rule._trace]
rule_converters = {}
for key, converter in iteritems(rule._converters):
js_func = js_to_url_function(converter)
try:
index = converters.index(js_func)
except ValueError:
converters.append(js_func)
index = len(converters) - 1
rule_converters[key] = index
rules.append({
u'endpoint': rule.endpoint,
u'arguments': list(rule.arguments),
u'converters': rule_converters,
u'trace': trace,
u'defaults': rule.defaults
})
return render_template(name_parts=name and name.split('.') or [],
rules=dumps(rules),
converters=converters)
def generate_adapter(adapter, name='url_for', map_name='url_map'):
"""Generates the url building function for a map."""
values = {
u'server_name': dumps(adapter.server_name),
u'script_name': dumps(adapter.script_name),
u'subdomain': dumps(adapter.subdomain),
u'url_scheme': dumps(adapter.url_scheme),
u'name': name,
u'map_name': map_name
}
return u'''\
var %(name)s = %(map_name)s(
%(server_name)s,
%(script_name)s,
%(subdomain)s,
%(url_scheme)s
);''' % values
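# Illustrative sketch (not part of the module): emitting a standalone script
# that exposes a client-side url_for() for a hypothetical map.
#
#     from werkzeug.routing import Map, Rule
#     url_map = Map([Rule('/', endpoint='index'),
#                    Rule('/user/<int:user_id>', endpoint='user')])
#     js = generate_map(url_map, name='url_map')
#     js += generate_adapter(url_map.bind('example.com'), name='url_for')
#     # in the browser: url_for('user', {user_id: 42}) -> '/user/42'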
def js_to_url_function(converter):
"""Get the JavaScript converter function from a rule."""
if hasattr(converter, 'js_to_url_function'):
data = converter.js_to_url_function()
else:
for cls in getmro(type(converter)):
if cls in js_to_url_functions:
data = js_to_url_functions[cls](converter)
break
else:
return 'encodeURIComponent'
return '(function(value) { %s })' % data
def NumberConverter_js_to_url(conv):
if conv.fixed_digits:
return u'''\
var result = value.toString();
while (result.length < %s)
result = '0' + result;
return result;''' % conv.fixed_digits
return u'return value.toString();'
js_to_url_functions = {
NumberConverter: NumberConverter_js_to_url
}
| apache-2.0 |
majek/rons | rons/parser.py | 1 | 4184 | '''
Recursive decoder for Redis protocol. The code is quite reasonable and
has no external dependencies whatsoever.
To test run:
$ python -m doctest parser.py -v
'''
import collections
import itertools
class ProtocolError(Exception): pass
EMPTY=0
BULK=1
MULTIBULK=2
_State = collections.namedtuple('State', ['s', 'l', 'r', 'state'])
def State(**kwargs):
x = {'s':None, 'l':None, 'r':None, 'state':None}
x.update(kwargs)
return _State(**x)
INITIAL_STATE=State(s=EMPTY)
def initial_state():
return INITIAL_STATE
def decode(buf, state):
# Returns (bytes_consumed, decoded_frame_or_None, next_state).
if state.s == EMPTY:
line, p, rest = buf.partition('\r\n')
if not p: return ( 0, None, INITIAL_STATE )
c, t, line_len = line[0], line[1:], len(line)+2
if c not in '+-:$*':
raise ProtocolError("Unexpected Redis response %r" % (line,))
if c in ('+', '-'):
return ( line_len, (c, t), INITIAL_STATE )
elif c == ':':
return ( line_len, (':', int(t)), INITIAL_STATE )
no = int(t)
if c == '$':
if no == -1:
return ( line_len, ('$', None), INITIAL_STATE )
else:
return ( line_len, None, State(s=BULK, l=no) )
elif c == '*':
if no == -1:
return ( line_len, ('*', None), INITIAL_STATE )
else:
return ( line_len, None, State(s=MULTIBULK, l=no, r=[], state=INITIAL_STATE) )
elif state.s == BULK:
if len(buf) < state.l+2: return (0, None, state)
return ( state.l+2, ('$', buf[:state.l]), INITIAL_STATE )
elif state.s == MULTIBULK:
if state.l == 0:
return ( 0, ('*', state.r), INITIAL_STATE )
else:
# decode one nested frame and fold it into the accumulated reply
(c, frame, new_s_state) = decode(buf, state.state)
state = state._replace(state=new_s_state)
if frame:
state = state._replace(r=state.r + [frame],
l=state.l - 1)
return (c, None, state)
def test_decode(buf):
r'''
>>> test_decode("$-1\r\n")
[('$', None)]
>>> test_decode("$6\r\nfoobar\r\n")
[('$', 'foobar')]
>>> test_decode("*0\r\n")
[('*', [])]
>>> test_decode("*-1\r\n")
[('*', None)]
>>> test_decode("*3\r\n$3\r\nfoo\r\n$-1\r\n$3\r\nbar\r\n")
[('*', [('$', 'foo'), ('$', None), ('$', 'bar')])]
>>> test_decode("*3\r\n$3\r\nSET\r\n$5\r\nmykey\r\n$7\r\nmyvalue\r\n")
[('*', [('$', 'SET'), ('$', 'mykey'), ('$', 'myvalue')])]
>>> test_decode("*4\r\n$3\r\nfoo\r\n$3\r\nbar\r\n$5\r\nHello\r\n$5\r\nWorld\r\n")
[('*', [('$', 'foo'), ('$', 'bar'), ('$', 'Hello'), ('$', 'World')])]
>>> # All at once
    >>> test_decode("$-1\r\n$6\r\nfoobar\r\n*0\r\n*-1\r\n*3\r\n$3\r\nfoo\r\n$-1\r\n$3\r\nbar\r\n*3\r\n$3\r\nSET\r\n$5\r\nmykey\r\n$7\r\nmyvalue\r\n*4\r\n$3\r\nfoo\r\n$3\r\nbar\r\n$5\r\nHello\r\n$5\r\nWorld\r\n")
[('$', None), ('$', 'foobar'), ('*', []), ('*', None), ('*', [('$', 'foo'), ('$', None), ('$', 'bar')]), ('*', [('$', 'SET'), ('$', 'mykey'), ('$', 'myvalue')]), ('*', [('$', 'foo'), ('$', 'bar'), ('$', 'Hello'), ('$', 'World')])]
>>> # Other things
>>> test_decode("r\r\n")
Traceback (most recent call last):
...
ProtocolError: Unexpected Redis response 'r'
>>> test_decode("+OK\r\n")
[('+', 'OK')]
>>> test_decode("-ERROR\r\n")
[('-', 'ERROR')]
>>> test_decode("$6\r\nfoo\r\n\r\r\n")
[('$', 'foo\r\n\r')]
'''
pos, state, results = 0, initial_state(), []
while True:
(consumed, frame, state) = decode(buf[pos:], state)
if frame:
results.append( frame )
elif not consumed:
break
pos += consumed
return results
def encode(arguments):
return ''.join(itertools.chain(
('*', str(len(arguments)), '\r\n'),
*(('$', str(len(a)), '\r\n', a, '\r\n') for a in arguments)))
def test_encode(arguments):
r'''
>>> test_encode(['SET', 'mykey', 'myvalue'])
'*3\r\n$3\r\nSET\r\n$5\r\nmykey\r\n$7\r\nmyvalue\r\n'
>>> test_encode(['SET'])
'*1\r\n$3\r\nSET\r\n'
>>> test_encode([])
'*0\r\n'
'''
return encode(arguments)
| mit |
Rewardcoin/p2ppool-SGcoin | p2pool/test/test_p2p.py | 269 | 2724 | import random
from twisted.internet import defer, endpoints, protocol, reactor
from twisted.trial import unittest
from p2pool import networks, p2p
from p2pool.bitcoin import data as bitcoin_data
from p2pool.util import deferral
class Test(unittest.TestCase):
@defer.inlineCallbacks
def test_sharereq(self):
class MyNode(p2p.Node):
def __init__(self, df):
p2p.Node.__init__(self, lambda: None, 29333, networks.nets['bitcoin'], {}, set([('127.0.0.1', 9333)]), 0, 0, 0, 0)
self.df = df
def handle_share_hashes(self, hashes, peer):
peer.get_shares(
hashes=[hashes[0]],
parents=5,
stops=[],
).chainDeferred(self.df)
df = defer.Deferred()
n = MyNode(df)
n.start()
try:
yield df
finally:
yield n.stop()
@defer.inlineCallbacks
def test_tx_limit(self):
class MyNode(p2p.Node):
def __init__(self, df):
p2p.Node.__init__(self, lambda: None, 29333, networks.nets['bitcoin'], {}, set([('127.0.0.1', 9333)]), 0, 0, 0, 0)
self.df = df
self.sent_time = 0
@defer.inlineCallbacks
def got_conn(self, conn):
p2p.Node.got_conn(self, conn)
yield deferral.sleep(.5)
new_mining_txs = dict(self.mining_txs_var.value)
for i in xrange(3):
huge_tx = dict(
version=0,
tx_ins=[],
tx_outs=[dict(
value=0,
script='x'*900000,
)],
lock_time=i,
)
new_mining_txs[bitcoin_data.hash256(bitcoin_data.tx_type.pack(huge_tx))] = huge_tx
self.mining_txs_var.set(new_mining_txs)
self.sent_time = reactor.seconds()
def lost_conn(self, conn, reason):
self.df.callback(None)
try:
p2p.Protocol.max_remembered_txs_size *= 10
df = defer.Deferred()
n = MyNode(df)
n.start()
yield df
if not (n.sent_time <= reactor.seconds() <= n.sent_time + 1):
                raise ValueError('node did not disconnect within 1 second of receiving too much tx data')
yield n.stop()
finally:
p2p.Protocol.max_remembered_txs_size //= 10
| gpl-3.0 |
ytjiang/django | django/contrib/staticfiles/handlers.py | 581 | 2328 | from django.conf import settings
from django.contrib.staticfiles import utils
from django.contrib.staticfiles.views import serve
from django.core.handlers.wsgi import WSGIHandler, get_path_info
from django.utils.six.moves.urllib.parse import urlparse
from django.utils.six.moves.urllib.request import url2pathname
class StaticFilesHandler(WSGIHandler):
"""
WSGI middleware that intercepts calls to the static files directory, as
defined by the STATIC_URL setting, and serves those files.
"""
# May be used to differentiate between handler types (e.g. in a
# request_finished signal)
handles_files = True
def __init__(self, application):
self.application = application
self.base_url = urlparse(self.get_base_url())
super(StaticFilesHandler, self).__init__()
def get_base_url(self):
utils.check_settings()
return settings.STATIC_URL
def _should_handle(self, path):
"""
Checks if the path should be handled. Ignores the path if:
* the host is provided as part of the base_url
* the request's path isn't under the media path (or equal)
"""
return path.startswith(self.base_url[2]) and not self.base_url[1]
def file_path(self, url):
"""
Returns the relative path to the media file on disk for the given URL.
"""
relative_url = url[len(self.base_url[2]):]
return url2pathname(relative_url)
def serve(self, request):
"""
Actually serves the request path.
"""
return serve(request, self.file_path(request.path), insecure=True)
def get_response(self, request):
from django.http import Http404
if self._should_handle(request.path):
try:
return self.serve(request)
except Http404 as e:
if settings.DEBUG:
from django.views import debug
return debug.technical_404_response(request, e)
return super(StaticFilesHandler, self).get_response(request)
def __call__(self, environ, start_response):
if not self._should_handle(get_path_info(environ)):
return self.application(environ, start_response)
return super(StaticFilesHandler, self).__call__(environ, start_response)
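# Hedged wiring sketch (not part of the original module): in a project's
# wsgi.py the handler typically wraps the regular WSGI application. Shown as
# a function so importing this module stays side-effect free; it requires
# DJANGO_SETTINGS_MODULE to be configured.
def _make_dev_application():
    from django.core.wsgi import get_wsgi_application
    return StaticFilesHandler(get_wsgi_application())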
| bsd-3-clause |
gautam1168/tardis | tardis/io/model_reader.py | 5 | 7787 | #reading different model files
import numpy as np
from numpy import recfromtxt, genfromtxt
import pandas as pd
from astropy import units as u
import logging
# Adding logging support
logger = logging.getLogger(__name__)
from tardis.util import parse_quantity
class ConfigurationError(Exception):
pass
def read_density_file(density_filename, density_filetype, time_explosion, v_inner_boundary=0.0, v_outer_boundary=np.inf):
"""
read different density file formats
Parameters
----------
density_filename: ~str
filename or path of the density file
density_filetype: ~str
type of the density file
time_explosion: ~astropy.units.Quantity
time since explosion used to scale the density
"""
file_parsers = {'artis': read_artis_density,
'simple_ascii': read_simple_ascii_density}
time_of_model, index, v_inner, v_outer, unscaled_mean_densities = file_parsers[density_filetype](density_filename)
mean_densities = calculate_density_after_time(unscaled_mean_densities, time_of_model, time_explosion)
if v_inner_boundary > v_outer_boundary:
        raise ConfigurationError('v_inner_boundary > v_outer_boundary '
                                 '({0} > {1}). unphysical!'.format(
                                     v_inner_boundary, v_outer_boundary))
if (not np.isclose(v_inner_boundary, 0.0 * u.km / u.s,
atol=1e-8 * u.km / u.s)
and v_inner_boundary > v_inner[0]):
if v_inner_boundary > v_outer[-1]:
raise ConfigurationError('Inner boundary selected outside of model')
inner_boundary_index = v_inner.searchsorted(v_inner_boundary) - 1
else:
inner_boundary_index = None
v_inner_boundary = v_inner[0]
        logger.warning("v_inner_boundary requested too small for read-in file."
" Boundary shifted to match file.")
if not np.isinf(v_outer_boundary) and v_outer_boundary < v_outer[-1]:
outer_boundary_index = v_outer.searchsorted(v_outer_boundary) + 1
else:
outer_boundary_index = None
v_outer_boundary = v_outer[-1]
        logger.warning("v_outer_boundary requested too large for read-in file. Boundary shifted to match file.")
v_inner = v_inner[inner_boundary_index:outer_boundary_index]
v_inner[0] = v_inner_boundary
v_outer = v_outer[inner_boundary_index:outer_boundary_index]
v_outer[-1] = v_outer_boundary
mean_densities = mean_densities[inner_boundary_index:outer_boundary_index]
return v_inner, v_outer, mean_densities, inner_boundary_index, outer_boundary_index
def read_abundances_file(abundance_filename, abundance_filetype, inner_boundary_index=None, outer_boundary_index=None):
"""
    read different abundance file formats
    Parameters
    ----------
    abundance_filename: ~str
        filename or path of the abundance file
    abundance_filetype: ~str
        type of the abundance file
inner_boundary_index: int
index of the inner shell, default None
outer_boundary_index: int
index of the outer shell, default None
"""
file_parsers = {'simple_ascii': read_simple_ascii_abundances,
'artis': read_simple_ascii_abundances}
index, abundances = file_parsers[abundance_filetype](abundance_filename)
if outer_boundary_index is not None:
outer_boundary_index_m1 = outer_boundary_index - 1
else:
outer_boundary_index_m1 = None
index = index[inner_boundary_index:outer_boundary_index]
abundances = abundances.ix[:, slice(inner_boundary_index, outer_boundary_index_m1)]
abundances.columns = np.arange(len(abundances.columns))
return index, abundances
def read_simple_ascii_density(fname):
"""
Reading a density file of the following structure (example; lines starting with a hash will be ignored):
The first density describes the mean density in the center of the model and is not used.
5 s
#index velocity [km/s] density [g/cm^3]
0 1.1e4 1.6e8
1 1.2e4 1.7e8
Parameters
----------
fname: str
filename or path with filename
Returns
-------
time_of_model: ~astropy.units.Quantity
time at which the model is valid
data: ~pandas.DataFrame
data frame containing index, velocity (in km/s) and density
"""
with open(fname) as fh:
time_of_model_string = fh.readline().strip()
time_of_model = parse_quantity(time_of_model_string)
data = recfromtxt(fname, skip_header=1, names=('index', 'velocity', 'density'), dtype=(int, float, float))
velocity = (data['velocity'] * u.km / u.s).to('cm/s')
v_inner, v_outer = velocity[:-1], velocity[1:]
mean_density = (data['density'] * u.Unit('g/cm^3'))[1:]
return time_of_model, data['index'], v_inner, v_outer, mean_density
def read_artis_density(fname):
"""
Reading a density file of the following structure (example; lines starting with a hash will be ignored):
The first density describes the mean density in the center of the model and is not used.
5
#index velocity [km/s] log10(density) [log10(g/cm^3)]
0 1.1e4 1.6e8
1 1.2e4 1.7e8
Parameters
----------
fname: str
filename or path with filename
Returns
-------
time_of_model: ~astropy.units.Quantity
time at which the model is valid
data: ~pandas.DataFrame
data frame containing index, velocity (in km/s) and density
"""
with open(fname) as fh:
        for i, line in enumerate(fh):
if i == 0:
no_of_shells = np.int64(line.strip())
elif i == 1:
time_of_model = u.Quantity(float(line.strip()), 'day').to('s')
elif i == 2:
break
artis_model_columns = ['index', 'velocities', 'mean_densities_0', 'ni56_fraction', 'co56_fraction', 'fe52_fraction',
'cr48_fraction']
artis_model = recfromtxt(fname, skip_header=2, usecols=(0, 1, 2, 4, 5, 6, 7), unpack=True,
dtype=[(item, np.float64) for item in artis_model_columns])
velocity = u.Quantity(artis_model['velocities'], 'km/s').to('cm/s')
mean_density = u.Quantity(10 ** artis_model['mean_densities_0'], 'g/cm^3')
v_inner, v_outer = velocity[:-1], velocity[1:]
return time_of_model, artis_model['index'], v_inner, v_outer, mean_density
def read_simple_ascii_abundances(fname):
"""
    Reading an abundance file of the following structure (example; lines starting with a hash will be ignored):
The first line of abundances describe the abundances in the center of the model and are not used.
#index element1, element2, ..., element30
0 0.4 0.3, .. 0.2
Parameters
----------
fname: str
filename or path with filename
Returns
-------
index: ~np.ndarray
containing the indices
abundances: ~pandas.DataFrame
data frame containing index, element1 - element30 and columns according to the shells
"""
data = np.loadtxt(fname)
index = data[1:,0].astype(int)
abundances = pd.DataFrame(data[1:,1:].transpose(), index=np.arange(1, data.shape[1]))
return index, abundances
def calculate_density_after_time(densities, time_0, time_explosion):
"""
scale the density from an initial time of the model to the time of the explosion by ^-3
Parameters:
-----------
densities: ~astropy.units.Quantity
densities
time_0: ~astropy.units.Quantity
time of the model
time_explosion: ~astropy.units.Quantity
time to be scaled to
Returns:
--------
scaled_density
"""
return densities * (time_explosion / time_0) ** -3
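# Worked example (illustrative, not part of the original module): the t**-3
# scaling means doubling the elapsed time dilutes the density eightfold.
def _density_scaling_example():
    rho_0 = 8 * u.g / u.cm ** 3
    return calculate_density_after_time(rho_0, 1 * u.day, 2 * u.day)  # 1 g/cm^3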
| bsd-3-clause |
gauribhoite/personfinder | env/google_appengine/lib/django-1.4/django/utils/tzinfo.py | 90 | 3213 | "Implementation of tzinfo classes for use with datetime.datetime."
import time
from datetime import timedelta, tzinfo
from django.utils.encoding import smart_unicode, smart_str, DEFAULT_LOCALE_ENCODING
# Python's doc say: "A tzinfo subclass must have an __init__() method that can
# be called with no arguments". FixedOffset and LocalTimezone don't honor this
# requirement. Defining __getinitargs__ is sufficient to fix copy/deepcopy as
# well as pickling/unpickling.
class FixedOffset(tzinfo):
"Fixed offset in minutes east from UTC."
def __init__(self, offset):
if isinstance(offset, timedelta):
self.__offset = offset
offset = self.__offset.seconds // 60
else:
self.__offset = timedelta(minutes=offset)
sign = '-' if offset < 0 else '+'
self.__name = u"%s%02d%02d" % (sign, abs(offset) / 60., abs(offset) % 60)
def __repr__(self):
return self.__name
def __getinitargs__(self):
return self.__offset,
def utcoffset(self, dt):
return self.__offset
def tzname(self, dt):
return self.__name
def dst(self, dt):
return timedelta(0)
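# Hedged usage sketch (illustrative only): FixedOffset takes minutes east of
# UTC (or a timedelta); tzname() renders the conventional "+HHMM" form.
def _fixed_offset_example():
    from datetime import datetime
    ist = FixedOffset(330)                      # UTC+05:30
    dt = datetime(2012, 1, 1, tzinfo=ist)
    return dt.utcoffset(), dt.tzname()          # (timedelta(minutes=330), u'+0530')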
# This implementation is used for display purposes. It uses an approximation
# for DST computations on dates >= 2038.
# A similar implementation exists in django.utils.timezone. It's used for
# timezone support (when USE_TZ = True) and focuses on correctness.
class LocalTimezone(tzinfo):
"Proxy timezone information from time module."
def __init__(self, dt):
tzinfo.__init__(self)
self.__dt = dt
self._tzname = self.tzname(dt)
def __repr__(self):
return smart_str(self._tzname)
def __getinitargs__(self):
return self.__dt,
def utcoffset(self, dt):
if self._isdst(dt):
return timedelta(seconds=-time.altzone)
else:
return timedelta(seconds=-time.timezone)
def dst(self, dt):
if self._isdst(dt):
return timedelta(seconds=-time.altzone) - timedelta(seconds=-time.timezone)
else:
return timedelta(0)
def tzname(self, dt):
try:
return smart_unicode(time.tzname[self._isdst(dt)],
DEFAULT_LOCALE_ENCODING)
except UnicodeDecodeError:
return None
def _isdst(self, dt):
tt = (dt.year, dt.month, dt.day,
dt.hour, dt.minute, dt.second,
dt.weekday(), 0, 0)
try:
stamp = time.mktime(tt)
except (OverflowError, ValueError):
# 32 bit systems can't handle dates after Jan 2038, and certain
# systems can't handle dates before ~1901-12-01:
#
# >>> time.mktime((1900, 1, 13, 0, 0, 0, 0, 0, 0))
# OverflowError: mktime argument out of range
# >>> time.mktime((1850, 1, 13, 0, 0, 0, 0, 0, 0))
# ValueError: year out of range
#
# In this case, we fake the date, because we only care about the
# DST flag.
tt = (2037,) + tt[1:]
stamp = time.mktime(tt)
tt = time.localtime(stamp)
return tt.tm_isdst > 0
| apache-2.0 |
jameskyle/KExperiment | Scripts/create_task_list.py | 1 | 1800 | #!/opt/local/bin/python
import os
import sys
import re
import fnmatch
PROJECT_NAME = "KExperiment"
source_reg = re.compile(".*\.(cpp|h)$")
task_reg = re.compile("^\s*/+\s*(TODO|FIXME|BUG|NOTE|HACK):?\s*(.*)$", re.I)
source_match = source_reg.match
task_match = task_reg.match
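# Illustrative capture behaviour of task_reg (the sample comment is made up):
def _task_regex_example():
    m = task_match('  // TODO: wire up the frobnicator')
    return m.group(1), m.group(2)  # ('TODO', 'wire up the frobnicator')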
def main():
output = os.path.join(os.getcwd(), "{0}.tasks".format(PROJECT_NAME))
if len(sys.argv) < 2:
sys.stderr.write("You must provide a project root path\n")
exit(1)
if len(sys.argv) > 2:
output = os.path.abspath(sys.argv[2])
root = os.path.abspath(sys.argv[1])
matches = []
types = {
"todo": "err",
"fixme": "err",
"bug": "err",
"note": "info", # currently undefined
"hack": "warn"
}
for root, dirs, files in os.walk(root):
paths = [os.path.join(root, f) for f in filter(source_match, files)]
matches.extend(paths)
tasks = []
for source in matches:
with open(source, 'r') as f:
lines = f.readlines()
            for line_number, line in enumerate(lines, 1):
                # enumerate(..., 1) yields 1-based line numbers and, unlike
                # lines.index(line), stays correct when lines repeat.
                m = task_match(line)
                if m:
                    base = os.path.relpath(source)
t = types.get(m.group(1).lower(), "info")
desc = "{0}: {1}".format(m.group(1), m.group(2))
task = "{base}\t{line}\t{type}\t{desc}"
tasks.append(task.format(base=base, line=line_number,
type=t, desc=desc))
with open(output, 'w') as f:
f.write("\n".join(tasks))
if __name__ == "__main__":
    main()
| gpl-3.0 |
ToonTownInfiniteRepo/ToontownInfinite | Panda3D-1.9.0/python/Tools/pynche/StripViewer.py | 100 | 15465 | """Strip viewer and related widgets.
The classes in this file implement the StripViewer shown in the top two thirds
of the main Pynche window. It consists of three StripWidgets which display
the variations in red, green, and blue respectively of the currently selected
r/g/b color value.
Each StripWidget shows the color variations that are reachable by varying an
axis of the currently selected color. So for example, if the color is
(R,G,B)=(127,163,196)
then the Red variations show colors from (0,163,196) to (255,163,196), the
Green variations show colors from (127,0,196) to (127,255,196), and the Blue
variations show colors from (127,163,0) to (127,163,255).
The selected color is always visible in all three StripWidgets, and in fact
each StripWidget highlights the selected color, and has an arrow pointing to
the selected chip, which includes the value along that particular axis.
Clicking on any chip in any StripWidget selects that color, and updates all
arrows and other windows. By toggling on Update while dragging, Pynche will
select the color under the cursor while you drag it, but be forewarned that
this can be slow.
"""
from Tkinter import *
import ColorDB
# Load this script into the Tcl interpreter and call it in
# StripWidget.set_color(). This is about as fast as it can be with the
# current _tkinter.c interface, which doesn't support Tcl Objects.
TCLPROC = '''\
proc setcolor {canv colors} {
set i 1
foreach c $colors {
$canv itemconfigure $i -fill $c -outline $c
incr i
}
}
'''
# Tcl event types
BTNDOWN = 4
BTNUP = 5
BTNDRAG = 6
SPACE = ' '
def constant(numchips):
step = 255.0 / (numchips - 1)
start = 0.0
seq = []
while numchips > 0:
seq.append(int(start))
start = start + step
numchips = numchips - 1
return seq
# red constant, green+blue vary together (cyan axis)
def constant_red_generator(numchips, red, green, blue):
seq = constant(numchips)
return map(None, [red] * numchips, seq, seq)
# green constant, red+blue vary together (magenta axis)
def constant_green_generator(numchips, red, green, blue):
seq = constant(numchips)
return map(None, seq, [green] * numchips, seq)
# blue constant, red+green vary together (yellow axis)
def constant_blue_generator(numchips, red, green, blue):
seq = constant(numchips)
return map(None, seq, seq, [blue] * numchips)
# red variations, green+blue = cyan constant
def constant_cyan_generator(numchips, red, green, blue):
seq = constant(numchips)
return map(None, seq, [green] * numchips, [blue] * numchips)
# green variations, red+blue = magenta constant
def constant_magenta_generator(numchips, red, green, blue):
seq = constant(numchips)
return map(None, [red] * numchips, seq, [blue] * numchips)
# blue variations, red+green = yellow constant
def constant_yellow_generator(numchips, red, green, blue):
seq = constant(numchips)
return map(None, [red] * numchips, [green] * numchips, seq)
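# Worked example (illustrative): constant(3) ramps as [0, 127, 255], so the
# cyan generator varies red while holding green and blue fixed.
def _generator_example():
    return constant_cyan_generator(3, 10, 20, 30)
    # -> [(0, 20, 30), (127, 20, 30), (255, 20, 30)]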
class LeftArrow:
_ARROWWIDTH = 30
_ARROWHEIGHT = 15
_YOFFSET = 13
_TEXTYOFFSET = 1
_TAG = ('leftarrow',)
def __init__(self, canvas, x):
self._canvas = canvas
self.__arrow, self.__text = self._create(x)
self.move_to(x)
def _create(self, x):
arrow = self._canvas.create_line(
x, self._ARROWHEIGHT + self._YOFFSET,
x, self._YOFFSET,
x + self._ARROWWIDTH, self._YOFFSET,
arrow='first',
width=3.0,
tags=self._TAG)
text = self._canvas.create_text(
x + self._ARROWWIDTH + 13,
self._ARROWHEIGHT - self._TEXTYOFFSET,
tags=self._TAG,
text='128')
return arrow, text
def _x(self):
coords = self._canvas.coords(self._TAG)
assert coords
return coords[0]
def move_to(self, x):
deltax = x - self._x()
self._canvas.move(self._TAG, deltax, 0)
def set_text(self, text):
self._canvas.itemconfigure(self.__text, text=text)
class RightArrow(LeftArrow):
_TAG = ('rightarrow',)
def _create(self, x):
arrow = self._canvas.create_line(
x, self._YOFFSET,
x + self._ARROWWIDTH, self._YOFFSET,
x + self._ARROWWIDTH, self._ARROWHEIGHT + self._YOFFSET,
arrow='last',
width=3.0,
tags=self._TAG)
text = self._canvas.create_text(
x - self._ARROWWIDTH + 15, # BAW: kludge
self._ARROWHEIGHT - self._TEXTYOFFSET,
justify=RIGHT,
text='128',
tags=self._TAG)
return arrow, text
def _x(self):
coords = self._canvas.coords(self._TAG)
assert coords
return coords[0] + self._ARROWWIDTH
class StripWidget:
_CHIPHEIGHT = 50
_CHIPWIDTH = 10
_NUMCHIPS = 40
def __init__(self, switchboard,
master = None,
chipwidth = _CHIPWIDTH,
chipheight = _CHIPHEIGHT,
numchips = _NUMCHIPS,
generator = None,
axis = None,
label = '',
uwdvar = None,
hexvar = None):
# instance variables
self.__generator = generator
self.__axis = axis
self.__numchips = numchips
assert self.__axis in (0, 1, 2)
self.__uwd = uwdvar
self.__hexp = hexvar
# the last chip selected
self.__lastchip = None
self.__sb = switchboard
canvaswidth = numchips * (chipwidth + 1)
canvasheight = chipheight + 43 # BAW: Kludge
# create the canvas and pack it
canvas = self.__canvas = Canvas(master,
width=canvaswidth,
height=canvasheight,
## borderwidth=2,
## relief=GROOVE
)
canvas.pack()
canvas.bind('<ButtonPress-1>', self.__select_chip)
canvas.bind('<ButtonRelease-1>', self.__select_chip)
canvas.bind('<B1-Motion>', self.__select_chip)
# Load a proc into the Tcl interpreter. This is used in the
# set_color() method to speed up setting the chip colors.
canvas.tk.eval(TCLPROC)
# create the color strip
chips = self.__chips = []
x = 1
y = 30
tags = ('chip',)
for c in range(self.__numchips):
color = 'grey'
canvas.create_rectangle(
x, y, x+chipwidth, y+chipheight,
fill=color, outline=color,
tags=tags)
x = x + chipwidth + 1 # for outline
chips.append(color)
# create the strip label
self.__label = canvas.create_text(
3, y + chipheight + 8,
text=label,
anchor=W)
# create the arrow and text item
chipx = self.__arrow_x(0)
self.__leftarrow = LeftArrow(canvas, chipx)
chipx = self.__arrow_x(len(chips) - 1)
self.__rightarrow = RightArrow(canvas, chipx)
def __arrow_x(self, chipnum):
coords = self.__canvas.coords(chipnum+1)
assert coords
x0, y0, x1, y1 = coords
return (x1 + x0) / 2.0
# Invoked when one of the chips is clicked. This should just tell the
# switchboard to set the color on all the output components
def __select_chip(self, event=None):
x = event.x
y = event.y
canvas = self.__canvas
chip = canvas.find_overlapping(x, y, x, y)
if chip and (1 <= chip[0] <= self.__numchips):
color = self.__chips[chip[0]-1]
red, green, blue = ColorDB.rrggbb_to_triplet(color)
etype = int(event.type)
if (etype == BTNUP or self.__uwd.get()):
# update everyone
self.__sb.update_views(red, green, blue)
else:
# just track the arrows
self.__trackarrow(chip[0], (red, green, blue))
def __trackarrow(self, chip, rgbtuple):
# invert the last chip
if self.__lastchip is not None:
color = self.__canvas.itemcget(self.__lastchip, 'fill')
self.__canvas.itemconfigure(self.__lastchip, outline=color)
self.__lastchip = chip
# get the arrow's text
coloraxis = rgbtuple[self.__axis]
if self.__hexp.get():
# hex
text = hex(coloraxis)
else:
# decimal
text = repr(coloraxis)
# move the arrow, and set its text
if coloraxis <= 128:
# use the left arrow
self.__leftarrow.set_text(text)
self.__leftarrow.move_to(self.__arrow_x(chip-1))
self.__rightarrow.move_to(-100)
else:
# use the right arrow
self.__rightarrow.set_text(text)
self.__rightarrow.move_to(self.__arrow_x(chip-1))
self.__leftarrow.move_to(-100)
# and set the chip's outline
brightness = ColorDB.triplet_to_brightness(rgbtuple)
if brightness <= 128:
outline = 'white'
else:
outline = 'black'
self.__canvas.itemconfigure(chip, outline=outline)
def update_yourself(self, red, green, blue):
assert self.__generator
i = 1
chip = 0
chips = self.__chips = []
tk = self.__canvas.tk
# get the red, green, and blue components for all chips
for t in self.__generator(self.__numchips, red, green, blue):
rrggbb = ColorDB.triplet_to_rrggbb(t)
chips.append(rrggbb)
tred, tgreen, tblue = t
if tred <= red and tgreen <= green and tblue <= blue:
chip = i
i = i + 1
# call the raw tcl script
colors = SPACE.join(chips)
tk.eval('setcolor %s {%s}' % (self.__canvas._w, colors))
# move the arrows around
self.__trackarrow(chip, (red, green, blue))
def set(self, label, generator):
self.__canvas.itemconfigure(self.__label, text=label)
self.__generator = generator
class StripViewer:
def __init__(self, switchboard, master=None):
self.__sb = switchboard
optiondb = switchboard.optiondb()
# create a frame inside the master.
frame = Frame(master, relief=RAISED, borderwidth=1)
frame.grid(row=1, column=0, columnspan=2, sticky='NSEW')
# create the options to be used later
uwd = self.__uwdvar = BooleanVar()
uwd.set(optiondb.get('UPWHILEDRAG', 0))
hexp = self.__hexpvar = BooleanVar()
hexp.set(optiondb.get('HEXSTRIP', 0))
# create the red, green, blue strips inside their own frame
frame1 = Frame(frame)
frame1.pack(expand=YES, fill=BOTH)
self.__reds = StripWidget(switchboard, frame1,
generator=constant_cyan_generator,
axis=0,
label='Red Variations',
uwdvar=uwd, hexvar=hexp)
self.__greens = StripWidget(switchboard, frame1,
generator=constant_magenta_generator,
axis=1,
label='Green Variations',
uwdvar=uwd, hexvar=hexp)
self.__blues = StripWidget(switchboard, frame1,
generator=constant_yellow_generator,
axis=2,
label='Blue Variations',
uwdvar=uwd, hexvar=hexp)
# create a frame to contain the controls
frame2 = Frame(frame)
frame2.pack(expand=YES, fill=BOTH)
frame2.columnconfigure(0, weight=20)
frame2.columnconfigure(2, weight=20)
padx = 8
# create the black button
blackbtn = Button(frame2,
text='Black',
command=self.__toblack)
blackbtn.grid(row=0, column=0, rowspan=2, sticky=W, padx=padx)
# create the controls
uwdbtn = Checkbutton(frame2,
text='Update while dragging',
variable=uwd)
uwdbtn.grid(row=0, column=1, sticky=W)
hexbtn = Checkbutton(frame2,
text='Hexadecimal',
variable=hexp,
command=self.__togglehex)
hexbtn.grid(row=1, column=1, sticky=W)
# XXX: ignore this feature for now; it doesn't work quite right yet
## gentypevar = self.__gentypevar = IntVar()
## self.__variations = Radiobutton(frame,
## text='Variations',
## variable=gentypevar,
## value=0,
## command=self.__togglegentype)
## self.__variations.grid(row=0, column=1, sticky=W)
## self.__constants = Radiobutton(frame,
## text='Constants',
## variable=gentypevar,
## value=1,
## command=self.__togglegentype)
## self.__constants.grid(row=1, column=1, sticky=W)
# create the white button
whitebtn = Button(frame2,
text='White',
command=self.__towhite)
whitebtn.grid(row=0, column=2, rowspan=2, sticky=E, padx=padx)
def update_yourself(self, red, green, blue):
self.__reds.update_yourself(red, green, blue)
self.__greens.update_yourself(red, green, blue)
self.__blues.update_yourself(red, green, blue)
def __togglehex(self, event=None):
red, green, blue = self.__sb.current_rgb()
self.update_yourself(red, green, blue)
## def __togglegentype(self, event=None):
## which = self.__gentypevar.get()
## if which == 0:
## self.__reds.set(label='Red Variations',
## generator=constant_cyan_generator)
## self.__greens.set(label='Green Variations',
## generator=constant_magenta_generator)
## self.__blues.set(label='Blue Variations',
## generator=constant_yellow_generator)
## elif which == 1:
## self.__reds.set(label='Red Constant',
## generator=constant_red_generator)
## self.__greens.set(label='Green Constant',
## generator=constant_green_generator)
## self.__blues.set(label='Blue Constant',
## generator=constant_blue_generator)
## else:
## assert 0
## self.__sb.update_views_current()
def __toblack(self, event=None):
self.__sb.update_views(0, 0, 0)
def __towhite(self, event=None):
self.__sb.update_views(255, 255, 255)
def save_options(self, optiondb):
optiondb['UPWHILEDRAG'] = self.__uwdvar.get()
optiondb['HEXSTRIP'] = self.__hexpvar.get()
| mit |
aspose-cells/Aspose.Cells-for-Cloud | Examples/Python/Examples/GetCellStyleFromWorksheet.py | 2 | 1286 | import asposecellscloud
from asposecellscloud.CellsApi import CellsApi
from asposecellscloud.CellsApi import ApiException
import asposestoragecloud
from asposestoragecloud.StorageApi import StorageApi
apiKey = "XXXXX" #specify App Key
appSid = "XXXXX" #specify App SID
apiServer = "http://api.aspose.com/v1.1"
data_folder = "../../data/"
#Instantiate Aspose Storage API SDK
storage_apiClient = asposestoragecloud.ApiClient.ApiClient(apiKey, appSid, True)
storageApi = StorageApi(storage_apiClient)
#Instantiate Aspose Cells API SDK
api_client = asposecellscloud.ApiClient.ApiClient(apiKey, appSid, True)
cellsApi = CellsApi(api_client);
#set input file name
filename = "Sample_Test_Book.xls"
sheetName = "Sheet1"
cellName = "a1"
#upload file to aspose cloud storage
storageApi.PutCreate(Path=filename, file=data_folder + filename)
try:
#invoke Aspose.Cells Cloud SDK API to get cell style from a worksheet
response = cellsApi.GetWorksheetCellStyle(name=filename, sheetName=sheetName, cellName=cellName)
if response.Status == "OK":
cellStyle = response.Style
print "Cell Font Name :: " + cellStyle.Font.Name
except ApiException as ex:
print "ApiException:"
print "Code:" + str(ex.code)
print "Message:" + ex.message
| mit |
ULHPC/easybuild-framework | easybuild/framework/easyconfig/format/convert.py | 3 | 4431 | # #
# Copyright 2014-2017 Ghent University
#
# This file is part of EasyBuild,
# originally created by the HPC team of Ghent University (http://ugent.be/hpc/en),
# with support of Ghent University (http://ugent.be/hpc),
# the Flemish Supercomputer Centre (VSC) (https://www.vscentrum.be),
# Flemish Research Foundation (FWO) (http://www.fwo.be/en)
# and the Department of Economy, Science and Innovation (EWI) (http://www.ewi-vlaanderen.be/en).
#
# http://github.com/hpcugent/easybuild
#
# EasyBuild is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation v2.
#
# EasyBuild is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with EasyBuild. If not, see <http://www.gnu.org/licenses/>.
# #
"""
This module implements easyconfig specific formats and their conversions.
:author: Stijn De Weirdt (Ghent University)
"""
from easybuild.framework.easyconfig.format.version import VersionOperator, ToolchainVersionOperator
from easybuild.tools.convert import Convert, DictOfStrings, ListOfStrings
class Patch(DictOfStrings):
"""Handle single patch"""
ALLOWED_KEYS = ['level', 'dest']
KEYLESS_ENTRIES = ['filename'] # filename as first element (also filename:some_path is supported)
# explicit definition of __str__ is required for unknown reason related to the way Wrapper is defined
__str__ = DictOfStrings.__str__
def _from_string(self, txt):
"""Convert from string
# shorthand
filename;level:<int>;dest:<string> -> {'filename': filename, 'level': level, 'dest': dest}
# full dict notation
filename:filename;level:<int>;dest:<string> -> {'filename': filename, 'level': level, 'dest': dest}
"""
res = DictOfStrings._from_string(self, txt)
if 'level' in res:
res['level'] = int(res['level'])
return res
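# Hedged usage sketch of the shorthand documented in _from_string (the patch
# filename is made up):
def _patch_example():
    # -> {'filename': 'fix_configure.patch', 'level': 4}
    return Patch('fix_configure.patch;level:4')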
class Patches(ListOfStrings):
"""Handle patches as list of Patch"""
# explicit definition of __str__ is required for unknown reason related to the way Wrapper is defined
__str__ = ListOfStrings.__str__
def _from_string(self, txt):
"""Convert from comma-separated string"""
res = ListOfStrings._from_string(self, txt)
return [Patch(x) for x in res]
class Dependency(Convert):
"""Handle dependency"""
SEPARATOR_DEP = ';'
__wraps__ = dict
def __init__(self, obj, name=None):
"""Convert pass object to a dependency, use specified name if provided."""
super(Dependency, self).__init__(obj)
if name is not None:
self['name'] = name
def _from_string(self, txt):
"""Convert from string
versop_str;tc_versop_str -> {'versop': versop, 'tc_versop': tc_versop}
"""
res = {}
items = self._split_string(txt, sep=self.SEPARATOR_DEP)
if len(items) < 1 or len(items) > 2:
            msg = 'A dependency has at least one element (a version operator string) '
            msg += 'and at most two (the 2nd being the toolchain version operator string). '
            msg += 'Separator: %s.' % self.SEPARATOR_DEP
raise ValueError(msg)
res['versop'] = VersionOperator(items[0])
if len(items) > 1:
res['tc_versop'] = ToolchainVersionOperator(items[1])
return res
def __str__(self):
"""Return string"""
tmp = [str(self['versop'])]
if 'tc_versop' in self:
tmp.append(str(self['tc_versop']))
return self.SEPARATOR_DEP.join(tmp)
def name(self):
"""Get dependency name."""
return self.get('name', None)
def version(self):
"""Get dependency version."""
if 'versop' in self:
return self['versop'].get_version_str()
else:
return None
def versionsuffix(self):
"""Get dependency versionsuffix (if any)."""
return self['versop'].suffix
def toolchain(self):
"""Get toolchain spec for dependency (if any)."""
if 'tc_versop' in self:
return self['tc_versop'].as_dict()
else:
return None
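# Hedged sketch (assuming '>= 1.5' and 'GCC >= 4.8.2' are valid version
# operator strings for this format): the two parts are joined by ';'.
#
#   Dependency('>= 1.5;GCC >= 4.8.2', name='zlib').name()  # 'zlib'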
| gpl-2.0 |
valkjsaaa/sl4a | python/src/Lib/encodings/shift_jisx0213.py | 816 | 1059 | #
# shift_jisx0213.py: Python Unicode Codec for SHIFT_JISX0213
#
# Written by Hye-Shik Chang <perky@FreeBSD.org>
#
import _codecs_jp, codecs
import _multibytecodec as mbc
codec = _codecs_jp.getcodec('shift_jisx0213')
class Codec(codecs.Codec):
encode = codec.encode
decode = codec.decode
class IncrementalEncoder(mbc.MultibyteIncrementalEncoder,
codecs.IncrementalEncoder):
codec = codec
class IncrementalDecoder(mbc.MultibyteIncrementalDecoder,
codecs.IncrementalDecoder):
codec = codec
class StreamReader(Codec, mbc.MultibyteStreamReader, codecs.StreamReader):
codec = codec
class StreamWriter(Codec, mbc.MultibyteStreamWriter, codecs.StreamWriter):
codec = codec
def getregentry():
return codecs.CodecInfo(
name='shift_jisx0213',
encode=Codec().encode,
decode=Codec().decode,
incrementalencoder=IncrementalEncoder,
incrementaldecoder=IncrementalDecoder,
streamreader=StreamReader,
streamwriter=StreamWriter,
)
| apache-2.0 |
stainsteelcrown/nonsense-story-generator | venv/lib/python2.7/site-packages/pip/commands/__init__.py | 476 | 2236 | """
Package containing all pip commands
"""
from pip.commands.bundle import BundleCommand
from pip.commands.completion import CompletionCommand
from pip.commands.freeze import FreezeCommand
from pip.commands.help import HelpCommand
from pip.commands.list import ListCommand
from pip.commands.search import SearchCommand
from pip.commands.show import ShowCommand
from pip.commands.install import InstallCommand
from pip.commands.uninstall import UninstallCommand
from pip.commands.unzip import UnzipCommand
from pip.commands.zip import ZipCommand
from pip.commands.wheel import WheelCommand
commands = {
BundleCommand.name: BundleCommand,
CompletionCommand.name: CompletionCommand,
FreezeCommand.name: FreezeCommand,
HelpCommand.name: HelpCommand,
SearchCommand.name: SearchCommand,
ShowCommand.name: ShowCommand,
InstallCommand.name: InstallCommand,
UninstallCommand.name: UninstallCommand,
UnzipCommand.name: UnzipCommand,
ZipCommand.name: ZipCommand,
ListCommand.name: ListCommand,
WheelCommand.name: WheelCommand,
}
commands_order = [
InstallCommand,
UninstallCommand,
FreezeCommand,
ListCommand,
ShowCommand,
SearchCommand,
WheelCommand,
ZipCommand,
UnzipCommand,
BundleCommand,
HelpCommand,
]
def get_summaries(ignore_hidden=True, ordered=True):
"""Yields sorted (command name, command summary) tuples."""
if ordered:
cmditems = _sort_commands(commands, commands_order)
else:
cmditems = commands.items()
for name, command_class in cmditems:
if ignore_hidden and command_class.hidden:
continue
yield (name, command_class.summary)
def get_similar_commands(name):
"""Command name auto-correct."""
from difflib import get_close_matches
close_commands = get_close_matches(name, commands.keys())
if close_commands:
guess = close_commands[0]
else:
guess = False
return guess
def _sort_commands(cmddict, order):
def keyfn(key):
try:
return order.index(key[1])
except ValueError:
# unordered items should come last
return 0xff
return sorted(cmddict.items(), key=keyfn)
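# Illustrative helper (a sketch, not part of pip): render the ordered
# (name, summary) listing produced by get_summaries().
def _print_command_summaries():
    for name, summary in get_summaries():
        print('%-15s %s' % (name, summary))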
| mit |
kagayakidan/scikit-learn | sklearn/manifold/tests/test_isomap.py | 226 | 3941 | from itertools import product
import numpy as np
from numpy.testing import assert_almost_equal, assert_array_almost_equal
from sklearn import datasets
from sklearn import manifold
from sklearn import neighbors
from sklearn import pipeline
from sklearn import preprocessing
from sklearn.utils.testing import assert_less
eigen_solvers = ['auto', 'dense', 'arpack']
path_methods = ['auto', 'FW', 'D']
def test_isomap_simple_grid():
# Isomap should preserve distances when all neighbors are used
N_per_side = 5
Npts = N_per_side ** 2
n_neighbors = Npts - 1
# grid of equidistant points in 2D, n_components = n_dim
X = np.array(list(product(range(N_per_side), repeat=2)))
# distances from each point to all others
G = neighbors.kneighbors_graph(X, n_neighbors,
mode='distance').toarray()
for eigen_solver in eigen_solvers:
for path_method in path_methods:
clf = manifold.Isomap(n_neighbors=n_neighbors, n_components=2,
eigen_solver=eigen_solver,
path_method=path_method)
clf.fit(X)
G_iso = neighbors.kneighbors_graph(clf.embedding_,
n_neighbors,
mode='distance').toarray()
assert_array_almost_equal(G, G_iso)
def test_isomap_reconstruction_error():
# Same setup as in test_isomap_simple_grid, with an added dimension
N_per_side = 5
Npts = N_per_side ** 2
n_neighbors = Npts - 1
# grid of equidistant points in 2D, n_components = n_dim
X = np.array(list(product(range(N_per_side), repeat=2)))
# add noise in a third dimension
rng = np.random.RandomState(0)
noise = 0.1 * rng.randn(Npts, 1)
X = np.concatenate((X, noise), 1)
# compute input kernel
G = neighbors.kneighbors_graph(X, n_neighbors,
mode='distance').toarray()
centerer = preprocessing.KernelCenterer()
K = centerer.fit_transform(-0.5 * G ** 2)
for eigen_solver in eigen_solvers:
for path_method in path_methods:
clf = manifold.Isomap(n_neighbors=n_neighbors, n_components=2,
eigen_solver=eigen_solver,
path_method=path_method)
clf.fit(X)
# compute output kernel
G_iso = neighbors.kneighbors_graph(clf.embedding_,
n_neighbors,
mode='distance').toarray()
K_iso = centerer.fit_transform(-0.5 * G_iso ** 2)
# make sure error agrees
reconstruction_error = np.linalg.norm(K - K_iso) / Npts
assert_almost_equal(reconstruction_error,
clf.reconstruction_error())
def test_transform():
n_samples = 200
n_components = 10
noise_scale = 0.01
# Create S-curve dataset
X, y = datasets.samples_generator.make_s_curve(n_samples, random_state=0)
# Compute isomap embedding
iso = manifold.Isomap(n_components, 2)
X_iso = iso.fit_transform(X)
# Re-embed a noisy version of the points
rng = np.random.RandomState(0)
noise = noise_scale * rng.randn(*X.shape)
X_iso2 = iso.transform(X + noise)
# Make sure the rms error on re-embedding is comparable to noise_scale
assert_less(np.sqrt(np.mean((X_iso - X_iso2) ** 2)), 2 * noise_scale)
def test_pipeline():
# check that Isomap works fine as a transformer in a Pipeline
# only checks that no error is raised.
# TODO check that it actually does something useful
X, y = datasets.make_blobs(random_state=0)
clf = pipeline.Pipeline(
[('isomap', manifold.Isomap()),
('clf', neighbors.KNeighborsClassifier())])
clf.fit(X, y)
assert_less(.9, clf.score(X, y))
| bsd-3-clause |
fauferoth/assignment | .mywaflib/waflib/extras/color_gcc.py | 7 | 1138 | #!/usr/bin/env python
# encoding: utf-8
# Replaces the default formatter with one that understands GCC output and colorizes it.
__author__ = __maintainer__ = "Jérôme Carretero <cJ-waf@zougloub.eu>"
__copyright__ = "Jérôme Carretero, 2012"
import sys
from waflib import Logs
class ColorGCCFormatter(Logs.formatter):
def __init__(self, colors):
self.colors = colors
Logs.formatter.__init__(self)
def format(self, rec):
frame = sys._getframe()
while frame:
func = frame.f_code.co_name
if func == 'exec_command':
cmd = frame.f_locals['cmd']
if isinstance(cmd, list) and ('gcc' in cmd[0] or 'g++' in cmd[0]):
lines = []
for line in rec.msg.splitlines():
if 'warning: ' in line:
lines.append(self.colors.YELLOW + line)
elif 'error: ' in line:
lines.append(self.colors.RED + line)
elif 'note: ' in line:
lines.append(self.colors.CYAN + line)
else:
lines.append(line)
rec.msg = "\n".join(lines)
frame = frame.f_back
return Logs.formatter.format(self, rec)
def options(opt):
Logs.log.handlers[0].setFormatter(ColorGCCFormatter(Logs.colors))
| bsd-3-clause |
Juniper/neutron | neutron/common/log.py | 22 | 1332 | # Copyright (C) 2013 eNovance SAS <licensing@enovance.com>
#
# Author: Sylvain Afchain <sylvain.afchain@enovance.com>
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Log helper functions."""
from neutron.openstack.common import log as logging
LOG = logging.getLogger(__name__)
def log(method):
"""Decorator helping to log method calls."""
def wrapper(*args, **kwargs):
instance = args[0]
data = {"class_name": (instance.__class__.__module__ + '.'
+ instance.__class__.__name__),
"method_name": method.__name__,
"args": args[1:], "kwargs": kwargs}
LOG.debug(_('%(class_name)s method %(method_name)s'
' called with arguments %(args)s %(kwargs)s'), data)
return method(*args, **kwargs)
return wrapper
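# Hedged usage sketch (the plugin class and method below are illustrative,
# not taken from Neutron):
class _ExamplePlugin(object):
    @log
    def create_network(self, context, network):
        # The decorator emits a debug line naming the class, the method and
        # the call arguments, then delegates to this body.
        return network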
| apache-2.0 |
tamnm/kodi.mp3.zing.vn | dev/plugin.video.tvzing/bs4/element.py | 82 | 49756 | import collections
import re
import sys
import warnings
from bs4.dammit import EntitySubstitution
DEFAULT_OUTPUT_ENCODING = "utf-8"
PY3K = (sys.version_info[0] > 2)
whitespace_re = re.compile("\s+")
def _alias(attr):
"""Alias one attribute name to another for backward compatibility"""
@property
def alias(self):
return getattr(self, attr)
@alias.setter
    def alias(self, value):
        return setattr(self, attr, value)
return alias
class NamespacedAttribute(unicode):
def __new__(cls, prefix, name, namespace=None):
if name is None:
obj = unicode.__new__(cls, prefix)
else:
obj = unicode.__new__(cls, prefix + ":" + name)
obj.prefix = prefix
obj.name = name
obj.namespace = namespace
return obj
class AttributeValueWithCharsetSubstitution(unicode):
"""A stand-in object for a character encoding specified in HTML."""
class CharsetMetaAttributeValue(AttributeValueWithCharsetSubstitution):
"""A generic stand-in for the value of a meta tag's 'charset' attribute.
When Beautiful Soup parses the markup '<meta charset="utf8">', the
value of the 'charset' attribute will be one of these objects.
"""
def __new__(cls, original_value):
obj = unicode.__new__(cls, original_value)
obj.original_value = original_value
return obj
def encode(self, encoding):
return encoding
class ContentMetaAttributeValue(AttributeValueWithCharsetSubstitution):
"""A generic stand-in for the value of a meta tag's 'content' attribute.
When Beautiful Soup parses the markup:
<meta http-equiv="content-type" content="text/html; charset=utf8">
The value of the 'content' attribute will be one of these objects.
"""
CHARSET_RE = re.compile("((^|;)\s*charset=)([^;]*)", re.M)
def __new__(cls, original_value):
match = cls.CHARSET_RE.search(original_value)
if match is None:
# No substitution necessary.
return unicode.__new__(unicode, original_value)
obj = unicode.__new__(cls, original_value)
obj.original_value = original_value
return obj
def encode(self, encoding):
def rewrite(match):
return match.group(1) + encoding
return self.CHARSET_RE.sub(rewrite, self.original_value)
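# Illustrative sketch of the substitution encode() performs (values are
# made up):
def _charset_rewrite_example():
    value = ContentMetaAttributeValue(u'text/html; charset=utf8')
    return value.encode('shift-jis')  # u'text/html; charset=shift-jis'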
class PageElement(object):
"""Contains the navigational information for some part of the page
(either a tag or a piece of text)"""
# There are five possible values for the "formatter" argument passed in
# to methods like encode() and prettify():
#
# "html" - All Unicode characters with corresponding HTML entities
# are converted to those entities on output.
# "minimal" - Bare ampersands and angle brackets are converted to
# XML entities: & < >
# None - The null formatter. Unicode characters are never
# converted to entities. This is not recommended, but it's
# faster than "minimal".
# A function - This function will be called on every string that
    # needs to undergo entity substitution
FORMATTERS = {
"html" : EntitySubstitution.substitute_html,
"minimal" : EntitySubstitution.substitute_xml,
None : None
}
@classmethod
def format_string(self, s, formatter='minimal'):
"""Format the given string using the given formatter."""
if not callable(formatter):
formatter = self.FORMATTERS.get(
formatter, EntitySubstitution.substitute_xml)
if formatter is None:
output = s
else:
output = formatter(s)
return output
def setup(self, parent=None, previous_element=None):
"""Sets up the initial relations between this element and
other elements."""
self.parent = parent
self.previous_element = previous_element
if previous_element is not None:
self.previous_element.next_element = self
self.next_element = None
self.previous_sibling = None
self.next_sibling = None
if self.parent is not None and self.parent.contents:
self.previous_sibling = self.parent.contents[-1]
self.previous_sibling.next_sibling = self
nextSibling = _alias("next_sibling") # BS3
previousSibling = _alias("previous_sibling") # BS3
def replace_with(self, replace_with):
if replace_with is self:
return
if replace_with is self.parent:
raise ValueError("Cannot replace a Tag with its parent.")
old_parent = self.parent
my_index = self.parent.index(self)
self.extract()
old_parent.insert(my_index, replace_with)
return self
replaceWith = replace_with # BS3
def unwrap(self):
my_parent = self.parent
my_index = self.parent.index(self)
self.extract()
for child in reversed(self.contents[:]):
my_parent.insert(my_index, child)
return self
replace_with_children = unwrap
replaceWithChildren = unwrap # BS3
def wrap(self, wrap_inside):
me = self.replace_with(wrap_inside)
wrap_inside.append(me)
return wrap_inside
def extract(self):
"""Destructively rips this element out of the tree."""
if self.parent is not None:
del self.parent.contents[self.parent.index(self)]
#Find the two elements that would be next to each other if
#this element (and any children) hadn't been parsed. Connect
#the two.
last_child = self._last_descendant()
next_element = last_child.next_element
if self.previous_element is not None:
self.previous_element.next_element = next_element
if next_element is not None:
next_element.previous_element = self.previous_element
self.previous_element = None
last_child.next_element = None
self.parent = None
if self.previous_sibling is not None:
self.previous_sibling.next_sibling = self.next_sibling
if self.next_sibling is not None:
self.next_sibling.previous_sibling = self.previous_sibling
self.previous_sibling = self.next_sibling = None
return self
def _last_descendant(self):
"Finds the last element beneath this object to be parsed."
last_child = self
while hasattr(last_child, 'contents') and last_child.contents:
last_child = last_child.contents[-1]
return last_child
# BS3: Not part of the API!
_lastRecursiveChild = _last_descendant
def insert(self, position, new_child):
if new_child is self:
raise ValueError("Cannot insert a tag into itself.")
if (isinstance(new_child, basestring)
and not isinstance(new_child, NavigableString)):
new_child = NavigableString(new_child)
position = min(position, len(self.contents))
if hasattr(new_child, 'parent') and new_child.parent is not None:
# We're 'inserting' an element that's already one
# of this object's children.
if new_child.parent is self:
current_index = self.index(new_child)
if current_index < position:
# We're moving this element further down the list
# of this object's children. That means that when
# we extract this element, our target index will
# jump down one.
position -= 1
new_child.extract()
new_child.parent = self
previous_child = None
if position == 0:
new_child.previous_sibling = None
new_child.previous_element = self
else:
previous_child = self.contents[position - 1]
new_child.previous_sibling = previous_child
new_child.previous_sibling.next_sibling = new_child
new_child.previous_element = previous_child._last_descendant()
if new_child.previous_element is not None:
new_child.previous_element.next_element = new_child
new_childs_last_element = new_child._last_descendant()
if position >= len(self.contents):
new_child.next_sibling = None
parent = self
parents_next_sibling = None
while parents_next_sibling is None and parent is not None:
parents_next_sibling = parent.next_sibling
parent = parent.parent
if parents_next_sibling is not None:
# We found the element that comes next in the document.
break
if parents_next_sibling is not None:
new_childs_last_element.next_element = parents_next_sibling
else:
# The last element of this tag is the last element in
# the document.
new_childs_last_element.next_element = None
else:
next_child = self.contents[position]
new_child.next_sibling = next_child
if new_child.next_sibling is not None:
new_child.next_sibling.previous_sibling = new_child
new_childs_last_element.next_element = next_child
if new_childs_last_element.next_element is not None:
new_childs_last_element.next_element.previous_element = new_childs_last_element
self.contents.insert(position, new_child)
def append(self, tag):
"""Appends the given tag to the contents of this tag."""
self.insert(len(self.contents), tag)
def insert_before(self, predecessor):
"""Makes the given element the immediate predecessor of this one.
The two elements will have the same parent, and the given element
will be immediately before this one.
"""
if self is predecessor:
raise ValueError("Can't insert an element before itself.")
parent = self.parent
if parent is None:
raise ValueError(
"Element has no parent, so 'before' has no meaning.")
# Extract first so that the index won't be screwed up if they
# are siblings.
if isinstance(predecessor, PageElement):
predecessor.extract()
index = parent.index(self)
parent.insert(index, predecessor)
def insert_after(self, successor):
"""Makes the given element the immediate successor of this one.
The two elements will have the same parent, and the given element
will be immediately after this one.
"""
if self is successor:
raise ValueError("Can't insert an element after itself.")
parent = self.parent
if parent is None:
raise ValueError(
"Element has no parent, so 'after' has no meaning.")
# Extract first so that the index won't be screwed up if they
# are siblings.
if isinstance(successor, PageElement):
successor.extract()
index = parent.index(self)
parent.insert(index+1, successor)
def find_next(self, name=None, attrs={}, text=None, **kwargs):
"""Returns the first item that matches the given criteria and
appears after this Tag in the document."""
return self._find_one(self.find_all_next, name, attrs, text, **kwargs)
findNext = find_next # BS3
def find_all_next(self, name=None, attrs={}, text=None, limit=None,
**kwargs):
"""Returns all items that match the given criteria and appear
after this Tag in the document."""
return self._find_all(name, attrs, text, limit, self.next_elements,
**kwargs)
findAllNext = find_all_next # BS3
def find_next_sibling(self, name=None, attrs={}, text=None, **kwargs):
"""Returns the closest sibling to this Tag that matches the
given criteria and appears after this Tag in the document."""
return self._find_one(self.find_next_siblings, name, attrs, text,
**kwargs)
findNextSibling = find_next_sibling # BS3
def find_next_siblings(self, name=None, attrs={}, text=None, limit=None,
**kwargs):
"""Returns the siblings of this Tag that match the given
criteria and appear after this Tag in the document."""
return self._find_all(name, attrs, text, limit,
self.next_siblings, **kwargs)
findNextSiblings = find_next_siblings # BS3
fetchNextSiblings = find_next_siblings # BS2
def find_previous(self, name=None, attrs={}, text=None, **kwargs):
"""Returns the first item that matches the given criteria and
appears before this Tag in the document."""
return self._find_one(
self.find_all_previous, name, attrs, text, **kwargs)
findPrevious = find_previous # BS3
def find_all_previous(self, name=None, attrs={}, text=None, limit=None,
**kwargs):
"""Returns all items that match the given criteria and appear
before this Tag in the document."""
return self._find_all(name, attrs, text, limit, self.previous_elements,
**kwargs)
findAllPrevious = find_all_previous # BS3
fetchPrevious = find_all_previous # BS2
def find_previous_sibling(self, name=None, attrs={}, text=None, **kwargs):
"""Returns the closest sibling to this Tag that matches the
given criteria and appears before this Tag in the document."""
return self._find_one(self.find_previous_siblings, name, attrs, text,
**kwargs)
findPreviousSibling = find_previous_sibling # BS3
def find_previous_siblings(self, name=None, attrs={}, text=None,
limit=None, **kwargs):
"""Returns the siblings of this Tag that match the given
criteria and appear before this Tag in the document."""
return self._find_all(name, attrs, text, limit,
self.previous_siblings, **kwargs)
findPreviousSiblings = find_previous_siblings # BS3
fetchPreviousSiblings = find_previous_siblings # BS2
def find_parent(self, name=None, attrs={}, **kwargs):
"""Returns the closest parent of this Tag that matches the given
criteria."""
# NOTE: We can't use _find_one because findParents takes a different
# set of arguments.
r = None
l = self.find_parents(name, attrs, 1)
if l:
r = l[0]
return r
findParent = find_parent # BS3
def find_parents(self, name=None, attrs={}, limit=None, **kwargs):
"""Returns the parents of this Tag that match the given
criteria."""
return self._find_all(name, attrs, None, limit, self.parents,
**kwargs)
findParents = find_parents # BS3
fetchParents = find_parents # BS2
@property
def next(self):
return self.next_element
@property
def previous(self):
return self.previous_element
#These methods do the real heavy lifting.
def _find_one(self, method, name, attrs, text, **kwargs):
r = None
l = method(name, attrs, text, 1, **kwargs)
if l:
r = l[0]
return r
def _find_all(self, name, attrs, text, limit, generator, **kwargs):
"Iterates over a generator looking for things that match."
if isinstance(name, SoupStrainer):
strainer = name
elif text is None and not limit and not attrs and not kwargs:
# Optimization to find all tags.
if name is True or name is None:
return [element for element in generator
if isinstance(element, Tag)]
# Optimization to find all tags with a given name.
elif isinstance(name, basestring):
return [element for element in generator
if isinstance(element, Tag) and element.name == name]
else:
strainer = SoupStrainer(name, attrs, text, **kwargs)
else:
# Build a SoupStrainer
strainer = SoupStrainer(name, attrs, text, **kwargs)
results = ResultSet(strainer)
while True:
try:
i = next(generator)
except StopIteration:
break
if i:
found = strainer.search(i)
if found:
results.append(found)
if limit and len(results) >= limit:
break
return results
#These generators can be used to navigate starting from both
#NavigableStrings and Tags.
@property
def next_elements(self):
i = self.next_element
while i is not None:
yield i
i = i.next_element
@property
def next_siblings(self):
i = self.next_sibling
while i is not None:
yield i
i = i.next_sibling
@property
def previous_elements(self):
i = self.previous_element
while i is not None:
yield i
i = i.previous_element
@property
def previous_siblings(self):
i = self.previous_sibling
while i is not None:
yield i
i = i.previous_sibling
@property
def parents(self):
i = self.parent
while i is not None:
yield i
i = i.parent
# Methods for supporting CSS selectors.
tag_name_re = re.compile('^[a-z0-9]+$')
# /^(\w+)\[(\w+)([=~\|\^\$\*]?)=?"?([^\]"]*)"?\]$/
# \---/ \---/\-------------/ \-------/
# | | | |
# | | | The value
# | | ~,|,^,$,* or =
# | Attribute
# Tag
attribselect_re = re.compile(
r'^(?P<tag>\w+)?\[(?P<attribute>\w+)(?P<operator>[=~\|\^\$\*]?)' +
r'=?"?(?P<value>[^\]"]*)"?\]$'
)
def _attr_value_as_string(self, value, default=None):
"""Force an attribute value into a string representation.
A multi-valued attribute will be converted into a
        space-separated string.
"""
value = self.get(value, default)
if isinstance(value, list) or isinstance(value, tuple):
value =" ".join(value)
return value
def _attribute_checker(self, operator, attribute, value=''):
"""Create a function that performs a CSS selector operation.
Takes an operator, attribute and optional value. Returns a
function that will return True for elements that match that
combination.
"""
if operator == '=':
# string representation of `attribute` is equal to `value`
return lambda el: el._attr_value_as_string(attribute) == value
elif operator == '~':
# space-separated list representation of `attribute`
# contains `value`
def _includes_value(element):
attribute_value = element.get(attribute, [])
if not isinstance(attribute_value, list):
attribute_value = attribute_value.split()
return value in attribute_value
return _includes_value
elif operator == '^':
# string representation of `attribute` starts with `value`
return lambda el: el._attr_value_as_string(
attribute, '').startswith(value)
elif operator == '$':
            # string representation of `attribute` ends with `value`
return lambda el: el._attr_value_as_string(
attribute, '').endswith(value)
elif operator == '*':
# string representation of `attribute` contains `value`
return lambda el: value in el._attr_value_as_string(attribute, '')
elif operator == '|':
# string representation of `attribute` is either exactly
# `value` or starts with `value` and then a dash.
def _is_or_starts_with_dash(element):
attribute_value = element._attr_value_as_string(attribute, '')
return (attribute_value == value or attribute_value.startswith(
value + '-'))
return _is_or_starts_with_dash
else:
return lambda el: el.has_attr(attribute)
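    # Illustrative sketch (not part of the original module): for a parsed
    # element like <p class="a b" lang="en-us">, the checkers above behave
    # roughly as follows:
    #   _attribute_checker('~', 'class', 'b')(el)  -> True  (word in list)
    #   _attribute_checker('|', 'lang', 'en')(el)  -> True  ('en' or 'en-*')
    #   _attribute_checker('^', 'lang', 'fr')(el)  -> False (prefix mismatch)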
def select(self, selector):
"""Perform a CSS selection operation on the current element."""
tokens = selector.split()
current_context = [self]
for index, token in enumerate(tokens):
if tokens[index - 1] == '>':
# already found direct descendants in last step. skip this
# step.
continue
m = self.attribselect_re.match(token)
if m is not None:
# Attribute selector
tag, attribute, operator, value = m.groups()
if not tag:
tag = True
checker = self._attribute_checker(operator, attribute, value)
found = []
for context in current_context:
found.extend(
[el for el in context.find_all(tag) if checker(el)])
current_context = found
continue
if '#' in token:
# ID selector
tag, id = token.split('#', 1)
if tag == "":
tag = True
el = current_context[0].find(tag, {'id': id})
if el is None:
return [] # No match
current_context = [el]
continue
if '.' in token:
# Class selector
tag_name, klass = token.split('.', 1)
if not tag_name:
tag_name = True
classes = set(klass.split('.'))
found = []
def classes_match(tag):
if tag_name is not True and tag.name != tag_name:
return False
if not tag.has_attr('class'):
return False
return classes.issubset(tag['class'])
for context in current_context:
found.extend(context.find_all(classes_match))
current_context = found
continue
if token == '*':
# Star selector
found = []
for context in current_context:
found.extend(context.findAll(True))
current_context = found
continue
if token == '>':
# Child selector
tag = tokens[index + 1]
if not tag:
tag = True
found = []
for context in current_context:
found.extend(context.find_all(tag, recursive=False))
current_context = found
continue
# Here we should just have a regular tag
if not self.tag_name_re.match(token):
return []
found = []
for context in current_context:
found.extend(context.findAll(token))
current_context = found
return current_context
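    # Usage sketch (assumes a parsed `soup`; the markup names here are
    # hypothetical):
    #   soup.select('div.content')       # tag plus class selector
    #   soup.select('ul > li')           # direct children only
    #   soup.select('a[href^="http"]')   # attribute prefix selector
    # Note that this split()-based engine only understands the descendant,
    # '>', '#', '.', '*' and [attr] tokens handled above; more elaborate
    # CSS selectors are not supported.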
# Old non-property versions of the generators, for backwards
# compatibility with BS3.
def nextGenerator(self):
return self.next_elements
def nextSiblingGenerator(self):
return self.next_siblings
def previousGenerator(self):
return self.previous_elements
def previousSiblingGenerator(self):
return self.previous_siblings
def parentGenerator(self):
return self.parents
class NavigableString(unicode, PageElement):
PREFIX = ''
SUFFIX = ''
def __new__(cls, value):
"""Create a new NavigableString.
When unpickling a NavigableString, this method is called with
the string in DEFAULT_OUTPUT_ENCODING. That encoding needs to be
passed in to the superclass's __new__ or the superclass won't know
how to handle non-ASCII characters.
"""
if isinstance(value, unicode):
return unicode.__new__(cls, value)
return unicode.__new__(cls, value, DEFAULT_OUTPUT_ENCODING)
def __getnewargs__(self):
return (unicode(self),)
def __getattr__(self, attr):
"""text.string gives you text. This is for backwards
compatibility for Navigable*String, but for CData* it lets you
get the string without the CData wrapper."""
if attr == 'string':
return self
else:
raise AttributeError(
"'%s' object has no attribute '%s'" % (
self.__class__.__name__, attr))
def output_ready(self, formatter="minimal"):
output = self.format_string(self, formatter)
return self.PREFIX + output + self.SUFFIX
class PreformattedString(NavigableString):
"""A NavigableString not subject to the normal formatting rules.
The string will be passed into the formatter (to trigger side effects),
but the return value will be ignored.
"""
def output_ready(self, formatter="minimal"):
"""CData strings are passed into the formatter.
But the return value is ignored."""
self.format_string(self, formatter)
return self.PREFIX + self + self.SUFFIX
class CData(PreformattedString):
PREFIX = u'<![CDATA['
SUFFIX = u']]>'
class ProcessingInstruction(PreformattedString):
PREFIX = u'<?'
SUFFIX = u'?>'
class Comment(PreformattedString):
PREFIX = u'<!--'
SUFFIX = u'-->'
class Declaration(PreformattedString):
PREFIX = u'<!'
SUFFIX = u'!>'
class Doctype(PreformattedString):
@classmethod
def for_name_and_ids(cls, name, pub_id, system_id):
value = name
if pub_id is not None:
value += ' PUBLIC "%s"' % pub_id
if system_id is not None:
value += ' "%s"' % system_id
elif system_id is not None:
value += ' SYSTEM "%s"' % system_id
return Doctype(value)
PREFIX = u'<!DOCTYPE '
SUFFIX = u'>\n'
class Tag(PageElement):
"""Represents a found HTML tag with its attributes and contents."""
def __init__(self, parser=None, builder=None, name=None, namespace=None,
prefix=None, attrs=None, parent=None, previous=None):
"Basic constructor."
if parser is None:
self.parser_class = None
else:
# We don't actually store the parser object: that lets extracted
# chunks be garbage-collected.
self.parser_class = parser.__class__
if name is None:
raise ValueError("No value provided for new tag's name.")
self.name = name
self.namespace = namespace
self.prefix = prefix
if attrs is None:
attrs = {}
elif builder.cdata_list_attributes:
attrs = builder._replace_cdata_list_attribute_values(
self.name, attrs)
else:
attrs = dict(attrs)
self.attrs = attrs
self.contents = []
self.setup(parent, previous)
self.hidden = False
# Set up any substitutions, such as the charset in a META tag.
if builder is not None:
builder.set_up_substitutions(self)
self.can_be_empty_element = builder.can_be_empty_element(name)
else:
self.can_be_empty_element = False
parserClass = _alias("parser_class") # BS3
@property
def is_empty_element(self):
"""Is this tag an empty-element tag? (aka a self-closing tag)
A tag that has contents is never an empty-element tag.
A tag that has no contents may or may not be an empty-element
tag. It depends on the builder used to create the tag. If the
builder has a designated list of empty-element tags, then only
a tag whose name shows up in that list is considered an
empty-element tag.
If the builder has no designated list of empty-element tags,
then any tag with no contents is an empty-element tag.
"""
return len(self.contents) == 0 and self.can_be_empty_element
isSelfClosing = is_empty_element # BS3
@property
def string(self):
"""Convenience property to get the single string within this tag.
:Return: If this tag has a single string child, return value
is that string. If this tag has no children, or more than one
child, return value is None. If this tag has one child tag,
return value is the 'string' attribute of the child tag,
recursively.
"""
if len(self.contents) != 1:
return None
child = self.contents[0]
if isinstance(child, NavigableString):
return child
return child.string
@string.setter
def string(self, string):
self.clear()
self.append(string.__class__(string))
def _all_strings(self, strip=False):
"""Yield all child strings, possibly stripping them."""
for descendant in self.descendants:
if not isinstance(descendant, NavigableString):
continue
if strip:
descendant = descendant.strip()
if len(descendant) == 0:
continue
yield descendant
strings = property(_all_strings)
@property
def stripped_strings(self):
for string in self._all_strings(True):
yield string
def get_text(self, separator=u"", strip=False):
"""
Get all child strings, concatenated using the given separator.
"""
return separator.join([s for s in self._all_strings(strip)])
getText = get_text
text = property(get_text)
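    # Quick illustration (hypothetical markup, not from the original file):
    # given soup = BeautifulSoup('<p>one <b>two</b></p>'):
    #   soup.p.string     -> None (the <p> has two children)
    #   soup.b.string     -> u'two' (single string child)
    #   soup.p.get_text() -> u'one two' (all descendant strings joined)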
def decompose(self):
"""Recursively destroys the contents of this tree."""
self.extract()
i = self
while i is not None:
next = i.next_element
i.__dict__.clear()
i = next
def clear(self, decompose=False):
"""
Extract all children. If decompose is True, decompose instead.
"""
if decompose:
for element in self.contents[:]:
if isinstance(element, Tag):
element.decompose()
else:
element.extract()
else:
for element in self.contents[:]:
element.extract()
def index(self, element):
"""
Find the index of a child by identity, not value. Avoids issues with
tag.contents.index(element) getting the index of equal elements.
"""
for i, child in enumerate(self.contents):
if child is element:
return i
raise ValueError("Tag.index: element not in tag")
def get(self, key, default=None):
"""Returns the value of the 'key' attribute for the tag, or
the value given for 'default' if it doesn't have that
attribute."""
return self.attrs.get(key, default)
def has_attr(self, key):
return key in self.attrs
def __hash__(self):
return str(self).__hash__()
def __getitem__(self, key):
"""tag[key] returns the value of the 'key' attribute for the tag,
and throws an exception if it's not there."""
return self.attrs[key]
def __iter__(self):
"Iterating over a tag iterates over its contents."
return iter(self.contents)
def __len__(self):
"The length of a tag is the length of its list of contents."
return len(self.contents)
def __contains__(self, x):
return x in self.contents
def __nonzero__(self):
"A tag is non-None even if it has no contents."
return True
def __setitem__(self, key, value):
"""Setting tag[key] sets the value of the 'key' attribute for the
tag."""
self.attrs[key] = value
def __delitem__(self, key):
"Deleting tag[key] deletes all 'key' attributes for the tag."
self.attrs.pop(key, None)
def __call__(self, *args, **kwargs):
"""Calling a tag like a function is the same as calling its
find_all() method. Eg. tag('a') returns a list of all the A tags
found within this tag."""
return self.find_all(*args, **kwargs)
def __getattr__(self, tag):
#print "Getattr %s.%s" % (self.__class__, tag)
if len(tag) > 3 and tag.endswith('Tag'):
# BS3: soup.aTag -> "soup.find("a")
tag_name = tag[:-3]
warnings.warn(
'.%sTag is deprecated, use .find("%s") instead.' % (
tag_name, tag_name))
return self.find(tag_name)
# We special case contents to avoid recursion.
elif not tag.startswith("__") and not tag=="contents":
return self.find(tag)
raise AttributeError(
"'%s' object has no attribute '%s'" % (self.__class__, tag))
def __eq__(self, other):
"""Returns true iff this tag has the same name, the same attributes,
and the same contents (recursively) as the given tag."""
if self is other:
return True
if (not hasattr(other, 'name') or
not hasattr(other, 'attrs') or
not hasattr(other, 'contents') or
self.name != other.name or
self.attrs != other.attrs or
len(self) != len(other)):
return False
for i, my_child in enumerate(self.contents):
if my_child != other.contents[i]:
return False
return True
def __ne__(self, other):
"""Returns true iff this tag is not identical to the other tag,
as defined in __eq__."""
return not self == other
def __repr__(self, encoding=DEFAULT_OUTPUT_ENCODING):
"""Renders this tag as a string."""
return self.encode(encoding)
def __unicode__(self):
return self.decode()
def __str__(self):
return self.encode()
if PY3K:
__str__ = __repr__ = __unicode__
def encode(self, encoding=DEFAULT_OUTPUT_ENCODING,
indent_level=None, formatter="minimal",
errors="xmlcharrefreplace"):
# Turn the data structure into Unicode, then encode the
# Unicode.
u = self.decode(indent_level, encoding, formatter)
return u.encode(encoding, errors)
def decode(self, indent_level=None,
eventual_encoding=DEFAULT_OUTPUT_ENCODING,
formatter="minimal"):
"""Returns a Unicode representation of this tag and its contents.
:param eventual_encoding: The tag is destined to be
encoded into this encoding. This method is _not_
responsible for performing that encoding. This information
is passed in so that it can be substituted in if the
document contains a <META> tag that mentions the document's
encoding.
"""
attrs = []
if self.attrs:
for key, val in sorted(self.attrs.items()):
if val is None:
decoded = key
else:
if isinstance(val, list) or isinstance(val, tuple):
val = ' '.join(val)
elif not isinstance(val, basestring):
val = unicode(val)
elif (
isinstance(val, AttributeValueWithCharsetSubstitution)
and eventual_encoding is not None):
val = val.encode(eventual_encoding)
text = self.format_string(val, formatter)
decoded = (
unicode(key) + '='
+ EntitySubstitution.quoted_attribute_value(text))
attrs.append(decoded)
close = ''
closeTag = ''
prefix = ''
if self.prefix:
prefix = self.prefix + ":"
if self.is_empty_element:
close = '/'
else:
closeTag = '</%s%s>' % (prefix, self.name)
pretty_print = (indent_level is not None)
if pretty_print:
space = (' ' * (indent_level - 1))
indent_contents = indent_level + 1
else:
space = ''
indent_contents = None
contents = self.decode_contents(
indent_contents, eventual_encoding, formatter)
if self.hidden:
# This is the 'document root' object.
s = contents
else:
s = []
attribute_string = ''
if attrs:
attribute_string = ' ' + ' '.join(attrs)
if pretty_print:
s.append(space)
s.append('<%s%s%s%s>' % (
prefix, self.name, attribute_string, close))
if pretty_print:
s.append("\n")
s.append(contents)
if pretty_print and contents and contents[-1] != "\n":
s.append("\n")
if pretty_print and closeTag:
s.append(space)
s.append(closeTag)
if pretty_print and closeTag and self.next_sibling:
s.append("\n")
s = ''.join(s)
return s
def prettify(self, encoding=None, formatter="minimal"):
if encoding is None:
return self.decode(True, formatter=formatter)
else:
return self.encode(encoding, True, formatter=formatter)
def decode_contents(self, indent_level=None,
eventual_encoding=DEFAULT_OUTPUT_ENCODING,
formatter="minimal"):
"""Renders the contents of this tag as a Unicode string.
:param eventual_encoding: The tag is destined to be
encoded into this encoding. This method is _not_
responsible for performing that encoding. This information
is passed in so that it can be substituted in if the
document contains a <META> tag that mentions the document's
encoding.
"""
pretty_print = (indent_level is not None)
s = []
for c in self:
text = None
if isinstance(c, NavigableString):
text = c.output_ready(formatter)
elif isinstance(c, Tag):
s.append(c.decode(indent_level, eventual_encoding,
formatter))
if text and indent_level:
text = text.strip()
if text:
if pretty_print:
s.append(" " * (indent_level - 1))
s.append(text)
if pretty_print:
s.append("\n")
return ''.join(s)
def encode_contents(
self, indent_level=None, encoding=DEFAULT_OUTPUT_ENCODING,
formatter="minimal"):
"""Renders the contents of this tag as a bytestring."""
contents = self.decode_contents(indent_level, encoding, formatter)
return contents.encode(encoding)
# Old method for BS3 compatibility
def renderContents(self, encoding=DEFAULT_OUTPUT_ENCODING,
prettyPrint=False, indentLevel=0):
if not prettyPrint:
indentLevel = None
return self.encode_contents(
indent_level=indentLevel, encoding=encoding)
#Soup methods
def find(self, name=None, attrs={}, recursive=True, text=None,
**kwargs):
"""Return only the first child of this Tag matching the given
criteria."""
r = None
l = self.find_all(name, attrs, recursive, text, 1, **kwargs)
if l:
r = l[0]
return r
findChild = find
def find_all(self, name=None, attrs={}, recursive=True, text=None,
limit=None, **kwargs):
"""Extracts a list of Tag objects that match the given
criteria. You can specify the name of the Tag and any
attributes you want the Tag to have.
The value of a key-value pair in the 'attrs' map can be a
string, a list of strings, a regular expression object, or a
callable that takes a string and returns whether or not the
string matches for some custom definition of 'matches'. The
same is true of the tag name."""
generator = self.descendants
if not recursive:
generator = self.children
return self._find_all(name, attrs, text, limit, generator, **kwargs)
findAll = find_all # BS3
findChildren = find_all # BS2
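    # Matching sketch (hypothetical soup; shown to illustrate the docstring
    # above, not part of the original file):
    #   soup.find_all('a', {'class': 'external'}, limit=2)
    #   soup.find_all(re.compile('^h[1-6]$'))  # regexp matched against names
    #   soup.find_all(True)                    # every tag (fast path above)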
#Generator methods
@property
def children(self):
# return iter() to make the purpose of the method clear
return iter(self.contents) # XXX This seems to be untested.
@property
def descendants(self):
if not len(self.contents):
return
stopNode = self._last_descendant().next_element
current = self.contents[0]
while current is not stopNode:
yield current
current = current.next_element
# Old names for backwards compatibility
def childGenerator(self):
return self.children
def recursiveChildGenerator(self):
return self.descendants
# This was kind of misleading because has_key() (attributes) was
# different from __in__ (contents). has_key() is gone in Python 3,
# anyway.
has_key = has_attr
# Next, a couple classes to represent queries and their results.
class SoupStrainer(object):
"""Encapsulates a number of ways of matching a markup element (tag or
text)."""
def __init__(self, name=None, attrs={}, text=None, **kwargs):
self.name = self._normalize_search_value(name)
if not isinstance(attrs, dict):
# Treat a non-dict value for attrs as a search for the 'class'
# attribute.
kwargs['class'] = attrs
attrs = None
if 'class_' in kwargs:
# Treat class_="foo" as a search for the 'class'
# attribute, overriding any non-dict value for attrs.
kwargs['class'] = kwargs['class_']
del kwargs['class_']
if kwargs:
if attrs:
attrs = attrs.copy()
attrs.update(kwargs)
else:
attrs = kwargs
normalized_attrs = {}
for key, value in attrs.items():
normalized_attrs[key] = self._normalize_search_value(value)
self.attrs = normalized_attrs
self.text = self._normalize_search_value(text)
def _normalize_search_value(self, value):
# Leave it alone if it's a Unicode string, a callable, a
# regular expression, a boolean, or None.
if (isinstance(value, unicode) or callable(value) or hasattr(value, 'match')
or isinstance(value, bool) or value is None):
return value
# If it's a bytestring, convert it to Unicode, treating it as UTF-8.
if isinstance(value, bytes):
return value.decode("utf8")
# If it's listlike, convert it into a list of strings.
if hasattr(value, '__iter__'):
new_value = []
for v in value:
if (hasattr(v, '__iter__') and not isinstance(v, bytes)
and not isinstance(v, unicode)):
# This is almost certainly the user's mistake. In the
# interests of avoiding infinite loops, we'll let
# it through as-is rather than doing a recursive call.
new_value.append(v)
else:
new_value.append(self._normalize_search_value(v))
return new_value
# Otherwise, convert it into a Unicode string.
# The unicode(str()) thing is so this will do the same thing on Python 2
# and Python 3.
return unicode(str(value))
def __str__(self):
if self.text:
return self.text
else:
return "%s|%s" % (self.name, self.attrs)
def search_tag(self, markup_name=None, markup_attrs={}):
found = None
markup = None
if isinstance(markup_name, Tag):
markup = markup_name
markup_attrs = markup
call_function_with_tag_data = (
isinstance(self.name, collections.Callable)
and not isinstance(markup_name, Tag))
if ((not self.name)
or call_function_with_tag_data
or (markup and self._matches(markup, self.name))
or (not markup and self._matches(markup_name, self.name))):
if call_function_with_tag_data:
match = self.name(markup_name, markup_attrs)
else:
match = True
markup_attr_map = None
for attr, match_against in list(self.attrs.items()):
if not markup_attr_map:
if hasattr(markup_attrs, 'get'):
markup_attr_map = markup_attrs
else:
markup_attr_map = {}
for k, v in markup_attrs:
markup_attr_map[k] = v
attr_value = markup_attr_map.get(attr)
if not self._matches(attr_value, match_against):
match = False
break
if match:
if markup:
found = markup
else:
found = markup_name
if found and self.text and not self._matches(found.string, self.text):
found = None
return found
searchTag = search_tag
def search(self, markup):
# print 'looking for %s in %s' % (self, markup)
found = None
# If given a list of items, scan it for a text element that
# matches.
if hasattr(markup, '__iter__') and not isinstance(markup, (Tag, basestring)):
for element in markup:
if isinstance(element, NavigableString) \
and self.search(element):
found = element
break
# If it's a Tag, make sure its name or attributes match.
# Don't bother with Tags if we're searching for text.
elif isinstance(markup, Tag):
if not self.text or self.name or self.attrs:
found = self.search_tag(markup)
# If it's text, make sure the text matches.
elif isinstance(markup, NavigableString) or \
isinstance(markup, basestring):
if not self.name and not self.attrs and self._matches(markup, self.text):
found = markup
else:
raise Exception(
"I don't know how to match against a %s" % markup.__class__)
return found
def _matches(self, markup, match_against):
# print u"Matching %s against %s" % (markup, match_against)
result = False
if isinstance(markup, list) or isinstance(markup, tuple):
# This should only happen when searching a multi-valued attribute
# like 'class'.
if (isinstance(match_against, unicode)
and ' ' in match_against):
# A bit of a special case. If they try to match "foo
# bar" on a multivalue attribute's value, only accept
# the literal value "foo bar"
#
# XXX This is going to be pretty slow because we keep
# splitting match_against. But it shouldn't come up
# too often.
return (whitespace_re.split(match_against) == markup)
else:
for item in markup:
if self._matches(item, match_against):
return True
return False
if match_against is True:
# True matches any non-None value.
return markup is not None
if isinstance(match_against, collections.Callable):
return match_against(markup)
# Custom callables take the tag as an argument, but all
# other ways of matching match the tag name as a string.
if isinstance(markup, Tag):
markup = markup.name
# Ensure that `markup` is either a Unicode string, or None.
markup = self._normalize_search_value(markup)
if markup is None:
# None matches None, False, an empty string, an empty list, and so on.
return not match_against
if isinstance(match_against, unicode):
# Exact string match
return markup == match_against
if hasattr(match_against, 'match'):
# Regexp match
return match_against.search(markup)
if hasattr(match_against, '__iter__'):
# The markup must be an exact match against something
# in the iterable.
return markup in match_against
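    # Multi-valued illustration (hypothetical values): for a class attribute
    # parsed as [u'foo', u'bar'], match_against u'foo bar' only matches as
    # the literal pair via the whitespace_re split above, while u'foo'
    # matches through the per-item loop.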
class ResultSet(list):
"""A ResultSet is just a list that keeps track of the SoupStrainer
that created it."""
def __init__(self, source):
list.__init__([])
self.source = source
| gpl-2.0 |
energyPATHWAYS/energyPATHWAYS | energyPATHWAYS/dispatch_maintenance.py | 1 | 7610 |
from pyomo.environ import *
import numpy as np
import util
import config as cfg
import pdb
import pandas as pd
import copy
import dispatch_budget
import logging
def surplus_capacity(model):
return model.surplus_capacity + model.peak_penalty * model.weight_on_peak_penalty
def define_penalty_to_preference_high_cost_gen_maint_during_peak(model):
# if forced to choose between having high cost or low cost gen be on maintenance when load is high, we'd rather high cost gen be doing maintenance
# this should lower production cost overall and make maintenance schedules less random
return model.peak_penalty == sum([sum([model.marginal_costs[g]*model.max_load_by_group[i]*model.scheduled_maintenance[i, g] for g in model.g])
for i in model.i])
def feasible_maintenance_constraint_0(model, i, g):
return model.scheduled_maintenance[i, g] >= 0
def feasible_maintenance_constraint_1(model, i, g):
return model.scheduled_maintenance[i, g] <= 1
def define_available_gen(model, i):
return model.available_gen[i] == sum([(1 - model.scheduled_maintenance[i, g]) * model.pmax[g] for g in model.g])
def meet_maintenance_constraint(model, g):
# average maintenance across the hours == annual maintenance rate
return sum([model.scheduled_maintenance[i, g] * model.group_lengths[i] for i in model.i]) == model.annual_maintenace_hours[g]
def define_surplus_capacity(model, i):
return model.surplus_capacity >= model.available_gen[i] - model.max_load_by_group[i]
def scale_load_to_system(load, pmaxs, typical_reserve=1.15):
max_load = load.max()
sum_cap = sum(pmaxs)
if (max_load * typical_reserve) > sum_cap:
assert max_load != 0
load2 = load * (sum_cap / (max_load * typical_reserve))
return load2
else:
return load
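# Worked example (illustrative numbers only): with pmaxs summing to 100 MW and
# a peak load of 95 MW, 95 * 1.15 = 109.25 > 100, so load is scaled by
# 100 / 109.25 ~= 0.915, which restores a 15% planning reserve above the new
# peak of ~87 MW.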
def schedule_generator_maintenance(load, pmaxs, annual_maintenance_rates, dispatch_periods, marginal_costs, print_opt=False):
# annual maintenance rates must be between zero and one
annual_maintenance_rates = np.clip(annual_maintenance_rates, 0, 1)
# gives the index for the change between dispatch_periods
group_cuts = list(np.where(np.diff(dispatch_periods) != 0)[0] + 1) if dispatch_periods is not None else None
group_lengths = np.array([group_cuts[0]] + list(np.diff(group_cuts)) + [len(load) - group_cuts[-1]])
num_groups = len(group_cuts) + 1
# necessary to scale load in some cases for the optimization to work. Basically, load shouldn't be > gen
load_scaled = scale_load_to_system(load, pmaxs)
max_load_by_group = np.array([np.max(ls) for ls in np.array_split(load_scaled, np.array(group_cuts))])
annual_maintenace_hours = annual_maintenance_rates*len(load)
pmaxs_zero = np.nonzero(pmaxs==0)[0]
pmaxs_not_zero = np.nonzero(pmaxs)[0]
estimated_peak_penalty = sum(sum(np.outer(marginal_costs[pmaxs_not_zero],max_load_by_group).T*annual_maintenance_rates[pmaxs_not_zero]))
estimated_surplus_capacity = (pmaxs.sum() - max_load_by_group.min())*(1-annual_maintenance_rates.mean())
weight_on_peak_penalty = estimated_surplus_capacity/estimated_peak_penalty/10.
model = ConcreteModel()
# INPUT PARAMS
model.i = RangeSet(0, num_groups - 1)
model.g = RangeSet(0, len(pmaxs_not_zero) - 1)
model.annual_maintenace_hours = Param(model.g, initialize=dict(zip(model.g.keys(), annual_maintenace_hours[pmaxs_not_zero])))
model.pmax = Param(model.g, initialize=dict(zip(model.g.keys(), pmaxs[pmaxs_not_zero])))
model.marginal_costs = Param(model.g, initialize=dict(zip(model.g.keys(), marginal_costs[pmaxs_not_zero])))
model.max_load_by_group = Param(model.i, initialize=dict(zip(model.i.keys(), max_load_by_group)))
model.group_lengths = Param(model.i, initialize=dict(zip(model.i.keys(), group_lengths)))
model.weight_on_peak_penalty = Param(default=weight_on_peak_penalty)
# DECISIONS VARIABLES
model.available_gen = Var(model.i, within=NonNegativeReals)
model.scheduled_maintenance = Var(model.i, model.g, within=NonNegativeReals)
model.surplus_capacity = Var(within=NonNegativeReals)
model.peak_penalty = Var(within=NonNegativeReals)
# CONSTRAINTS
model.define_available_gen = Constraint(model.i, rule=define_available_gen)
model.feasible_maintenance_constraint_0 = Constraint(model.i, model.g, rule=feasible_maintenance_constraint_0)
model.feasible_maintenance_constraint_1 = Constraint(model.i, model.g, rule=feasible_maintenance_constraint_1)
model.meet_maintenance_constraint = Constraint(model.g, rule=meet_maintenance_constraint)
model.define_surplus_capacity = Constraint(model.i, rule=define_surplus_capacity)
model.define_penalty_to_preference_high_cost_gen_maint_during_peak = Constraint(rule=define_penalty_to_preference_high_cost_gen_maint_during_peak)
# OBJECTIVE
model.objective = Objective(rule=surplus_capacity, sense=minimize)
# SOLVE AND EXPORT RESULTS
solver = SolverFactory(cfg.solver_name or "cbc") # use cbc by default for testing, when you import config in a test, solver_name is None
results = solver.solve(model, tee=print_opt)
model.solutions.load_from(results)
scheduled_maintenance = np.empty((num_groups, len(pmaxs)))
scheduled_maintenance[:, pmaxs_zero] = annual_maintenance_rates[pmaxs_zero]
scheduled_maintenance[:, pmaxs_not_zero] = np.array([[model.scheduled_maintenance[i, g].value for i in model.i.keys()] for g in model.g.keys()]).T
return scheduled_maintenance
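# Usage sketch (hypothetical inputs, and it assumes an LP solver such as cbc
# is installed; not part of the original module):
#   load = np.random.rand(8760) * 90.          # hourly load for one year
#   pmaxs = np.array([50., 30., 20.])          # generator capacities
#   rates = np.array([0.05, 0.08, 0.10])       # annual maintenance rates
#   periods = np.repeat(np.arange(12), 730)    # 12 dispatch periods
#   costs = np.array([20., 35., 60.])          # marginal costs
#   sched = schedule_generator_maintenance(load, pmaxs, rates, periods, costs)
#   # sched has shape (12, 3): per period, each unit's maintenance fraction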
def schedule_generator_maintenance_loop(load, pmaxs, annual_maintenance_rates, dispatch_periods, scheduling_order):
# if nothing else, better to schedule the large generators first
scheduling_order = np.argsort(-pmaxs) if scheduling_order is None else scheduling_order
# annual maintenance rates must be between zero and one
annual_maintenance_rates = np.clip(annual_maintenance_rates, 0, 1)
# gives the index for the change between dispatch_periods
group_cuts = list(np.where(np.diff(dispatch_periods) != 0)[0] + 1) if dispatch_periods is not None else None
group_lengths = np.array([group_cuts[0]] + list(np.diff(group_cuts)) + [len(load) - group_cuts[-1]])
num_groups = len(group_cuts) + 1
# necessary to scale load in some cases for the optimization to work. Basically, load shouldn't be > gen
load_scaled = scale_load_to_system(load, pmaxs)
load_scaled = np.concatenate([[np.max(ls)]*gl for gl, ls in zip(group_lengths, np.array_split(load_scaled, np.array(group_cuts)))])
pmaxs_clipped = copy.deepcopy(pmaxs)
pmaxs_clipped = np.clip(pmaxs_clipped, 1e-1, None)
maintenance_energy = annual_maintenance_rates*pmaxs_clipped*len(load)
scheduled_maintenance = np.zeros((num_groups, len(pmaxs)))
# loop through and schedule maintenance for each generator one at a time. Update the net load after each one.
for i in scheduling_order:
energy_allocation = dispatch_budget.dispatch_to_energy_budget(load_scaled, -maintenance_energy[i], pmins=0, pmaxs=pmaxs_clipped[i])
scheduled_maintenance[:, i] = np.clip(np.array([np.mean(ls) for ls in np.array_split(energy_allocation, np.array(group_cuts))])/pmaxs_clipped[i], 0, 1)
load_scaled += np.concatenate([[sm * pmaxs[i]]*gl for gl, sm in zip(group_lengths, scheduled_maintenance[:, i])])
if not all(np.isclose(annual_maintenance_rates, (scheduled_maintenance.T * group_lengths).sum(axis=1)/len(load))):
logging.warning("scheduled maintance rates don't all match the annual maintenance rates")
return scheduled_maintenance | mit |
OmarIthawi/edx-platform | common/lib/xmodule/xmodule/open_ended_grading_classes/grading_service_module.py | 18 | 6195 | # This class gives a common interface for logging into the grading controller
import json
import logging
import requests
import dogstats_wrapper as dog_stats_api
from requests.exceptions import RequestException, ConnectionError, HTTPError
from .combined_open_ended_rubric import CombinedOpenEndedRubric, RubricParsingError
from lxml import etree
log = logging.getLogger(__name__)
class GradingServiceError(Exception):
"""
Exception for grading service. Shown when Open Response Assessment servers cannot be reached.
"""
pass
class GradingService(object):
"""
Interface to staff grading backend.
"""
def __init__(self, config):
self.username = config['username']
self.password = config['password']
self.session = requests.Session()
self.system = config['system']
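    # Example configuration (illustrative values only; concrete subclasses
    # are expected to provide login_url and METRIC_NAME):
    #   service = SomeGradingSubclass({'username': 'staff',
    #                                  'password': 'secret',
    #                                  'system': module_system})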
def _login(self):
"""
Log into the staff grading service.
Raises requests.exceptions.HTTPError if something goes wrong.
Returns the decoded json dict of the response.
"""
response = self.session.post(self.login_url,
{'username': self.username,
'password': self.password, })
response.raise_for_status()
return response.json()
def _metric_name(self, suffix):
"""
Return a metric name for datadog, using `self.METRIC_NAME` as
a prefix, and `suffix` as the suffix.
Arguments:
suffix (str): The metric suffix to use.
"""
return '{}.{}'.format(self.METRIC_NAME, suffix)
def _record_result(self, action, data, tags=None):
"""
Log results from an API call to an ORA service to datadog.
Arguments:
action (str): The ORA action being recorded.
data (dict): The data returned from the ORA service. Should contain the key 'success'.
tags (list): A list of tags to attach to the logged metric.
"""
if tags is None:
tags = []
tags.append(u'result:{}'.format(data.get('success', False)))
tags.append(u'action:{}'.format(action))
dog_stats_api.increment(self._metric_name('request.count'), tags=tags)
def post(self, url, data, allow_redirects=False):
"""
Make a post request to the grading controller. Returns the parsed json results of that request.
"""
try:
op = lambda: self.session.post(url, data=data,
allow_redirects=allow_redirects)
response_json = self._try_with_login(op)
except (RequestException, ConnectionError, HTTPError, ValueError) as err:
# reraise as promised GradingServiceError, but preserve stacktrace.
#This is a dev_facing_error
error_string = "Problem posting data to the grading controller. URL: {0}, data: {1}".format(url, data)
log.error(error_string)
raise GradingServiceError(error_string)
return response_json
def get(self, url, params, allow_redirects=False):
"""
Make a get request to the grading controller. Returns the parsed json results of that request.
"""
op = lambda: self.session.get(url,
allow_redirects=allow_redirects,
params=params)
try:
response_json = self._try_with_login(op)
except (RequestException, ConnectionError, HTTPError, ValueError) as err:
# reraise as promised GradingServiceError, but preserve stacktrace.
#This is a dev_facing_error
error_string = "Problem getting data from the grading controller. URL: {0}, params: {1}".format(url, params)
log.error(error_string)
raise GradingServiceError(error_string)
return response_json
def _try_with_login(self, operation):
"""
Call operation(), which should return a requests response object. If
the request fails with a 'login_required' error, call _login() and try
the operation again.
Returns the result of operation(). Does not catch exceptions.
"""
response = operation()
resp_json = response.json()
if (resp_json
and resp_json.get('success') is False
and resp_json.get('error') == 'login_required'):
# apparently we aren't logged in. Try to fix that.
r = self._login()
if r and not r.get('success'):
log.warning("Couldn't log into ORA backend. Response: %s",
r)
# try again
response = operation()
response.raise_for_status()
resp_json = response.json()
return resp_json
def _render_rubric(self, response, view_only=False):
"""
Given an HTTP Response json with the key 'rubric', render out the html
required to display the rubric and put it back into the response
returns the updated response as a dictionary that can be serialized later
"""
try:
if 'rubric' in response:
rubric = response['rubric']
rubric_renderer = CombinedOpenEndedRubric(self.system, view_only)
rubric_dict = rubric_renderer.render_rubric(rubric)
success = rubric_dict['success']
rubric_html = rubric_dict['html']
response['rubric'] = rubric_html
return response
# if we can't parse the rubric into HTML,
except (etree.XMLSyntaxError, RubricParsingError):
#This is a dev_facing_error
log.exception("Cannot parse rubric string. Raw string: {0}".format(response['rubric']))
return {'success': False,
'error': 'Error displaying submission'}
except ValueError:
#This is a dev_facing_error
log.exception("Error parsing response: {0}".format(response))
return {'success': False,
'error': "Error displaying submission"}
| agpl-3.0 |
tlakshman26/cinder-bug-fix-volume-conversion-full | cinder/api/v2/volumes.py | 2 | 18272 | # Copyright 2011 Justin Santa Barbara
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""The volumes api."""
import ast
from oslo_config import cfg
from oslo_log import log as logging
from oslo_utils import uuidutils
import webob
from webob import exc
from cinder.api import common
from cinder.api.openstack import wsgi
from cinder.api.v2.views import volumes as volume_views
from cinder.api import xmlutil
from cinder import consistencygroup as consistencygroupAPI
from cinder import exception
from cinder.i18n import _, _LI
from cinder.image import glance
from cinder import utils
from cinder import volume as cinder_volume
from cinder.volume import utils as volume_utils
from cinder.volume import volume_types
query_volume_filters_opt = cfg.ListOpt('query_volume_filters',
default=['name', 'status', 'metadata',
'availability_zone'],
help="Volume filter options which "
"non-admin user could use to "
"query volumes. Default values "
"are: ['name', 'status', "
"'metadata', 'availability_zone']")
CONF = cfg.CONF
CONF.register_opt(query_volume_filters_opt)
LOG = logging.getLogger(__name__)
SCHEDULER_HINTS_NAMESPACE =\
"http://docs.openstack.org/block-service/ext/scheduler-hints/api/v2"
def make_attachment(elem):
elem.set('id')
elem.set('attachment_id')
elem.set('server_id')
elem.set('host_name')
elem.set('volume_id')
elem.set('device')
def make_volume(elem):
elem.set('id')
elem.set('status')
elem.set('size')
elem.set('availability_zone')
elem.set('created_at')
elem.set('name')
elem.set('bootable')
elem.set('description')
elem.set('volume_type')
elem.set('snapshot_id')
elem.set('source_volid')
elem.set('consistencygroup_id')
elem.set('multiattach')
attachments = xmlutil.SubTemplateElement(elem, 'attachments')
attachment = xmlutil.SubTemplateElement(attachments, 'attachment',
selector='attachments')
make_attachment(attachment)
# Attach metadata node
elem.append(common.MetadataTemplate())
volume_nsmap = {None: xmlutil.XMLNS_VOLUME_V2, 'atom': xmlutil.XMLNS_ATOM}
class VolumeTemplate(xmlutil.TemplateBuilder):
def construct(self):
root = xmlutil.TemplateElement('volume', selector='volume')
make_volume(root)
return xmlutil.MasterTemplate(root, 1, nsmap=volume_nsmap)
class VolumesTemplate(xmlutil.TemplateBuilder):
def construct(self):
root = xmlutil.TemplateElement('volumes')
elem = xmlutil.SubTemplateElement(root, 'volume', selector='volumes')
make_volume(elem)
return xmlutil.MasterTemplate(root, 1, nsmap=volume_nsmap)
class CommonDeserializer(wsgi.MetadataXMLDeserializer):
"""Common deserializer to handle xml-formatted volume requests.
Handles standard volume attributes as well as the optional metadata
attribute
"""
metadata_deserializer = common.MetadataXMLDeserializer()
def _extract_scheduler_hints(self, volume_node):
"""Marshal the scheduler hints attribute of a parsed request."""
node =\
self.find_first_child_named_in_namespace(volume_node,
SCHEDULER_HINTS_NAMESPACE,
"scheduler_hints")
if node:
scheduler_hints = {}
for child in self.extract_elements(node):
scheduler_hints.setdefault(child.nodeName, [])
value = self.extract_text(child).strip()
scheduler_hints[child.nodeName].append(value)
return scheduler_hints
else:
return None
def _extract_volume(self, node):
"""Marshal the volume attribute of a parsed request."""
volume = {}
volume_node = self.find_first_child_named(node, 'volume')
attributes = ['name', 'description', 'size',
'volume_type', 'availability_zone', 'imageRef',
'image_id', 'snapshot_id', 'source_volid',
'consistencygroup_id']
for attr in attributes:
if volume_node.getAttribute(attr):
volume[attr] = volume_node.getAttribute(attr)
metadata_node = self.find_first_child_named(volume_node, 'metadata')
if metadata_node is not None:
volume['metadata'] = self.extract_metadata(metadata_node)
scheduler_hints = self._extract_scheduler_hints(volume_node)
if scheduler_hints:
volume['scheduler_hints'] = scheduler_hints
return volume
class CreateDeserializer(CommonDeserializer):
"""Deserializer to handle xml-formatted create volume requests.
Handles standard volume attributes as well as the optional metadata
attribute
"""
def default(self, string):
"""Deserialize an xml-formatted volume create request."""
dom = utils.safe_minidom_parse_string(string)
volume = self._extract_volume(dom)
return {'body': {'volume': volume}}
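# Example request body handled by the deserializer above (illustrative; the
# <meta> layout follows the common.MetadataXMLDeserializer convention):
#   <volume name="vol1" size="10">
#     <metadata><meta key="purpose">testing</meta></metadata>
#   </volume>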
class VolumeController(wsgi.Controller):
"""The Volumes API controller for the OpenStack API."""
_view_builder_class = volume_views.ViewBuilder
def __init__(self, ext_mgr):
self.volume_api = cinder_volume.API()
self.consistencygroup_api = consistencygroupAPI.API()
self.ext_mgr = ext_mgr
super(VolumeController, self).__init__()
@wsgi.serializers(xml=VolumeTemplate)
def show(self, req, id):
"""Return data about the given volume."""
context = req.environ['cinder.context']
try:
vol = self.volume_api.get(context, id, viewable_admin_meta=True)
req.cache_db_volume(vol)
except exception.VolumeNotFound as error:
raise exc.HTTPNotFound(explanation=error.msg)
utils.add_visible_admin_metadata(vol)
return self._view_builder.detail(req, vol)
def delete(self, req, id):
"""Delete a volume."""
context = req.environ['cinder.context']
LOG.info(_LI("Delete volume with id: %s"), id, context=context)
try:
volume = self.volume_api.get(context, id)
self.volume_api.delete(context, volume)
except exception.VolumeNotFound as error:
raise exc.HTTPNotFound(explanation=error.msg)
except exception.VolumeAttached:
msg = _("Volume cannot be deleted while in attached state")
raise exc.HTTPBadRequest(explanation=msg)
return webob.Response(status_int=202)
@wsgi.serializers(xml=VolumesTemplate)
def index(self, req):
"""Returns a summary list of volumes."""
return self._get_volumes(req, is_detail=False)
@wsgi.serializers(xml=VolumesTemplate)
def detail(self, req):
"""Returns a detailed list of volumes."""
return self._get_volumes(req, is_detail=True)
def _get_volumes(self, req, is_detail):
"""Returns a list of volumes, transformed through view builder."""
context = req.environ['cinder.context']
params = req.params.copy()
marker, limit, offset = common.get_pagination_params(params)
sort_keys, sort_dirs = common.get_sort_params(params)
filters = params
utils.remove_invalid_filter_options(context,
filters,
self._get_volume_filter_options())
# NOTE(thingee): v2 API allows name instead of display_name
if 'name' in sort_keys:
sort_keys[sort_keys.index('name')] = 'display_name'
if 'name' in filters:
filters['display_name'] = filters['name']
del filters['name']
for k, v in filters.items():
try:
filters[k] = ast.literal_eval(v)
except (ValueError, SyntaxError):
LOG.debug('Could not evaluate value %s, assuming string', v)
volumes = self.volume_api.get_all(context, marker, limit,
sort_keys=sort_keys,
sort_dirs=sort_dirs,
filters=filters,
viewable_admin_meta=True,
offset=offset)
for volume in volumes:
utils.add_visible_admin_metadata(volume)
req.cache_db_volumes(volumes.objects)
if is_detail:
volumes = self._view_builder.detail_list(req, volumes)
else:
volumes = self._view_builder.summary_list(req, volumes)
return volumes
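    # Filter-parsing illustration (hypothetical request, not from the
    # source): for ?metadata={'readonly': 'True'}&name=vol1, the
    # ast.literal_eval() pass above turns the metadata string into a dict,
    # while plain strings such as 'vol1' fail to parse and stay strings.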
def _image_uuid_from_ref(self, image_ref, context):
# If the image ref was generated by nova api, strip image_ref
# down to an id.
image_uuid = None
try:
image_uuid = image_ref.split('/').pop()
except AttributeError:
msg = _("Invalid imageRef provided.")
raise exc.HTTPBadRequest(explanation=msg)
image_service = glance.get_default_image_service()
# First see if this is an actual image ID
if uuidutils.is_uuid_like(image_uuid):
try:
image = image_service.show(context, image_uuid)
if 'id' in image:
return image['id']
except Exception:
# Pass and see if there is a matching image name
pass
# Could not find by ID, check if it is an image name
try:
params = {'filters': {'name': image_ref}}
images = list(image_service.detail(context, **params))
if len(images) > 1:
msg = _("Multiple matches found for '%s', use an ID to be more"
" specific.") % image_ref
raise exc.HTTPConflict(msg)
for img in images:
return img['id']
except Exception:
# Pass and let default not found error handling take care of it
pass
msg = _("Invalid image identifier or unable to "
"access requested image.")
raise exc.HTTPBadRequest(explanation=msg)
@wsgi.response(202)
@wsgi.serializers(xml=VolumeTemplate)
@wsgi.deserializers(xml=CreateDeserializer)
def create(self, req, body):
"""Creates a new volume."""
self.assert_valid_body(body, 'volume')
LOG.debug('Create volume request body: %s', body)
context = req.environ['cinder.context']
volume = body['volume']
kwargs = {}
self.validate_name_and_description(volume)
# NOTE(thingee): v2 API allows name instead of display_name
if 'name' in volume:
volume['display_name'] = volume.pop('name')
# NOTE(thingee): v2 API allows description instead of
# display_description
if 'description' in volume:
volume['display_description'] = volume.pop('description')
if 'image_id' in volume:
volume['imageRef'] = volume.get('image_id')
del volume['image_id']
req_volume_type = volume.get('volume_type', None)
if req_volume_type:
try:
if not uuidutils.is_uuid_like(req_volume_type):
kwargs['volume_type'] = \
volume_types.get_volume_type_by_name(
context, req_volume_type)
else:
kwargs['volume_type'] = volume_types.get_volume_type(
context, req_volume_type)
except exception.VolumeTypeNotFound as error:
raise exc.HTTPNotFound(explanation=error.msg)
kwargs['metadata'] = volume.get('metadata', None)
snapshot_id = volume.get('snapshot_id')
if snapshot_id is not None:
try:
kwargs['snapshot'] = self.volume_api.get_snapshot(context,
snapshot_id)
except exception.SnapshotNotFound as error:
raise exc.HTTPNotFound(explanation=error.msg)
else:
kwargs['snapshot'] = None
source_volid = volume.get('source_volid')
if source_volid is not None:
try:
kwargs['source_volume'] = \
self.volume_api.get_volume(context,
source_volid)
except exception.VolumeNotFound as error:
raise exc.HTTPNotFound(explanation=error.msg)
else:
kwargs['source_volume'] = None
source_replica = volume.get('source_replica')
if source_replica is not None:
try:
src_vol = self.volume_api.get_volume(context,
source_replica)
if src_vol['replication_status'] == 'disabled':
explanation = _('source volume id:%s is not'
' replicated') % source_replica
raise exc.HTTPBadRequest(explanation=explanation)
kwargs['source_replica'] = src_vol
except exception.VolumeNotFound as error:
raise exc.HTTPNotFound(explanation=error.msg)
else:
kwargs['source_replica'] = None
consistencygroup_id = volume.get('consistencygroup_id')
if consistencygroup_id is not None:
try:
kwargs['consistencygroup'] = \
self.consistencygroup_api.get(context,
consistencygroup_id)
except exception.ConsistencyGroupNotFound as error:
raise exc.HTTPNotFound(explanation=error.msg)
else:
kwargs['consistencygroup'] = None
size = volume.get('size', None)
if size is None and kwargs['snapshot'] is not None:
size = kwargs['snapshot']['volume_size']
elif size is None and kwargs['source_volume'] is not None:
size = kwargs['source_volume']['size']
elif size is None and kwargs['source_replica'] is not None:
size = kwargs['source_replica']['size']
LOG.info(_LI("Create volume of %s GB"), size, context=context)
if self.ext_mgr.is_loaded('os-image-create'):
image_ref = volume.get('imageRef')
if image_ref is not None:
image_uuid = self._image_uuid_from_ref(image_ref, context)
kwargs['image_id'] = image_uuid
kwargs['availability_zone'] = volume.get('availability_zone', None)
kwargs['scheduler_hints'] = volume.get('scheduler_hints', None)
multiattach = volume.get('multiattach', False)
kwargs['multiattach'] = multiattach
new_volume = self.volume_api.create(context,
size,
volume.get('display_name'),
volume.get('display_description'),
**kwargs)
retval = self._view_builder.detail(req, new_volume)
return retval
def _get_volume_filter_options(self):
"""Return volume search options allowed by non-admin."""
return CONF.query_volume_filters
@wsgi.serializers(xml=VolumeTemplate)
def update(self, req, id, body):
"""Update a volume."""
context = req.environ['cinder.context']
if not body:
msg = _("Missing request body")
raise exc.HTTPBadRequest(explanation=msg)
if 'volume' not in body:
msg = _("Missing required element '%s' in request body") % 'volume'
raise exc.HTTPBadRequest(explanation=msg)
volume = body['volume']
update_dict = {}
valid_update_keys = (
'name',
'description',
'display_name',
'display_description',
'metadata',
)
for key in valid_update_keys:
if key in volume:
update_dict[key] = volume[key]
self.validate_name_and_description(update_dict)
# NOTE(thingee): v2 API allows name instead of display_name
if 'name' in update_dict:
update_dict['display_name'] = update_dict.pop('name')
# NOTE(thingee): v2 API allows description instead of
# display_description
if 'description' in update_dict:
update_dict['display_description'] = update_dict.pop('description')
try:
volume = self.volume_api.get(context, id, viewable_admin_meta=True)
volume_utils.notify_about_volume_usage(context, volume,
'update.start')
self.volume_api.update(context, volume, update_dict)
except exception.VolumeNotFound as error:
raise exc.HTTPNotFound(explanation=error.msg)
volume.update(update_dict)
utils.add_visible_admin_metadata(volume)
volume_utils.notify_about_volume_usage(context, volume,
'update.end')
return self._view_builder.detail(req, volume)
def create_resource(ext_mgr):
return wsgi.Resource(VolumeController(ext_mgr))
| apache-2.0 |
alisonken1/openlp-projector-2.0 | openlp/projectors/projectormanager.py | 1 | 8836 | # -*- coding: utf-8 -*-
# vim: autoindent shiftwidth=4 expandtab textwidth=80 tabstop=4 softtabstop=4
###############################################################################
# OpenLP - Open Source Lyrics Projection #
# --------------------------------------------------------------------------- #
# Copyright (c) 2008-2014 Raoul Snyman #
# Portions copyright (c) 2008-2014 Tim Bentley, Gerald Britton, Jonathan #
# Corwin, Samuel Findlay, Michael Gorven, Scott Guerrieri, Matthias Hub, #
# Meinert Jordan, Armin Köhler, Erik Lundin, Edwin Lunando, Brian T. Meyer. #
# Joshua Miller, Stevan Pettit, Andreas Preikschat, Mattias Põldaru, #
# Christian Richter, Philip Ridout, Simon Scudder, Jeffrey Smith, #
# Maikel Stuivenberg, Martin Thompson, Jon Tibble, Dave Warnock, #
# Frode Woldsund, Martin Zibricky, Ken Roberts #
# --------------------------------------------------------------------------- #
# This program is free software; you can redistribute it and/or modify it #
# under the terms of the GNU General Public License as published by the Free #
# Software Foundation; version 2 of the License. #
# #
# This program is distributed in the hope that it will be useful, but WITHOUT #
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or #
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for #
# more details. #
# #
# You should have received a copy of the GNU General Public License along #
# with this program; if not, write to the Free Software Foundation, Inc., 59 #
# Temple Place, Suite 330, Boston, MA 02111-1307 USA #
###############################################################################
__version__ = '0.0.2'
__v = __version__.split('.')
__version_hex__ = int(__v[0]) << 24 | \
int(__v[1]) << 16 | \
int(__v[2]) << 8
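# Worked example (for illustration): '0.0.2' packs as
# (0 << 24) | (0 << 16) | (2 << 8) = 0x00000200.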
__module = 'projectors'
import logging
log = logging.getLogger(__name__)
import os
from PyQt4 import QtCore, QtGui
from openlp.core.lib import OpenLPToolbar, Receiver, SettingsManager
from openlp.core.lib import build_icon, check_item_selected, check_directory_exists, translate
from openlp.core.lib.db import Manager
from openlp.core.lib.settings import Settings
from openlp.core.lib.ui import UiStrings, critical_error_message_box, create_widget_action
from openlp.core.utils import AppLocation
from openlp.core.projectors import ProjectorForm
from openlp.core.projectors.db import init_schema, Projector
class ProjectorManager(QtGui.QWidget):
"""
    Manages the projector connections window
"""
log.info('ProjectorManager loaded')
def __init__(self, mainwindow, parent=None):
super(ProjectorManager, self).__init__(parent)
self.mainwindow = mainwindow
self.settingsSection = u'projectors'
self.manager = Manager(plugin_name=u'projectors', init_schema=init_schema)
self.projectorForm = ProjectorForm(self)
# Layout section
self.layout = QtGui.QVBoxLayout(self)
self.layout.setSpacing(0)
self.layout.setMargin(0)
self.layout.setObjectName(u'layout')
self.toolbar = OpenLPToolbar(self)
self.toolbar.setObjectName(u'toolbar')
self.toolbar.addToolbarAction(u'newProjector',
text=UiStrings().NewTheme,
icon=u':/general/general_new.png',
tooltip=translate('OpenLP.Projector', 'Add a new projector.'),
triggers=self.onAddProjector)
self.toolbar.addToolbarAction(u'editProjector',
text=translate('OpenLP.Projector', 'Edit projector'),
icon=u':/general/general_edit.png',
tooltip=translate('OpenLP.Projector', 'Edit a projector.'),
triggers=self.onEditProjector)
self.deleteToolbarAction = self.toolbar.addToolbarAction(u'deleteProjector',
text=translate('OpenLP.ThemeManager', 'Delete selected projector'),
icon=u':/general/general_delete.png',
tooltip=translate('OpenLP.ThemeManager', 'Delete selected projector.'),
triggers=self.onDeleteProjector)
self.layout.addWidget(self.toolbar)
# Projector manager list
self.projectorWidget = QtGui.QWidgetAction(self.toolbar)
self.projectorWidget.setObjectName(u'projectorWidget')
self.projectorListWidget = QtGui.QListWidget(self)
self.projectorListWidget.setIconSize(QtCore.QSize(75, 50))
        #self.projectorListWidget.setContextMenuPolicy(QtCore.Qt.CustomContextMenu)
self.projectorListWidget.setObjectName(u'projectorListWidget')
self.layout.addWidget(self.projectorListWidget)
QtCore.QObject.connect(self.projectorListWidget,
QtCore.SIGNAL('projectorContextMenu(QPoint)'),
self.contextMenu)
# build the context menu
self.menu = QtGui.QMenu()
self.editAction = create_widget_action(self.menu,
text=translate('OpenLP.ProjectorManager', '&Edit Projector'),
icon=u':/general/general_edit.png', triggers=self.onEditProjector)
self.deleteAction = create_widget_action(self.menu,
text=translate('OpenLP.ProjectorManager', '&Delete Projector'),
icon=u':/general/general_delete.png', triggers=self.onDeleteProjector)
# Signals
QtCore.QObject.connect(self.projectorListWidget, QtCore.SIGNAL(
u'currentItemChanged(QListWidgetItem *, QListWidgetItem *)'),
self.checkListState)
QtCore.QObject.connect(Receiver.get_receiver(),
QtCore.SIGNAL(u'config_updated'), self.configUpdated)
# Variables
self.projectorList = []
self.path = AppLocation.get_section_data_path(self.settingsSection)
log.debug('Setting data path location to %s' % self.path)
self.configUpdated()
def contextMenu(self, point):
"""
        Build the right-click context menu and set action state for the
        selected projector.
        """
        log.debug(u'contextMenu(point=%s)' % point)
        item = self.projectorListWidget.itemAt(point)
        if item is None:
            return
        real_projector_name = unicode(item.data(QtCore.Qt.UserRole).toString())
        projector_name = unicode(item.text())
        visible = real_projector_name == projector_name
        self.deleteAction.setVisible(visible)
        self.editAction.setVisible(visible)
        self.menu.exec_(self.projectorListWidget.mapToGlobal(point))
def configUpdated(self):
# Configuration updated - see if we are enabled or disabled
enabled = Settings().value(
self.settingsSection + u'/enabled',
QtCore.QVariant(True)).toBool()
e = 'Enabling' if enabled else 'Disabling'
log.debug(u'configUpdated() - %s projector controls' % e)
self.setVisible(enabled)
if len(self.projectorList) >= 1:
# Call each projector instance and either stop or start
for p in self.projectorList:
e = 'Starting' if enabled else 'Stopping'
log.debug('%s projector %s' % (e, 'testing'))
                if enabled:
p.start()
else:
p.stop()
def checkListState(self, item):
log.debug(u'checkListState()')
if item is None:
return
def contextMenu(self, point):
        """
        Build the right-click context menu.
        """
        log.debug(u'contextMenu()')
        item = self.projectorListWidget.itemAt(point)
        if item is None:
            return
        self.deleteAction.setVisible(True)
        self.editAction.setVisible(True)
self.menu.exec_(self.projectorListWidget.mapToGlobal(point))
def onAddProjector(self):
log.debug(u'onAddProjector()')
self.projectorForm.exec_()
def onEditProjector(self):
log.debug(u'onEditProjector()')
if check_item_selected(self.projectorListWidget, translate(
'OpenLP.ProjectorManager', 'You must select a projector to edit.')):
# Change this to index
item = self.projectorListWidget.currentRow()
            self.projectorForm.exec_(projector=self.projectorList[item])
def onDeleteProjector(self):
log.debug(u'onDeleteProjector()')
        # Delete projector from db
return
def loadProjectors(self):
log.debug(u'loadProjectors()')
return | gpl-2.0 |
akhmadMizkat/odoo | addons/mail/models/res_users.py | 3 | 6450 | # -*- coding: utf-8 -*-
from openerp import _, api, fields, models
import openerp
class Users(models.Model):
""" Update of res.users class
- add a preference about sending emails about notifications
- make a new user follow itself
- add a welcome message
- add suggestion preference
- if adding groups to a user, check mail.channels linked to this user
group, and the user. This is done by overriding the write method.
"""
_name = 'res.users'
_inherit = ['mail.alias.mixin', 'res.users']
alias_id = fields.Many2one('mail.alias', 'Alias', ondelete="restrict", required=True,
help="Email address internally associated with this user. Incoming "\
"emails will appear in the user's notifications.", copy=False, auto_join=True)
def __init__(self, pool, cr):
""" Override of __init__ to add access rights on notification_email_send
and alias fields. Access rights are disabled by default, but allowed
on some specific fields defined in self.SELF_{READ/WRITE}ABLE_FIELDS.
"""
init_res = super(Users, self).__init__(pool, cr)
# duplicate list to avoid modifying the original reference
self.SELF_WRITEABLE_FIELDS = list(self.SELF_WRITEABLE_FIELDS)
self.SELF_WRITEABLE_FIELDS.extend(['notify_email'])
# duplicate list to avoid modifying the original reference
self.SELF_READABLE_FIELDS = list(self.SELF_READABLE_FIELDS)
self.SELF_READABLE_FIELDS.extend(['notify_email', 'alias_domain', 'alias_name'])
return init_res
def get_alias_model_name(self, vals):
return self._name
def get_alias_values(self):
values = super(Users, self).get_alias_values()
values['alias_force_thread_id'] = self.id
return values
@api.model
def create(self, values):
if not values.get('login', False):
action = self.env.ref('base.action_res_users')
msg = _("You cannot create a new user from here.\n To create new user please go to configuration panel.")
raise openerp.exceptions.RedirectWarning(msg, action.id, _('Go to the configuration panel'))
user = super(Users, self).create(values)
# create a welcome message
user._create_welcome_message()
return user
@api.multi
def write(self, vals):
write_res = super(Users, self).write(vals)
if vals.get('groups_id'):
# form: {'group_ids': [(3, 10), (3, 3), (4, 10), (4, 3)]} or {'group_ids': [(6, 0, [ids])]}
user_group_ids = [command[1] for command in vals['groups_id'] if command[0] == 4]
user_group_ids += [id for command in vals['groups_id'] if command[0] == 6 for id in command[2]]
self.env['mail.channel'].search([('group_ids', 'in', user_group_ids)])._subscribe_users()
return write_res
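# For illustration (hypothetical values, not from the original source):
#   user.write({'groups_id': [(4, 10), (6, 0, [3, 7])]})
# makes user_group_ids == [10, 3, 7] above, so every mail.channel whose
# group_ids intersects those groups re-subscribes its users.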
def copy_data(self, *args, **kwargs):
data = super(Users, self).copy_data(*args, **kwargs)
if data and data.get('alias_name'):
data['alias_name'] = data['login']
return data
def _create_welcome_message(self):
self.ensure_one()
if not self.has_group('base.group_user'):
return False
company_name = self.company_id.name if self.company_id else ''
body = _('%s has joined the %s network.') % (self.name, company_name)
# TODO change SUPERUSER_ID into user.id but catch errors
return self.partner_id.sudo().message_post(body=body)
def _message_post_get_pid(self):
self.ensure_one()
if 'thread_model' in self.env.context:
self = self.with_context(thread_model='res.users')
return self.partner_id.id
@api.multi
def message_post(self, **kwargs):
""" Redirect the posting of message on res.users as a private discussion.
This is done because when giving the context of Chatter on the
various mailboxes, we do not have access to the current partner_id. """
current_pids = []
partner_ids = kwargs.get('partner_ids', [])
user_pid = self._message_post_get_pid()
for partner_id in partner_ids:
if isinstance(partner_id, (list, tuple)) and partner_id[0] == 4 and len(partner_id) == 2:
current_pids.append(partner_id[1])
elif isinstance(partner_id, (list, tuple)) and partner_id[0] == 6 and len(partner_id) == 3:
current_pids.append(partner_id[2])
elif isinstance(partner_id, (int, long)):
current_pids.append(partner_id)
if user_pid not in current_pids:
partner_ids.append(user_pid)
kwargs['partner_ids'] = partner_ids
# ??
# if context and context.get('thread_model') == 'res.partner':
# return self.pool['res.partner'].message_post(cr, uid, user_pid, **kwargs)
return self.env['mail.thread'].message_post(**kwargs) # ??
def message_update(self, msg_dict, update_vals=None):
return True
def message_subscribe(self, partner_ids=None, channel_ids=None, subtype_ids=None, force=True):
return True
@api.cr_uid_context
def message_get_partner_info_from_emails(self, cr, uid, emails, link_mail=False, context=None):
return self.pool.get('mail.thread').message_get_partner_info_from_emails(cr, uid, emails, link_mail=link_mail, context=context)
@api.multi
def message_get_suggested_recipients(self):
return dict((res_id, list()) for res_id in self._ids)
class res_groups_mail_channel(models.Model):
""" Update of res.groups class
- if adding users to a group, check mail.channels linked to this user
group and subscribe them. This is done by overriding the write method.
"""
_name = 'res.groups'
_inherit = 'res.groups'
@api.multi
def write(self, vals, context=None):
write_res = super(res_groups_mail_channel, self).write(vals)
if vals.get('users'):
# form: {'group_ids': [(3, 10), (3, 3), (4, 10), (4, 3)]} or {'group_ids': [(6, 0, [ids])]}
user_ids = [command[1] for command in vals['users'] if command[0] == 4]
user_ids += [id for command in vals['users'] if command[0] == 6 for id in command[2]]
self.env['mail.channel'].search([('group_ids', 'in', self._ids)])._subscribe_users()
return write_res
| gpl-3.0 |
c960657/dd-agent | dogstatsd.py | 3 | 18023 | #!/opt/datadog-agent/embedded/bin/python
# (C) Datadog, Inc. 2010-2016
# All rights reserved
# Licensed under Simplified BSD License (see LICENSE)
"""
A Python Statsd implementation with some datadog special sauce.
"""
# set up logging before importing any other components
from config import initialize_logging # noqa
initialize_logging('dogstatsd')
from utils.proxy import set_no_proxy_settings # noqa
set_no_proxy_settings()
# stdlib
import logging
import optparse
import os
import select
import signal
import socket
import sys
import threading
from time import sleep, time
from urllib import urlencode
import zlib
# For pickle & PID files, see issue 293
os.umask(022)
# 3rd party
import requests
import simplejson as json
# project
from aggregator import get_formatter, MetricsBucketAggregator
from checks.check_status import DogstatsdStatus
from checks.metric_types import MetricTypes
from config import get_config, get_version
from daemon import AgentSupervisor, Daemon
from util import chunks, get_hostname, get_uuid, plural
from utils.pidfile import PidFile
# urllib3 logs a bunch of stuff at the info level
requests_log = logging.getLogger("requests.packages.urllib3")
requests_log.setLevel(logging.WARN)
requests_log.propagate = True
log = logging.getLogger('dogstatsd')
PID_NAME = "dogstatsd"
PID_DIR = None
# Dogstatsd constants in seconds
DOGSTATSD_FLUSH_INTERVAL = 10
DOGSTATSD_AGGREGATOR_BUCKET_SIZE = 10
WATCHDOG_TIMEOUT = 120
UDP_SOCKET_TIMEOUT = 5
# Since we call flush more often than the metrics aggregation interval, we should
# log a bunch of flushes in a row every so often.
FLUSH_LOGGING_PERIOD = 70
FLUSH_LOGGING_INITIAL = 10
FLUSH_LOGGING_COUNT = 5
EVENT_CHUNK_SIZE = 50
COMPRESS_THRESHOLD = 1024
def add_serialization_status_metric(status, hostname):
"""
Add a metric to track the number of metric serializations,
tagged by their status.
"""
interval = 10.0
value = 1
return {
'tags': ["status:{0}".format(status)],
'metric': 'datadog.dogstatsd.serialization_status',
'interval': interval,
'device_name': None,
'host': hostname,
'points': [(time(), value / interval)],
'type': MetricTypes.RATE,
}
def unicode_metrics(metrics):
for i, metric in enumerate(metrics):
for key, value in metric.items():
if isinstance(value, basestring):
metric[key] = unicode(value, errors='replace')
elif isinstance(value, tuple) or isinstance(value, list):
value_list = list(value)
for j, value_element in enumerate(value_list):
if isinstance(value_element, basestring):
value_list[j] = unicode(value_element, errors='replace')
metric[key] = tuple(value_list)
metrics[i] = metric
return metrics
def serialize_metrics(metrics, hostname):
try:
metrics.append(add_serialization_status_metric("success", hostname))
serialized = json.dumps({"series": metrics})
except UnicodeDecodeError as e:
log.exception("Unable to serialize payload. Trying to replace bad characters. %s", e)
metrics.append(add_serialization_status_metric("failure", hostname))
try:
log.error(metrics)
serialized = json.dumps({"series": unicode_metrics(metrics)})
except Exception as e:
log.exception("Unable to serialize payload. Giving up. %s", e)
serialized = json.dumps({"series": [add_serialization_status_metric("permanent_failure", hostname)]})
if len(serialized) > COMPRESS_THRESHOLD:
headers = {'Content-Type': 'application/json',
'Content-Encoding': 'deflate'}
serialized = zlib.compress(serialized)
else:
headers = {'Content-Type': 'application/json'}
return serialized, headers
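# For orientation (an illustrative note, not part of the original source):
# a successfully serialized payload has the shape
#   {"series": [{"metric": ..., "points": [(ts, value)], "host": ..., ...}]}
# mirroring the dict built in add_serialization_status_metric() above, and it
# is deflate-compressed once its JSON form exceeds COMPRESS_THRESHOLD bytes.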
def serialize_event(event):
return json.dumps(event)
class Reporter(threading.Thread):
"""
The reporter periodically sends the aggregated metrics to the
server.
"""
def __init__(self, interval, metrics_aggregator, api_host, api_key=None,
use_watchdog=False, event_chunk_size=None):
threading.Thread.__init__(self)
self.interval = int(interval)
self.finished = threading.Event()
self.metrics_aggregator = metrics_aggregator
self.flush_count = 0
self.log_count = 0
self.hostname = get_hostname()
self.watchdog = None
if use_watchdog:
from util import Watchdog
self.watchdog = Watchdog(WATCHDOG_TIMEOUT)
self.api_key = api_key
self.api_host = api_host
self.event_chunk_size = event_chunk_size or EVENT_CHUNK_SIZE
def stop(self):
log.info("Stopping reporter")
self.finished.set()
def run(self):
log.info("Reporting to %s every %ss" % (self.api_host, self.interval))
log.debug("Watchdog enabled: %s" % bool(self.watchdog))
# Persist a start-up message.
DogstatsdStatus().persist()
while not self.finished.isSet(): # Use camel case isSet for 2.4 support.
self.finished.wait(self.interval)
self.metrics_aggregator.send_packet_count('datadog.dogstatsd.packet.count')
self.flush()
if self.watchdog:
self.watchdog.reset()
# Clean up the status messages.
log.debug("Stopped reporter")
DogstatsdStatus.remove_latest_status()
def flush(self):
try:
self.flush_count += 1
self.log_count += 1
packets_per_second = self.metrics_aggregator.packets_per_second(self.interval)
packet_count = self.metrics_aggregator.total_count
metrics = self.metrics_aggregator.flush()
count = len(metrics)
if self.flush_count % FLUSH_LOGGING_PERIOD == 0:
self.log_count = 0
if count:
self.submit(metrics)
events = self.metrics_aggregator.flush_events()
event_count = len(events)
if event_count:
self.submit_events(events)
service_checks = self.metrics_aggregator.flush_service_checks()
service_check_count = len(service_checks)
if service_check_count:
self.submit_service_checks(service_checks)
should_log = self.flush_count <= FLUSH_LOGGING_INITIAL or self.log_count <= FLUSH_LOGGING_COUNT
log_func = log.info
if not should_log:
log_func = log.debug
log_func("Flush #%s: flushed %s metric%s, %s event%s, and %s service check run%s" % (self.flush_count, count, plural(count), event_count, plural(event_count), service_check_count, plural(service_check_count)))
if self.flush_count == FLUSH_LOGGING_INITIAL:
log.info("First flushes done, %s flushes will be logged every %s flushes." % (FLUSH_LOGGING_COUNT, FLUSH_LOGGING_PERIOD))
# Persist a status message.
packet_count = self.metrics_aggregator.total_count
DogstatsdStatus(
flush_count=self.flush_count,
packet_count=packet_count,
packets_per_second=packets_per_second,
metric_count=count,
event_count=event_count,
service_check_count=service_check_count,
).persist()
except Exception:
if self.finished.isSet():
log.debug("Couldn't flush metrics, but that's expected as we're stopping")
else:
log.exception("Error flushing metrics")
def submit(self, metrics):
body, headers = serialize_metrics(metrics, self.hostname)
params = {}
if self.api_key:
params['api_key'] = self.api_key
url = '%s/api/v1/series?%s' % (self.api_host, urlencode(params))
self.submit_http(url, body, headers)
def submit_events(self, events):
headers = {'Content-Type':'application/json'}
event_chunk_size = self.event_chunk_size
for chunk in chunks(events, event_chunk_size):
payload = {
'apiKey': self.api_key,
'events': {
'api': chunk
},
'uuid': get_uuid(),
'internalHostname': get_hostname()
}
params = {}
if self.api_key:
params['api_key'] = self.api_key
url = '%s/intake?%s' % (self.api_host, urlencode(params))
self.submit_http(url, json.dumps(payload), headers)
def submit_http(self, url, data, headers):
headers["DD-Dogstatsd-Version"] = get_version()
log.debug("Posting payload to %s" % url)
try:
start_time = time()
r = requests.post(url, data=data, timeout=5, headers=headers)
r.raise_for_status()
if r.status_code >= 200 and r.status_code < 205:
log.debug("Payload accepted")
status = r.status_code
duration = round((time() - start_time) * 1000.0, 4)
log.debug("%s POST %s (%sms)" % (status, url, duration))
except Exception:
log.exception("Unable to post payload.")
try:
log.error("Received status code: {0}".format(r.status_code))
except Exception:
pass
def submit_service_checks(self, service_checks):
headers = {'Content-Type':'application/json'}
params = {}
if self.api_key:
params['api_key'] = self.api_key
url = '{0}/api/v1/check_run?{1}'.format(self.api_host, urlencode(params))
self.submit_http(url, json.dumps(service_checks), headers)
class Server(object):
"""
A statsd udp server.
"""
def __init__(self, metrics_aggregator, host, port, forward_to_host=None, forward_to_port=None):
self.host = host
self.port = int(port)
self.address = (self.host, self.port)
self.metrics_aggregator = metrics_aggregator
self.buffer_size = 1024 * 8
self.running = False
self.should_forward = forward_to_host is not None
self.forward_udp_sock = None
# In case we want to forward every packet received to another statsd server
if self.should_forward:
if forward_to_port is None:
forward_to_port = 8125
log.info("External statsd forwarding enabled. All packets received will be forwarded to %s:%s" % (forward_to_host, forward_to_port))
try:
self.forward_udp_sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
self.forward_udp_sock.connect((forward_to_host, forward_to_port))
except Exception:
log.exception("Error while setting up connection to external statsd server")
def start(self):
""" Run the server. """
# Bind to the UDP socket.
# IPv4 only
self.socket = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
self.socket.setblocking(0)
try:
self.socket.bind(self.address)
except socket.gaierror:
if self.address[0] == 'localhost':
log.warning("Warning localhost seems undefined in your host file, using 127.0.0.1 instead")
self.address = ('127.0.0.1', self.address[1])
self.socket.bind(self.address)
log.info('Listening on host & port: %s' % str(self.address))
# Inline variables for quick look-up.
buffer_size = self.buffer_size
aggregator_submit = self.metrics_aggregator.submit_packets
sock = [self.socket]
socket_recv = self.socket.recv
select_select = select.select
select_error = select.error
timeout = UDP_SOCKET_TIMEOUT
should_forward = self.should_forward
forward_udp_sock = self.forward_udp_sock
# Run our select loop.
self.running = True
while self.running:
try:
ready = select_select(sock, [], [], timeout)
if ready[0]:
message = socket_recv(buffer_size)
aggregator_submit(message)
if should_forward:
forward_udp_sock.send(message)
except select_error, se:
# Ignore interrupted system calls from sigterm.
errno = se[0]
if errno != 4:
raise
except (KeyboardInterrupt, SystemExit):
break
except Exception:
log.exception('Error receiving datagram')
def stop(self):
self.running = False
class Dogstatsd(Daemon):
""" This class is the dogstatsd daemon. """
def __init__(self, pid_file, server, reporter, autorestart):
Daemon.__init__(self, pid_file, autorestart=autorestart)
self.server = server
self.reporter = reporter
def _handle_sigterm(self, signum, frame):
log.debug("Caught sigterm. Stopping run loop.")
self.server.stop()
def run(self):
# Gracefully exit on sigterm.
signal.signal(signal.SIGTERM, self._handle_sigterm)
# Handle Keyboard Interrupt
signal.signal(signal.SIGINT, self._handle_sigterm)
# Start the reporting thread before accepting data
self.reporter.start()
try:
try:
self.server.start()
except Exception, e:
log.exception('Error starting server')
raise e
finally:
# The server will block until it's done. Once we're here, shutdown
# the reporting thread.
self.reporter.stop()
self.reporter.join()
log.info("Dogstatsd is stopped")
# Restart if asked to restart
if self.autorestart:
sys.exit(AgentSupervisor.RESTART_EXIT_STATUS)
@classmethod
def info(self):
logging.getLogger().setLevel(logging.ERROR)
return DogstatsdStatus.print_latest_status()
def init(config_path=None, use_watchdog=False, use_forwarder=False, args=None):
"""Configure the server and the reporting thread.
"""
c = get_config(parse_args=False, cfg_path=config_path)
if (not c['use_dogstatsd'] and
(args and args[0] in ['start', 'restart'] or not args)):
log.info("Dogstatsd is disabled. Exiting")
# We're exiting purposefully, so exit with zero (supervisor's expected
# code). HACK: Sleep a little bit so supervisor thinks we've started cleanly
# and thus can exit cleanly.
sleep(4)
sys.exit(0)
log.debug("Configuring dogstatsd")
port = c['dogstatsd_port']
interval = DOGSTATSD_FLUSH_INTERVAL
api_key = c['api_key']
aggregator_interval = DOGSTATSD_AGGREGATOR_BUCKET_SIZE
non_local_traffic = c['non_local_traffic']
forward_to_host = c.get('statsd_forward_host')
forward_to_port = c.get('statsd_forward_port')
event_chunk_size = c.get('event_chunk_size')
recent_point_threshold = c.get('recent_point_threshold', None)
target = c['dd_url']
if use_forwarder:
target = c['dogstatsd_target']
hostname = get_hostname(c)
# Create the aggregator (which is the point of communication between the
# server and reporting threads).
assert 0 < interval
aggregator = MetricsBucketAggregator(
hostname,
aggregator_interval,
recent_point_threshold=recent_point_threshold,
formatter=get_formatter(c),
histogram_aggregates=c.get('histogram_aggregates'),
histogram_percentiles=c.get('histogram_percentiles'),
utf8_decoding=c['utf8_decoding']
)
# Start the reporting thread.
reporter = Reporter(interval, aggregator, target, api_key, use_watchdog, event_chunk_size)
# Start the server on an IPv4 stack
# Default to loopback
server_host = c['bind_host']
# If specified, bind to all addresses
if non_local_traffic:
server_host = ''
server = Server(aggregator, server_host, port, forward_to_host=forward_to_host, forward_to_port=forward_to_port)
return reporter, server, c
def main(config_path=None):
""" The main entry point for the unix version of dogstatsd. """
# Deprecation notice
from utils.deprecations import deprecate_old_command_line_tools
deprecate_old_command_line_tools()
COMMANDS_START_DOGSTATSD = [
'start',
'stop',
'restart',
'status'
]
parser = optparse.OptionParser("%prog [start|stop|restart|status]")
parser.add_option('-u', '--use-local-forwarder', action='store_true',
dest="use_forwarder", default=False)
opts, args = parser.parse_args()
if not args or args[0] in COMMANDS_START_DOGSTATSD:
reporter, server, cnf = init(config_path, use_watchdog=True, use_forwarder=opts.use_forwarder, args=args)
daemon = Dogstatsd(PidFile(PID_NAME, PID_DIR).get_path(), server, reporter,
cnf.get('autorestart', False))
# If no args were passed in, run the server in the foreground.
if not args:
daemon.start(foreground=True)
return 0
# Otherwise, we process the daemon command.
else:
command = args[0]
if command == 'start':
daemon.start()
elif command == 'stop':
daemon.stop()
elif command == 'restart':
daemon.restart()
elif command == 'status':
daemon.status()
elif command == 'info':
return Dogstatsd.info()
else:
sys.stderr.write("Unknown command: %s\n\n" % command)
parser.print_help()
return 1
return 0
if __name__ == '__main__':
sys.exit(main())
| bsd-3-clause |
blopker/PCLite | pclite/http/downloaders/__init__.py | 1 | 1722 | '''
The MIT License
Copyright (c) Bo Lopker, http://blopker.com
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
'''
'''
Module to determine the correct downloader to use.
By @blopker
'''
from . import requests
from . import null
from . import wget
from ... import logger
log = logger.get(__name__)
# Check if this OS supports SSL
try:
import ssl
SSL = True
except ImportError:
SSL = False
def get():
if not SSL and wget.is_available():
log.debug('Using WGET downloader.')
return wget.WgetDownloader()
if SSL:
log.debug('Using Requests downloader.')
return requests.RequestsDownloader()
log.error('No suitable downloader found. Everything is terrible.')
return null.NullDownloader()
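# A minimal usage sketch (the download() method name is an assumption about
# the downloader classes' shared interface, not something defined here):
#
#   downloader = get()
#   downloader.download('https://example.com/pkg.zip', '/tmp/pkg.zip')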
| mit |
tersmitten/ansible | lib/ansible/modules/storage/glusterfs/gluster_peer.py | 37 | 5845 | #!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright 2015 Nandaja Varma <nvarma@redhat.com>
# Copyright 2018 Red Hat, Inc.
#
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
#
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: gluster_peer
short_description: Attach/Detach peers to/from the cluster
description:
- Create or diminish a GlusterFS trusted storage pool. A set of nodes can be
added into an existing trusted storage pool or a new storage pool can be
formed. Or, nodes can be removed from an existing trusted storage pool.
version_added: "2.6"
author: Sachidananda Urs (@sac)
options:
state:
choices: ["present", "absent"]
default: "present"
description:
- Determines whether the nodes should be attached to the pool or
removed from the pool. If the state is present, nodes will be
attached to the pool. If state is absent, nodes will be detached
from the pool.
required: true
nodes:
description:
- List of nodes that have to be probed into the pool.
required: true
force:
type: bool
default: "false"
description:
- Applicable only while removing the nodes from the pool. gluster
will refuse to detach a node from the pool if any one of the nodes
is down; in such cases force can be used.
requirements:
- GlusterFS > 3.2
notes:
- This module does not support check mode.
'''
EXAMPLES = '''
- name: Create a trusted storage pool
gluster_peer:
state: present
nodes:
- 10.0.1.5
- 10.0.1.10
- name: Delete a node from the trusted storage pool
gluster_peer:
state: absent
nodes:
- 10.0.1.10
- name: Delete a node from the trusted storage pool by force
gluster_peer:
state: absent
nodes:
- 10.0.0.1
force: true
'''
RETURN = '''
'''
from ansible.module_utils.basic import AnsibleModule
from distutils.version import LooseVersion
class Peer(object):
def __init__(self, module):
self.module = module
self.state = self.module.params['state']
self.nodes = self.module.params['nodes']
self.glustercmd = self.module.get_bin_path('gluster', True)
self.lang = dict(LANG='C', LC_ALL='C', LC_MESSAGES='C')
self.action = ''
self.force = ''
def gluster_peer_ops(self):
if not self.nodes:
self.module.fail_json(msg="nodes list cannot be empty")
self.force = 'force' if self.module.params.get('force') else ''
if self.state == 'present':
self.nodes = self.get_to_be_probed_hosts(self.nodes)
self.action = 'probe'
# In case of peer probe, we do not need `force'
self.force = ''
else:
self.action = 'detach'
self.call_peer_commands()
def get_to_be_probed_hosts(self, hosts):
peercmd = [self.glustercmd, 'pool', 'list', '--mode=script']
rc, output, err = self.module.run_command(peercmd,
environ_update=self.lang)
peers_in_cluster = [line.split('\t')[1].strip() for
line in filter(None, output.split('\n')[1:])]
try:
peers_in_cluster.remove('localhost')
except ValueError:
# It is ok not to have localhost in list
pass
hosts_to_be_probed = [host for host in hosts if host not in
peers_in_cluster]
return hosts_to_be_probed
def call_peer_commands(self):
result = {}
result['msg'] = ''
result['changed'] = False
for node in self.nodes:
peercmd = [self.glustercmd, 'peer', self.action, node, '--mode=script']
if self.force:
peercmd.append(self.force)
rc, out, err = self.module.run_command(peercmd,
environ_update=self.lang)
if rc:
result['rc'] = rc
result['msg'] = err
# Fail early, do not wait for the loop to finish
self.module.fail_json(**result)
else:
if 'already in peer' in out or \
'localhost not needed' in out:
result['changed'] |= False
else:
result['changed'] = True
self.module.exit_json(**result)
def main():
module = AnsibleModule(
argument_spec=dict(
force=dict(type='bool', required=False),
nodes=dict(type='list', required=True),
state=dict(type='str', choices=['absent', 'present'],
default='present'),
),
supports_check_mode=False
)
pops = Peer(module)
required_version = "3.2"
# Verify if required GlusterFS version is installed
if is_invalid_gluster_version(module, required_version):
module.fail_json(msg="GlusterFS version > %s is required" %
required_version)
pops.gluster_peer_ops()
def is_invalid_gluster_version(module, required_version):
cmd = module.get_bin_path('gluster', True) + ' --version'
result = module.run_command(cmd)
ver_line = result[1].split('\n')[0]
version = ver_line.split(' ')[1]
# If the installed version is less than 3.2, it is an invalid version
# return True
return LooseVersion(version) < LooseVersion(required_version)
if __name__ == "__main__":
main()
| gpl-3.0 |
sergiusens/snapcraft | tests/unit/repo/test_deb.py | 1 | 19745 | # -*- Mode:Python; indent-tabs-mode:nil; tab-width:4 -*-
#
# Copyright (C) 2015-2018 Canonical Ltd
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import apt
import os
from subprocess import CalledProcessError
from unittest.mock import ANY, DEFAULT, call, patch, MagicMock
from testtools.matchers import Contains, Equals, FileExists, Not
import snapcraft
from snapcraft.internal import repo
from snapcraft.internal.repo import errors
from tests import fixture_setup, unit
from . import RepoBaseTestCase
class UbuntuTestCase(RepoBaseTestCase):
def setUp(self):
super().setUp()
patcher = patch("snapcraft.repo._deb.apt.Cache")
self.mock_cache = patcher.start()
self.addCleanup(patcher.stop)
def _fetch_binary(download_dir, **kwargs):
path = os.path.join(download_dir, "fake-package.deb")
open(path, "w").close()
return path
self.mock_package = MagicMock()
self.mock_package.candidate.fetch_binary.side_effect = _fetch_binary
self.mock_cache.return_value.get_changes.return_value = [self.mock_package]
@patch("snapcraft.internal.repo._deb._AptCache.fetch_binary")
@patch("snapcraft.internal.repo._deb.apt.apt_pkg")
def test_cache_update_failed(self, mock_apt_pkg, mock_fetch_binary):
fake_package_path = os.path.join(self.path, "fake-package.deb")
open(fake_package_path, "w").close()
mock_fetch_binary.return_value = fake_package_path
self.mock_cache().is_virtual_package.return_value = False
self.mock_cache().update.side_effect = apt.cache.FetchFailedException()
project_options = snapcraft.ProjectOptions(use_geoip=False)
ubuntu = repo.Ubuntu(self.tempdir, project_options=project_options)
self.assertRaises(errors.CacheUpdateFailedError, ubuntu.get, ["fake-package"])
@patch("shutil.rmtree")
@patch("snapcraft.internal.repo._deb._AptCache.fetch_binary")
@patch("snapcraft.internal.repo._deb.apt.apt_pkg")
def test_cache_hashsum_mismatch(self, mock_apt_pkg, mock_fetch_binary, mock_rmtree):
fake_package_path = os.path.join(self.path, "fake-package.deb")
open(fake_package_path, "w").close()
mock_fetch_binary.return_value = fake_package_path
self.mock_cache().is_virtual_package.return_value = False
self.mock_cache().update.side_effect = [
apt.cache.FetchFailedException(
"E:Failed to fetch copy:foo Hash Sum mismatch"
),
DEFAULT,
]
project_options = snapcraft.ProjectOptions(use_geoip=False)
ubuntu = repo.Ubuntu(self.tempdir, project_options=project_options)
ubuntu.get(["fake-package"])
def test_get_pkg_name_parts_name_only(self):
name, version = repo.get_pkg_name_parts("hello")
self.assertThat(name, Equals("hello"))
self.assertThat(version, Equals(None))
def test_get_pkg_name_parts_all(self):
name, version = repo.get_pkg_name_parts("hello:i386=2.10-1")
self.assertThat(name, Equals("hello:i386"))
self.assertThat(version, Equals("2.10-1"))
def test_get_pkg_name_parts_no_arch(self):
name, version = repo.get_pkg_name_parts("hello=2.10-1")
self.assertThat(name, Equals("hello"))
self.assertThat(version, Equals("2.10-1"))
@patch("snapcraft.internal.repo._deb._AptCache.fetch_binary")
@patch("snapcraft.internal.repo._deb.apt.apt_pkg")
def test_get_package(self, mock_apt_pkg, mock_fetch_binary):
fake_package_path = os.path.join(self.path, "fake-package.deb")
open(fake_package_path, "w").close()
mock_fetch_binary.return_value = fake_package_path
self.mock_cache().is_virtual_package.return_value = False
project_options = snapcraft.ProjectOptions(use_geoip=False)
ubuntu = repo.Ubuntu(self.tempdir, project_options=project_options)
ubuntu.get(["fake-package"])
mock_apt_pkg.assert_has_calls(
[
call.config.set("Apt::Install-Recommends", "False"),
call.config.find_file("Dir::Etc::Trusted"),
call.config.set("Dir::Etc::Trusted", ANY),
call.config.find_file("Dir::Etc::TrustedParts"),
call.config.set("Dir::Etc::TrustedParts", ANY),
call.config.clear("APT::Update::Post-Invoke-Success"),
]
)
self.mock_cache.assert_has_calls(
[
call(memonly=True, rootdir=ANY),
call().update(fetch_progress=ANY, sources_list=ANY),
call().open(),
]
)
# __getitem__ is tricky
self.assertThat(
self.mock_cache.return_value.__getitem__.call_args_list,
Contains(call("fake-package")),
)
# Verify that the package was actually fetched and copied into the
# requested location.
self.assertThat(
os.path.join(self.tempdir, "download", "fake-package.deb"), FileExists()
)
@patch("snapcraft.internal.repo._deb._AptCache.fetch_binary")
@patch("snapcraft.internal.repo._deb.apt.apt_pkg")
def test_get_multiarch_package(self, mock_apt_pkg, mock_fetch_binary):
fake_package_path = os.path.join(self.path, "fake-package.deb")
open(fake_package_path, "w").close()
mock_fetch_binary.return_value = fake_package_path
self.mock_cache().is_virtual_package.return_value = False
project_options = snapcraft.ProjectOptions(use_geoip=False)
ubuntu = repo.Ubuntu(self.tempdir, project_options=project_options)
ubuntu.get(["fake-package:arch"])
mock_apt_pkg.assert_has_calls(
[
call.config.set("Apt::Install-Recommends", "False"),
call.config.find_file("Dir::Etc::Trusted"),
call.config.set("Dir::Etc::Trusted", ANY),
call.config.find_file("Dir::Etc::TrustedParts"),
call.config.set("Dir::Etc::TrustedParts", ANY),
call.config.clear("APT::Update::Post-Invoke-Success"),
]
)
self.mock_cache.assert_has_calls(
[
call(memonly=True, rootdir=ANY),
call().update(fetch_progress=ANY, sources_list=ANY),
call().open(),
]
)
# __getitem__ is tricky
self.assertThat(
self.mock_cache.return_value.__getitem__.call_args_list,
Contains(call("fake-package:arch")),
)
# Verify that the package was actually fetched and copied into the
# requested location.
self.assertThat(
os.path.join(self.tempdir, "download", "fake-package.deb"), FileExists()
)
@patch("snapcraft.repo._deb._get_geoip_country_code_prefix")
def test_sources_is_none_uses_default(self, mock_cc):
mock_cc.return_value = "ar"
self.maxDiff = None
sources_list = repo._deb._format_sources_list(
"", use_geoip=True, deb_arch="amd64"
)
expected_sources_list = """deb http://ar.archive.ubuntu.com/ubuntu/ xenial main restricted
deb http://ar.archive.ubuntu.com/ubuntu/ xenial-updates main restricted
deb http://ar.archive.ubuntu.com/ubuntu/ xenial universe
deb http://ar.archive.ubuntu.com/ubuntu/ xenial-updates universe
deb http://ar.archive.ubuntu.com/ubuntu/ xenial multiverse
deb http://ar.archive.ubuntu.com/ubuntu/ xenial-updates multiverse
deb http://security.ubuntu.com/ubuntu xenial-security main restricted
deb http://security.ubuntu.com/ubuntu xenial-security universe
deb http://security.ubuntu.com/ubuntu xenial-security multiverse
"""
self.assertThat(sources_list, Equals(expected_sources_list))
def test_no_geoip_uses_default_archive(self):
sources_list = repo._deb._format_sources_list(
repo._deb._DEFAULT_SOURCES, deb_arch="amd64", use_geoip=False
)
expected_sources_list = """deb http://archive.ubuntu.com/ubuntu/ xenial main restricted
deb http://archive.ubuntu.com/ubuntu/ xenial-updates main restricted
deb http://archive.ubuntu.com/ubuntu/ xenial universe
deb http://archive.ubuntu.com/ubuntu/ xenial-updates universe
deb http://archive.ubuntu.com/ubuntu/ xenial multiverse
deb http://archive.ubuntu.com/ubuntu/ xenial-updates multiverse
deb http://security.ubuntu.com/ubuntu xenial-security main restricted
deb http://security.ubuntu.com/ubuntu xenial-security universe
deb http://security.ubuntu.com/ubuntu xenial-security multiverse
"""
self.assertThat(sources_list, Equals(expected_sources_list))
@patch("snapcraft.internal.repo._deb._get_geoip_country_code_prefix")
def test_sources_amd64_vivid(self, mock_cc):
self.maxDiff = None
mock_cc.return_value = "ar"
sources_list = repo._deb._format_sources_list(
repo._deb._DEFAULT_SOURCES,
deb_arch="amd64",
use_geoip=True,
release="vivid",
)
expected_sources_list = """deb http://ar.archive.ubuntu.com/ubuntu/ vivid main restricted
deb http://ar.archive.ubuntu.com/ubuntu/ vivid-updates main restricted
deb http://ar.archive.ubuntu.com/ubuntu/ vivid universe
deb http://ar.archive.ubuntu.com/ubuntu/ vivid-updates universe
deb http://ar.archive.ubuntu.com/ubuntu/ vivid multiverse
deb http://ar.archive.ubuntu.com/ubuntu/ vivid-updates multiverse
deb http://security.ubuntu.com/ubuntu vivid-security main restricted
deb http://security.ubuntu.com/ubuntu vivid-security universe
deb http://security.ubuntu.com/ubuntu vivid-security multiverse
"""
self.assertThat(sources_list, Equals(expected_sources_list))
@patch("snapcraft.repo._deb._get_geoip_country_code_prefix")
def test_sources_armhf_trusty(self, mock_cc):
sources_list = repo._deb._format_sources_list(
repo._deb._DEFAULT_SOURCES, deb_arch="armhf", release="trusty"
)
expected_sources_list = """deb http://ports.ubuntu.com/ubuntu-ports/ trusty main restricted
deb http://ports.ubuntu.com/ubuntu-ports/ trusty-updates main restricted
deb http://ports.ubuntu.com/ubuntu-ports/ trusty universe
deb http://ports.ubuntu.com/ubuntu-ports/ trusty-updates universe
deb http://ports.ubuntu.com/ubuntu-ports/ trusty multiverse
deb http://ports.ubuntu.com/ubuntu-ports/ trusty-updates multiverse
deb http://ports.ubuntu.com/ubuntu-ports trusty-security main restricted
deb http://ports.ubuntu.com/ubuntu-ports trusty-security universe
deb http://ports.ubuntu.com/ubuntu-ports trusty-security multiverse
"""
self.assertThat(sources_list, Equals(expected_sources_list))
self.assertFalse(mock_cc.called)
class UbuntuTestCaseWithFakeAptCache(RepoBaseTestCase):
def setUp(self):
super().setUp()
self.fake_apt_cache = fixture_setup.FakeAptCache()
self.useFixture(self.fake_apt_cache)
def test_get_installed_packages(self):
for name, version, installed in (
("test-installed-package", "test-installed-package-version", True),
("test-not-installed-package", "dummy", False),
):
self.fake_apt_cache.add_package(
fixture_setup.FakeAptCachePackage(name, version, installed=installed)
)
self.assertThat(
repo.Repo.get_installed_packages(),
Equals(["test-installed-package=test-installed-package-version"]),
)
class AutokeepTestCase(RepoBaseTestCase):
def test_autokeep(self):
self.fake_apt_cache = fixture_setup.FakeAptCache()
self.useFixture(self.fake_apt_cache)
self.test_packages = (
"main-package",
"dependency",
"sub-dependency",
"conflicting-dependency",
)
self.fake_apt_cache.add_packages(self.test_packages)
self.fake_apt_cache.cache["main-package"].dependencies = [
[
fixture_setup.FakeAptBaseDependency(
"dependency", [self.fake_apt_cache.cache["dependency"]]
),
fixture_setup.FakeAptBaseDependency(
"conflicting-dependency",
[self.fake_apt_cache.cache["conflicting-dependency"]],
),
]
]
self.fake_apt_cache.cache["dependency"].dependencies = [
[
fixture_setup.FakeAptBaseDependency(
"sub-dependency", [self.fake_apt_cache.cache["sub-dependency"]]
)
]
]
self.fake_apt_cache.cache["conflicting-dependency"].conflicts = [
self.fake_apt_cache.cache["dependency"]
]
project_options = snapcraft.ProjectOptions()
ubuntu = repo.Ubuntu(self.tempdir, project_options=project_options)
ubuntu.get(["main-package", "conflicting-dependency"])
# Verify that the package was actually fetched and copied into the
# requested location.
self.assertThat(
os.path.join(self.tempdir, "download", "main-package.deb"), FileExists()
)
self.assertThat(
os.path.join(self.tempdir, "download", "conflicting-dependency.deb"),
FileExists(),
)
self.assertThat(
os.path.join(self.tempdir, "download", "dependency.deb"),
Not(FileExists()),
"Dependency should not have been fetched",
)
self.assertThat(
os.path.join(self.tempdir, "download", "sub-dependency.deb"),
Not(FileExists()),
"Sub-dependency should not have been fetched",
)
class BuildPackagesTestCase(unit.TestCase):
def setUp(self):
super().setUp()
self.fake_apt_cache = fixture_setup.FakeAptCache()
self.useFixture(self.fake_apt_cache)
self.test_packages = (
"package-not-installed",
"package-installed",
"another-uninstalled",
"another-installed",
"repeated-package",
"repeated-package",
"versioned-package=0.2",
"versioned-package",
)
self.fake_apt_cache.add_packages(self.test_packages)
self.fake_apt_cache.cache["package-installed"].installed = True
self.fake_apt_cache.cache["another-installed"].installed = True
self.fake_apt_cache.cache["versioned-package"].version = "0.1"
def get_installable_packages(self, packages):
return [
"package-not-installed",
"another-uninstalled",
"repeated-package",
"versioned-package=0.2",
]
@patch("os.environ")
def install_test_packages(self, test_pkgs, mock_env):
mock_env.copy.return_value = {}
repo.Ubuntu.install_build_packages(test_pkgs)
@patch("snapcraft.repo._deb.is_dumb_terminal")
@patch("subprocess.check_call")
def test_install_build_package(self, mock_check_call, mock_is_dumb_terminal):
mock_is_dumb_terminal.return_value = False
self.install_test_packages(self.test_packages)
installable = self.get_installable_packages(self.test_packages)
mock_check_call.assert_has_calls(
[
call(
"sudo apt-get --no-install-recommends -y "
"-o Dpkg::Progress-Fancy=1 install".split()
+ sorted(set(installable)),
env={
"DEBIAN_FRONTEND": "noninteractive",
"DEBCONF_NONINTERACTIVE_SEEN": "true",
},
)
]
)
@patch("snapcraft.repo._deb.is_dumb_terminal")
@patch("subprocess.check_call")
def test_install_build_package_in_dumb_terminal(
self, mock_check_call, mock_is_dumb_terminal
):
mock_is_dumb_terminal.return_value = True
self.install_test_packages(self.test_packages)
installable = self.get_installable_packages(self.test_packages)
mock_check_call.assert_has_calls(
[
call(
"sudo apt-get --no-install-recommends -y install".split()
+ sorted(set(installable)),
env={
"DEBIAN_FRONTEND": "noninteractive",
"DEBCONF_NONINTERACTIVE_SEEN": "true",
},
)
]
)
@patch("subprocess.check_call")
def test_install_build_package_marks_auto_installed(self, mock_check_call):
self.install_test_packages(self.test_packages)
installable = self.get_installable_packages(self.test_packages)
mock_check_call.assert_has_calls(
[
call(
"sudo apt-mark auto".split() + sorted(set(installable)),
env={
"DEBIAN_FRONTEND": "noninteractive",
"DEBCONF_NONINTERACTIVE_SEEN": "true",
},
)
]
)
@patch("subprocess.check_call")
def test_mark_installed_auto_error_is_not_fatal(self, mock_check_call):
error = CalledProcessError(101, "bad-cmd")
mock_check_call.side_effect = lambda c, env: error if "apt-mark" in c else None
self.install_test_packages(["package-not-installed"])
def test_invalid_package_requested(self):
self.assertRaises(
errors.BuildPackageNotFoundError,
repo.Ubuntu.install_build_packages,
["package-does-not-exist"],
)
@patch("subprocess.check_call")
def test_broken_package_requested(self, mock_check_call):
self.fake_apt_cache.add_packages(("package-not-installable",))
self.fake_apt_cache.cache["package-not-installable"].dependencies = [
[fixture_setup.FakeAptBaseDependency("broken-dependency", [])]
]
self.assertRaises(
errors.PackageBrokenError,
repo.Ubuntu.install_build_packages,
["package-not-installable"],
)
@patch("subprocess.check_call")
def test_broken_package_apt_install(self, mock_check_call):
mock_check_call.side_effect = CalledProcessError(100, "apt-get")
self.fake_apt_cache.add_packages(("package-not-installable",))
raised = self.assertRaises(
errors.BuildPackagesNotInstalledError,
repo.Ubuntu.install_build_packages,
["package-not-installable"],
)
self.assertThat(raised.packages, Equals("package-not-installable"))
@patch("subprocess.check_call")
def test_refresh_build_packages(self, mock_check_call):
repo.Ubuntu.refresh_build_packages()
mock_check_call.assert_called_once_with(["sudo", "apt", "update"])
@patch(
"subprocess.check_call",
side_effect=CalledProcessError(returncode=1, cmd=["sudo", "apt", "update"]),
)
def test_refresh_build_packages_fails(self, mock_check_call):
self.assertRaises(
errors.CacheUpdateFailedError, repo.Ubuntu.refresh_build_packages
)
mock_check_call.assert_called_once_with(["sudo", "apt", "update"])
| gpl-3.0 |
ryfeus/lambda-packs | Selenium_PhantomJS/source/service.py | 1 | 1299 | #!/usr/bin/env python
import httplib2
import datetime
import time
import os
import selenium
import json
import boto3
import requests
from dateutil.parser import parse
from selenium import webdriver
from selenium.webdriver.support.ui import Select
from selenium.webdriver.common.keys import Keys
from selenium.webdriver.common.desired_capabilities import DesiredCapabilities
from apiclient.discovery import build
from oauth2client.client import GoogleCredentials
def handler(event, context):
# set user agent
user_agent = ("Mozilla/5.0 (Macintosh; Intel Mac OS X 10_11_1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/47.0.2526.111 Safari/537.36")
dcap = dict(DesiredCapabilities.PHANTOMJS)
dcap["phantomjs.page.settings.userAgent"] = user_agent
dcap["phantomjs.page.settings.javascriptEnabled"] = True
browser = webdriver.PhantomJS(service_log_path=os.path.devnull, executable_path="/var/task/phantomjs", service_args=['--ignore-ssl-errors=true'], desired_capabilities=dcap)
browser.get('https://en.wikipedia.org/wiki/Special:Random')
line = browser.find_element_by_class_name('firstHeading').text
print(line)
body = {
"message": "Your lambda function executed successfully!",
"event": line
}
response = {
"statusCode": 200,
"body": json.dumps(body)
}
return response
| mit |
Communities-Communications/cc-odoo | openerp/addons/base/res/res_country.py | 283 | 4728 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2009 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import fields, osv
def location_name_search(self, cr, user, name='', args=None, operator='ilike',
context=None, limit=100):
if not args:
args = []
ids = []
if len(name) == 2:
ids = self.search(cr, user, [('code', 'ilike', name)] + args,
limit=limit, context=context)
search_domain = [('name', operator, name)]
if ids: search_domain.append(('id', 'not in', ids))
ids.extend(self.search(cr, user, search_domain + args,
limit=limit, context=context))
locations = self.name_get(cr, user, ids, context)
return sorted(locations, key=lambda (id, name): ids.index(id))
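# For illustration (hypothetical values): calling name_search with name='BE'
# first matches records by the two-char code (e.g. Belgium), then appends
# plain name matches such as 'Benin', keeping the code matches first.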
class Country(osv.osv):
_name = 'res.country'
_description = 'Country'
_columns = {
'name': fields.char('Country Name',
help='The full name of the country.', required=True, translate=True),
'code': fields.char('Country Code', size=2,
help='The ISO country code in two chars.\n'
'You can use this field for quick search.'),
'address_format': fields.text('Address Format', help="""You can state here the usual format to use for the \
addresses belonging to this country.\n\nYou can use the python-style string pattern with all the fields of the address \
(for example, use '%(street)s' to display the field 'street') plus
\n%(state_name)s: the name of the state
\n%(state_code)s: the code of the state
\n%(country_name)s: the name of the country
\n%(country_code)s: the code of the country"""),
'currency_id': fields.many2one('res.currency', 'Currency'),
'image': fields.binary("Image"),
'country_group_ids': fields.many2many('res.country.group', 'res_country_res_country_group_rel', 'res_country_id', 'res_country_group_id', string='Country Groups'),
}
_sql_constraints = [
('name_uniq', 'unique (name)',
'The name of the country must be unique !'),
('code_uniq', 'unique (code)',
'The code of the country must be unique !')
]
_defaults = {
'address_format': "%(street)s\n%(street2)s\n%(city)s %(state_code)s %(zip)s\n%(country_name)s",
}
_order='name'
name_search = location_name_search
def create(self, cursor, user, vals, context=None):
if vals.get('code'):
vals['code'] = vals['code'].upper()
return super(Country, self).create(cursor, user, vals,
context=context)
def write(self, cursor, user, ids, vals, context=None):
if vals.get('code'):
vals['code'] = vals['code'].upper()
return super(Country, self).write(cursor, user, ids, vals,
context=context)
class CountryGroup(osv.osv):
_description="Country Group"
_name = 'res.country.group'
_columns = {
'name': fields.char('Name', required=True),
'country_ids': fields.many2many('res.country', 'res_country_res_country_group_rel', 'res_country_group_id', 'res_country_id', string='Countries'),
}
class CountryState(osv.osv):
_description="Country state"
_name = 'res.country.state'
_columns = {
'country_id': fields.many2one('res.country', 'Country',
required=True),
'name': fields.char('State Name', required=True,
help='Administrative divisions of a country. E.g. Fed. State, Departement, Canton'),
'code': fields.char('State Code', size=3,
help='The state code in max. three chars.', required=True),
}
_order = 'code'
name_search = location_name_search
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
tqchen/tvm | python/tvm/topi/sparse/csrmm.py | 2 | 4109 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""TVM operator compute SpMM in CSR format."""
from __future__ import absolute_import
import tvm
from tvm import te
from .. import tag
from ..util import simplify
def csrmm_default(data, indices, indptr, weight, bias=None):
# pylint: disable=invalid-name
"""The default implementation of csrmm in topi.
Parameters
----------
data : tvm.te.Tensor
1-D with shape [nonzeros]
indices : tvm.te.Tensor
1-D with shape [nonzeros]
indptr : tvm.te.Tensor
1-D with shape [m+1]
weight : tvm.te.Tensor
2-D with shape [k, n]
bias : tvm.te.Tensor, optional
1-D with shape [m]
Returns
-------
output : tvm.te.Tensor
2-D with shape [m, n]
"""
assert (
len(data.shape) == 1
and len(indices.shape) == 1
and len(indptr.shape) == 1
and len(weight.shape) == 2
), "only support 2-dim csrmm"
assert isinstance(
weight, te.tensor.Tensor
), "weight matrix is assumed to be tvm.te.Tensor, but weight is `%s`" % (type(weight))
if bias is not None:
assert len(bias.shape) == 1
M = simplify(indptr.shape[0] - 1)
_, N = weight.shape
def csrmm_default_ir(data, indices, indptr, weight, out):
"""define ir for csrmm"""
irb = tvm.tir.ir_builder.create()
data_ptr = irb.buffer_ptr(data)
indices_ptr = irb.buffer_ptr(indices)
indptr_ptr = irb.buffer_ptr(indptr)
weight_ptr = irb.buffer_ptr(weight)
out_ptr = irb.buffer_ptr(out)
M = simplify(indptr.shape[0] - 1)
_, N = weight.shape
with irb.for_range(0, N, for_type="vectorize", name="n") as n:
with irb.for_range(0, M, for_type="parallel", name="row") as row:
dot = irb.allocate("float32", (1,), name="dot", scope="local")
out_ptr[row * N + n] = 0.0
dot[0] = 0.0
row_start = indptr_ptr[row]
row_end = indptr_ptr[row + 1]
row_elems = row_end - row_start
with irb.for_range(0, row_elems, name="idx") as idx:
elem = row_start + idx
dot[0] += data_ptr[elem] * weight_ptr[indices_ptr[elem] * N + n]
out_ptr[row * N + n] += dot[0]
return irb.get()
oshape = (M, N)
matmul = te.extern(
oshape,
[data, indices, indptr, weight],
lambda ins, outs: csrmm_default_ir(ins[0], ins[1], ins[2], ins[3], outs[0]),
tag="csrmm",
dtype="float32",
name="out",
)
if bias is not None:
matmul = te.compute(oshape, lambda i, j: matmul[i, j] + bias[i], tag=tag.BROADCAST)
return matmul
def csrmm(a, b, c=None):
"""The `csrmm` routine performs a matrix-matrix operation defined as :math:`C := A*B + C`,
where `B` and `C` are dense matrices, `A` is an m-by-k sparse matrix in the CSR format.
Parameters
----------
a : tvm.contrib.sparse.CSRNDArray
2-D sparse matrix with shape [m, k]
b : tvm.te.Tensor
2-D dense matrix with shape [k, n]
c : tvm.te.Tensor, optional
1-D dense vector with shape [n]
Returns
-------
output : tvm.te.Tensor
2-D with shape [m, n]
"""
return csrmm_default(a.data, a.indices, a.indptr, b, c)
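# A minimal usage sketch (a hedged example assuming the tvm.contrib.sparse
# placeholder API; the shapes and names below are illustrative only):
#
#   from tvm import te
#   from tvm.contrib import sparse
#
#   a = sparse.placeholder((128, 64), nonzeros=1024, dtype="float32", name="a")
#   b = te.placeholder((64, 32), dtype="float32", name="b")
#   c = csrmm(a, b)               # dense 2-D result with shape [128, 32]
#   s = te.create_schedule(c.op)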
| apache-2.0 |
rednaxelafx/apache-spark | resource-managers/kubernetes/integration-tests/tests/worker_memory_check.py | 26 | 1585 | #
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import resource
import sys
from pyspark.sql import SparkSession
if __name__ == "__main__":
"""
Usage: worker_memory_check [Memory_in_Mi]
"""
spark = SparkSession \
.builder \
.appName("PyMemoryTest") \
.getOrCreate()
sc = spark.sparkContext
if len(sys.argv) < 2:
print("Usage: worker_memory_check [Memory_in_Mi]", file=sys.stderr)
sys.exit(-1)
def f(x):
rLimit = resource.getrlimit(resource.RLIMIT_AS)
print("RLimit is " + str(rLimit))
return rLimit
resourceValue = sc.parallelize([1]).map(f).collect()[0][0]
print("Resource Value is " + str(resourceValue))
truthCheck = (resourceValue == int(sys.argv[1]))
print("PySpark Worker Memory Check is: " + str(truthCheck))
spark.stop()
| apache-2.0 |
GermanRuizMarcos/Classical-Composer-Classification | code_10_1/classification.py | 1 | 30838 | '''
AUDIO CLASSICAL COMPOSER IDENTIFICATION BASED ON:
A SPECTRAL BANDWISE FEATURE-BASED SYSTEM
'''
import essentia
from essentia.standard import *
import glob
import numpy as np
import arff
from scipy import stats
import collections
import cv2
import matplotlib
import matplotlib.pyplot as plt
#### gabor filters
def build_filters():
filters = []
ksize = 31
for theta in np.arange(0, np.pi, np.pi / 16):
kern = cv2.getGaborKernel((ksize, ksize), 4.0, theta, 10.0, 0.5, 0, ktype=cv2.CV_32F)
kern /= 1.5*kern.sum()
filters.append(kern)
return filters
def process(img, filters):
accum = np.zeros_like(img)
for kern in filters:
fimg = cv2.filter2D(img, cv2.CV_8UC3, kern)
np.maximum(accum, fimg, accum)
return accum
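# Descriptive note on the two helpers above: build_filters() creates 16 Gabor
# kernels at orientations spaced pi/16 apart, and process() convolves the
# image with each kernel, keeping the per-pixel maximum response. The script
# below reduces that response map to per-pixel channel means (mean_counter)
# and writes their summary statistics into the ARFF file.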
###
# Dataset creation with specific attributes (spectral features) and a specific class (composer's name)
'''
Audio files are transformed into the frequency domain through a 1024-sample STFT with 50% overlap.
The spectrum is divided into 50 mel-spaced bands.
'''
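# Note (added for clarity): the 50% overlap comes from pairing the
# 1024-sample frame size with the 512-sample hop size in the
# FrameGenerator calls below.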
dirList = glob.glob("/home/usuario/Escritorio/SMC/2 term/Music Information Retrieval/Classical Composer Identification/datasets/bach/*.wav")
fft = FFT()
melbands = MelBands(numberBands = 50)
flatness = FlatnessDB()
rolloff = RollOff()
centroid = SpectralCentroidTime()
flux = Flux()
energy = EnergyBand()
zero = ZeroCrossingRate()
spectrum = Spectrum()
w = Windowing(type = 'hann')
mfcc = MFCC()
silence = SilenceRate(thresholds = [0.01])
f = open('definitive_train.txt', 'wb')
f.write('@RELATION "composer dataset"\n')
f.write('\n')
f.write('@ATTRIBUTE filename STRING\n')
f.write('@ATTRIBUTE MFCC-0 REAL\n')
f.write('@ATTRIBUTE MFCC-1 REAL\n')
f.write('@ATTRIBUTE MFCC-2 REAL\n')
f.write('@ATTRIBUTE MFCC-3 REAL\n')
f.write('@ATTRIBUTE MFCC-4 REAL\n')
f.write('@ATTRIBUTE MFCC-5 REAL\n')
f.write('@ATTRIBUTE MFCC-6 REAL\n')
f.write('@ATTRIBUTE MFCC-7 REAL\n')
f.write('@ATTRIBUTE MFCC-8 REAL\n')
f.write('@ATTRIBUTE MFCC-9 REAL\n')
f.write('@ATTRIBUTE MFCC-10 REAL\n')
f.write('@ATTRIBUTE MFCC-11 REAL\n')
f.write('@ATTRIBUTE MFCC-12 REAL\n')
f.write('@ATTRIBUTE flatness-mean REAL\n')
f.write('@ATTRIBUTE flatness-variance REAL\n')
f.write('@ATTRIBUTE rolloff-mean REAL\n')
f.write('@ATTRIBUTE rolloff-variance REAL\n')
f.write('@ATTRIBUTE centroid-mean REAL\n')
f.write('@ATTRIBUTE centroid-variance REAL\n')
f.write('@ATTRIBUTE flux-mean REAL\n')
f.write('@ATTRIBUTE flux-variance REAL\n')
f.write('@ATTRIBUTE energy-mean REAL\n')
f.write('@ATTRIBUTE energy-variance REAL\n')
f.write('@ATTRIBUTE ZCR-mean REAL\n')
f.write('@ATTRIBUTE ZCR-variance REAL\n')
f.write('@ATTRIBUTE flatness-std REAL\n')
f.write('@ATTRIBUTE flatness-hmean REAL\n')
f.write('@ATTRIBUTE silences REAL\n')
f.write('@ATTRIBUTE gaborfilter-mean REAL\n')
f.write('@ATTRIBUTE gaborfilter-variance REAL\n')
f.write('@ATTRIBUTE composer {bach, beethoven, chopin, haydn, liszt, mendelssohn, mozart, vivaldi}\n')
f.write('\n')
f.write('@DATA\n')
dirimg = '/home/usuario/Escritorio/SMC/2 term/Music Information Retrieval/Classical Composer Identification/code_10/pictures/bach'
dirname = str(dirimg) +'/*.png'
piclist = glob.glob(dirname)
counter = 0
for audio_file in dirList:
# Selecting the spectrogram
for item in piclist:
if item.split('/')[-1].split('.')[0] == audio_file.split('/')[-1].split('.')[0]:
picname = str(dirimg)+'/'+str(audio_file.split('/')[-1].split('.')[0]) + '.png'
flat = []
rol = []
cen = []
flu = []
ene = []
zer = []
mfccs = []
stft = []
sil = []
mean_counter = []
# Loading audio
audio = MonoLoader(filename = audio_file)()
# Features extraction
for frame in FrameGenerator(audio, frameSize = 1024, hopSize = 512, startFromZero=True):
bands = melbands(spectrum(frame))
stft.append(fft(frame))
flat.append(flatness(bands))
rol.append(rolloff(bands))
cen.append(centroid(bands))
flu.append(flux(bands))
ene.append(energy(bands))
zer.append(zero(frame))
mfcc_bands, mfcc_coeffs = mfcc(spectrum(w(frame)))
mfccs.append(mfcc_coeffs)
sil.append(silence(frame))
rate = collections.Counter()
rate.update(sil)
rate = rate.most_common(1)
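            # 'rate' is now a one-element list [(value, count)] with the most common
            # SilenceRate output across frames; rate[0][1] below is that count,
            # written as the "silences" attribute.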
composer = 'bach'
# Gabor filter analysis
if __name__ == '__main__':
import sys
print __doc__
try:
img_fn = sys.argv[1]
except:
img_fn = picname
img = cv2.imread(img_fn)
if img is None:
print 'Failed to load image file:', img_fn
sys.exit(1)
filters = build_filters()
res1 = process(img, filters)
for i in range(len(res1)-1):
for j in range(len(res1[i])-1):
mean_counter.append(np.mean(res1[i][j]))
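            # 'mean_counter' holds one per-pixel channel mean of the Gabor response
            # image; its mean and variance summarize the filtered texture.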
f.write('%s' %audio_file.split('/')[-1].split('.')[0].split('bach')[0])
f.write(',')
            # Mean of each MFCC coefficient across all frames ('mfccs' holds one
            # 13-dimensional vector per frame; 'mfccs[i]' alone would instead average
            # the 13 coefficients of frame i).
            mfcc_means = np.mean(mfccs, axis=0)
            for coeff in mfcc_means:
                f.write('%r' %coeff)
                f.write(',')
f.write('%r' %np.mean(flat))
f.write(',')
f.write('%r' %np.var(flat))
f.write(',')
f.write('%r' %np.mean(rol))
f.write(',')
f.write('%r' %np.var(rol))
f.write(',')
f.write('%r' %np.mean(cen))
f.write(',')
f.write('%r' %np.var(cen))
f.write(',')
f.write('%r' %np.mean(flu))
f.write(',')
f.write('%r' %np.var(flu))
f.write(',')
f.write('%r' %np.mean(ene))
f.write(',')
f.write('%r' %np.var(ene))
f.write(',')
f.write('%r' %np.mean(zer))
f.write(',')
f.write('%r' %np.var(zer))
f.write(',')
f.write('%r' %np.std(flat))
f.write(',')
f.write('%r' %stats.hmean(flat))
f.write(',')
f.write('%r' %rate[0][1])
f.write(',')
            # Written in the order declared in the header:
            # gaborfilter-mean, then gaborfilter-variance.
            f.write('%r' %np.mean(mean_counter))
            f.write(',')
            f.write('%r' %np.var(mean_counter))
f.write(',')
f.write('%s' %composer)
f.write('\n')
counter += 1
# 2
dirList = glob.glob("/home/usuario/Escritorio/SMC/2 term/Music Information Retrieval/Classical Composer Identification/datasets/beethoven/*.wav")
dirimg = '/home/usuario/Escritorio/SMC/2 term/Music Information Retrieval/Classical Composer Identification/code_10/pictures/beethoven'
dirname = str(dirimg) +'/*.png'
piclist = glob.glob(dirname)
counter = 0
for audio_file in dirList:
    # Selecting the matching spectrogram
for item in piclist:
if item.split('/')[-1].split('.')[0] == audio_file.split('/')[-1].split('.')[0]:
picname = str(dirimg)+'/'+str(audio_file.split('/')[-1].split('.')[0]) + '.png'
flat = []
rol = []
cen = []
flu = []
ene = []
zer = []
mfccs = []
stft = []
sil = []
mean_counter = []
# Loading audio
audio = MonoLoader(filename = audio_file)()
            # Feature extraction
for frame in FrameGenerator(audio, frameSize = 1024, hopSize = 512, startFromZero=True):
bands = melbands(spectrum(frame))
stft.append(fft(frame))
flat.append(flatness(bands))
rol.append(rolloff(bands))
cen.append(centroid(bands))
flu.append(flux(bands))
ene.append(energy(bands))
zer.append(zero(frame))
mfcc_bands, mfcc_coeffs = mfcc(spectrum(w(frame)))
mfccs.append(mfcc_coeffs)
sil.append(silence(frame))
rate = collections.Counter()
rate.update(sil)
rate = rate.most_common(1)
composer = 'beethoven'
# Gabor filter analysis
if __name__ == '__main__':
import sys
print __doc__
try:
img_fn = sys.argv[1]
except:
img_fn = picname
img = cv2.imread(img_fn)
if img is None:
print 'Failed to load image file:', img_fn
sys.exit(1)
filters = build_filters()
res1 = process(img, filters)
for i in range(len(res1)-1):
for j in range(len(res1[i])-1):
mean_counter.append(np.mean(res1[i][j]))
f.write('%s' %audio_file.split('/')[-1].split('.')[0].split('beethoven')[0])
f.write(',')
            # Mean of each MFCC coefficient across all frames ('mfccs' holds one
            # 13-dimensional vector per frame; 'mfccs[i]' alone would instead average
            # the 13 coefficients of frame i).
            mfcc_means = np.mean(mfccs, axis=0)
            for coeff in mfcc_means:
                f.write('%r' %coeff)
                f.write(',')
f.write('%r' %np.mean(flat))
f.write(',')
f.write('%r' %np.var(flat))
f.write(',')
f.write('%r' %np.mean(rol))
f.write(',')
f.write('%r' %np.var(rol))
f.write(',')
f.write('%r' %np.mean(cen))
f.write(',')
f.write('%r' %np.var(cen))
f.write(',')
f.write('%r' %np.mean(flu))
f.write(',')
f.write('%r' %np.var(flu))
f.write(',')
f.write('%r' %np.mean(ene))
f.write(',')
f.write('%r' %np.var(ene))
f.write(',')
f.write('%r' %np.mean(zer))
f.write(',')
f.write('%r' %np.var(zer))
f.write(',')
f.write('%r' %np.std(flat))
f.write(',')
f.write('%r' %stats.hmean(flat))
f.write(',')
f.write('%r' %rate[0][1])
f.write(',')
            # Written in the order declared in the header:
            # gaborfilter-mean, then gaborfilter-variance.
            f.write('%r' %np.mean(mean_counter))
            f.write(',')
            f.write('%r' %np.var(mean_counter))
f.write(',')
f.write('%s' %composer)
f.write('\n')
counter += 1
# 3
dirList = glob.glob("/home/usuario/Escritorio/SMC/2 term/Music Information Retrieval/Classical Composer Identification/datasets/chopin/*.wav")
dirimg = '/home/usuario/Escritorio/SMC/2 term/Music Information Retrieval/Classical Composer Identification/code_10/pictures/chopin'
dirname = str(dirimg) +'/*.png'
piclist = glob.glob(dirname)
counter = 0
for audio_file in dirList:
    # Selecting the matching spectrogram
for item in piclist:
if item.split('/')[-1].split('.')[0] == audio_file.split('/')[-1].split('.')[0]:
picname = str(dirimg)+'/'+str(audio_file.split('/')[-1].split('.')[0]) + '.png'
flat = []
rol = []
cen = []
flu = []
ene = []
zer = []
mfccs = []
stft = []
sil = []
mean_counter = []
# Loading audio
audio = MonoLoader(filename = audio_file)()
            # Feature extraction
for frame in FrameGenerator(audio, frameSize = 1024, hopSize = 512, startFromZero=True):
bands = melbands(spectrum(frame))
stft.append(fft(frame))
flat.append(flatness(bands))
rol.append(rolloff(bands))
cen.append(centroid(bands))
flu.append(flux(bands))
ene.append(energy(bands))
zer.append(zero(frame))
mfcc_bands, mfcc_coeffs = mfcc(spectrum(w(frame)))
mfccs.append(mfcc_coeffs)
sil.append(silence(frame))
rate = collections.Counter()
rate.update(sil)
rate = rate.most_common(1)
composer = 'chopin'
# Gabor filter analysis
if __name__ == '__main__':
import sys
print __doc__
try:
img_fn = sys.argv[1]
except:
img_fn = picname
img = cv2.imread(img_fn)
if img is None:
print 'Failed to load image file:', img_fn
sys.exit(1)
filters = build_filters()
res1 = process(img, filters)
for i in range(len(res1)-1):
for j in range(len(res1[i])-1):
mean_counter.append(np.mean(res1[i][j]))
f.write('%s' %audio_file.split('/')[-1].split('.')[0].split('chopin')[0])
f.write(',')
            # Mean of each MFCC coefficient across all frames ('mfccs' holds one
            # 13-dimensional vector per frame; 'mfccs[i]' alone would instead average
            # the 13 coefficients of frame i).
            mfcc_means = np.mean(mfccs, axis=0)
            for coeff in mfcc_means:
                f.write('%r' %coeff)
                f.write(',')
f.write('%r' %np.mean(flat))
f.write(',')
f.write('%r' %np.var(flat))
f.write(',')
f.write('%r' %np.mean(rol))
f.write(',')
f.write('%r' %np.var(rol))
f.write(',')
f.write('%r' %np.mean(cen))
f.write(',')
f.write('%r' %np.var(cen))
f.write(',')
f.write('%r' %np.mean(flu))
f.write(',')
f.write('%r' %np.var(flu))
f.write(',')
f.write('%r' %np.mean(ene))
f.write(',')
f.write('%r' %np.var(ene))
f.write(',')
f.write('%r' %np.mean(zer))
f.write(',')
f.write('%r' %np.var(zer))
f.write(',')
f.write('%r' %np.std(flat))
f.write(',')
f.write('%r' %stats.hmean(flat))
f.write(',')
f.write('%r' %rate[0][1])
f.write(',')
            # Written in the order declared in the header:
            # gaborfilter-mean, then gaborfilter-variance.
            f.write('%r' %np.mean(mean_counter))
            f.write(',')
            f.write('%r' %np.var(mean_counter))
f.write(',')
f.write('%s' %composer)
f.write('\n')
counter += 1
# 4
dirList = glob.glob("/home/usuario/Escritorio/SMC/2 term/Music Information Retrieval/Classical Composer Identification/datasets/haydn/*.wav")
dirimg = '/home/usuario/Escritorio/SMC/2 term/Music Information Retrieval/Classical Composer Identification/code_10/pictures/haydn'
dirname = str(dirimg) +'/*.png'
piclist = glob.glob(dirname)
counter = 0
for audio_file in dirList:
    # Selecting the matching spectrogram
for item in piclist:
if item.split('/')[-1].split('.')[0] == audio_file.split('/')[-1].split('.')[0]:
picname = str(dirimg)+'/'+str(audio_file.split('/')[-1].split('.')[0]) + '.png'
flat = []
rol = []
cen = []
flu = []
ene = []
zer = []
mfccs = []
stft = []
sil = []
mean_counter = []
# Loading audio
audio = MonoLoader(filename = audio_file)()
            # Feature extraction
for frame in FrameGenerator(audio, frameSize = 1024, hopSize = 512, startFromZero=True):
bands = melbands(spectrum(frame))
stft.append(fft(frame))
flat.append(flatness(bands))
rol.append(rolloff(bands))
cen.append(centroid(bands))
flu.append(flux(bands))
ene.append(energy(bands))
zer.append(zero(frame))
mfcc_bands, mfcc_coeffs = mfcc(spectrum(w(frame)))
mfccs.append(mfcc_coeffs)
sil.append(silence(frame))
rate = collections.Counter()
rate.update(sil)
rate = rate.most_common(1)
composer = 'haydn'
# Gabor filter analysis
if __name__ == '__main__':
import sys
print __doc__
try:
img_fn = sys.argv[1]
except:
img_fn = picname
img = cv2.imread(img_fn)
if img is None:
print 'Failed to load image file:', img_fn
sys.exit(1)
filters = build_filters()
res1 = process(img, filters)
for i in range(len(res1)-1):
for j in range(len(res1[i])-1):
mean_counter.append(np.mean(res1[i][j]))
f.write('%s' %audio_file.split('/')[-1].split('.')[0].split('haydn')[0])
f.write(',')
            # Mean of each MFCC coefficient across all frames ('mfccs' holds one
            # 13-dimensional vector per frame; 'mfccs[i]' alone would instead average
            # the 13 coefficients of frame i).
            mfcc_means = np.mean(mfccs, axis=0)
            for coeff in mfcc_means:
                f.write('%r' %coeff)
                f.write(',')
f.write('%r' %np.mean(flat))
f.write(',')
f.write('%r' %np.var(flat))
f.write(',')
f.write('%r' %np.mean(rol))
f.write(',')
f.write('%r' %np.var(rol))
f.write(',')
f.write('%r' %np.mean(cen))
f.write(',')
f.write('%r' %np.var(cen))
f.write(',')
f.write('%r' %np.mean(flu))
f.write(',')
f.write('%r' %np.var(flu))
f.write(',')
f.write('%r' %np.mean(ene))
f.write(',')
f.write('%r' %np.var(ene))
f.write(',')
f.write('%r' %np.mean(zer))
f.write(',')
f.write('%r' %np.var(zer))
f.write(',')
f.write('%r' %np.std(flat))
f.write(',')
f.write('%r' %stats.hmean(flat))
f.write(',')
f.write('%r' %rate[0][1])
f.write(',')
            # Written in the order declared in the header:
            # gaborfilter-mean, then gaborfilter-variance.
            f.write('%r' %np.mean(mean_counter))
            f.write(',')
            f.write('%r' %np.var(mean_counter))
f.write(',')
f.write('%s' %composer)
f.write('\n')
counter += 1
'''
# 5
dirList = glob.glob("/home/usuario/Escritorio/SMC/2 term/Music Information Retrieval/Classical Composer Identification/datasets/liszt/*.wav")
dirimg = '/home/usuario/Escritorio/SMC/2 term/Music Information Retrieval/Classical Composer Identification/code_10/pictures/liszt'
dirname = str(dirimg) +'/*.png'
piclist = glob.glob(dirname)
counter = 0
for audio_file in dirList:
    # Selecting the matching spectrogram
for item in piclist:
if item.split('/')[-1].split('.')[0] == audio_file.split('/')[-1].split('.')[0]:
picname = str(dirimg)+'/'+str(audio_file.split('/')[-1].split('.')[0]) + '.png'
flat = []
rol = []
cen = []
flu = []
ene = []
zer = []
mfccs = []
stft = []
sil = []
mean_counter = []
# Loading audio
audio = MonoLoader(filename = audio_file)()
            # Feature extraction
for frame in FrameGenerator(audio, frameSize = 1024, hopSize = 512, startFromZero=True):
bands = melbands(spectrum(frame))
stft.append(fft(frame))
flat.append(flatness(bands))
rol.append(rolloff(bands))
cen.append(centroid(bands))
flu.append(flux(bands))
ene.append(energy(bands))
zer.append(zero(frame))
mfcc_bands, mfcc_coeffs = mfcc(spectrum(w(frame)))
mfccs.append(mfcc_coeffs)
sil.append(silence(frame))
rate = collections.Counter()
rate.update(sil)
rate = rate.most_common(1)
composer = 'liszt'
# Gabor filter analysis
if __name__ == '__main__':
import sys
print __doc__
try:
img_fn = sys.argv[1]
except:
img_fn = picname
img = cv2.imread(img_fn)
if img is None:
print 'Failed to load image file:', img_fn
sys.exit(1)
filters = build_filters()
res1 = process(img, filters)
for i in range(len(res1)-1):
for j in range(len(res1[i])-1):
mean_counter.append(np.mean(res1[i][j]))
f.write('%s' %audio_file.split('/')[-1].split('.')[0].split('liszt')[0])
f.write(',')
            # Mean of each MFCC coefficient across all frames ('mfccs' holds one
            # 13-dimensional vector per frame; 'mfccs[i]' alone would instead average
            # the 13 coefficients of frame i).
            mfcc_means = np.mean(mfccs, axis=0)
            for coeff in mfcc_means:
                f.write('%r' %coeff)
                f.write(',')
f.write('%r' %np.mean(flat))
f.write(',')
f.write('%r' %np.var(flat))
f.write(',')
f.write('%r' %np.mean(rol))
f.write(',')
f.write('%r' %np.var(rol))
f.write(',')
f.write('%r' %np.mean(cen))
f.write(',')
f.write('%r' %np.var(cen))
f.write(',')
f.write('%r' %np.mean(flu))
f.write(',')
f.write('%r' %np.var(flu))
f.write(',')
f.write('%r' %np.mean(ene))
f.write(',')
f.write('%r' %np.var(ene))
f.write(',')
f.write('%r' %np.mean(zer))
f.write(',')
f.write('%r' %np.var(zer))
f.write(',')
f.write('%r' %np.std(flat))
f.write(',')
f.write('%r' %stats.hmean(flat))
f.write(',')
f.write('%r' %rate[0][1])
f.write(',')
            # Written in the order declared in the header:
            # gaborfilter-mean, then gaborfilter-variance.
            f.write('%r' %np.mean(mean_counter))
            f.write(',')
            f.write('%r' %np.var(mean_counter))
f.write(',')
f.write('%s' %composer)
f.write('\n')
            counter += 1
'''
# 6
dirList = glob.glob("/home/usuario/Escritorio/SMC/2 term/Music Information Retrieval/Classical Composer Identification/datasets/mendelssohn/*.wav")
dirimg = '/home/usuario/Escritorio/SMC/2 term/Music Information Retrieval/Classical Composer Identification/code_10/pictures/mendelssohn'
dirname = str(dirimg) +'/*.png'
piclist = glob.glob(dirname)
counter = 0
for audio_file in dirList:
    # Selecting the matching spectrogram
for item in piclist:
if item.split('/')[-1].split('.')[0] == audio_file.split('/')[-1].split('.')[0]:
picname = str(dirimg)+'/'+str(audio_file.split('/')[-1].split('.')[0]) + '.png'
flat = []
rol = []
cen = []
flu = []
ene = []
zer = []
mfccs = []
stft = []
sil = []
mean_counter = []
# Loading audio
audio = MonoLoader(filename = audio_file)()
            # Feature extraction
for frame in FrameGenerator(audio, frameSize = 1024, hopSize = 512, startFromZero=True):
bands = melbands(spectrum(frame))
stft.append(fft(frame))
flat.append(flatness(bands))
rol.append(rolloff(bands))
cen.append(centroid(bands))
flu.append(flux(bands))
ene.append(energy(bands))
zer.append(zero(frame))
mfcc_bands, mfcc_coeffs = mfcc(spectrum(w(frame)))
mfccs.append(mfcc_coeffs)
sil.append(silence(frame))
rate = collections.Counter()
rate.update(sil)
rate = rate.most_common(1)
composer = 'mendelssohn'
# Gabor filter analysis
if __name__ == '__main__':
import sys
print __doc__
try:
img_fn = sys.argv[1]
except:
img_fn = picname
img = cv2.imread(img_fn)
if img is None:
print 'Failed to load image file:', img_fn
sys.exit(1)
filters = build_filters()
res1 = process(img, filters)
for i in range(len(res1)-1):
for j in range(len(res1[i])-1):
mean_counter.append(np.mean(res1[i][j]))
f.write('%s' %audio_file.split('/')[-1].split('.')[0].split('mendelssohn')[0])
f.write(',')
            # Mean of each MFCC coefficient across all frames ('mfccs' holds one
            # 13-dimensional vector per frame; 'mfccs[i]' alone would instead average
            # the 13 coefficients of frame i).
            mfcc_means = np.mean(mfccs, axis=0)
            for coeff in mfcc_means:
                f.write('%r' %coeff)
                f.write(',')
f.write('%r' %np.mean(flat))
f.write(',')
f.write('%r' %np.var(flat))
f.write(',')
f.write('%r' %np.mean(rol))
f.write(',')
f.write('%r' %np.var(rol))
f.write(',')
f.write('%r' %np.mean(cen))
f.write(',')
f.write('%r' %np.var(cen))
f.write(',')
f.write('%r' %np.mean(flu))
f.write(',')
f.write('%r' %np.var(flu))
f.write(',')
f.write('%r' %np.mean(ene))
f.write(',')
f.write('%r' %np.var(ene))
f.write(',')
f.write('%r' %np.mean(zer))
f.write(',')
f.write('%r' %np.var(zer))
f.write(',')
f.write('%r' %np.std(flat))
f.write(',')
f.write('%r' %stats.hmean(flat))
f.write(',')
f.write('%r' %rate[0][1])
f.write(',')
            # Written in the order declared in the header:
            # gaborfilter-mean, then gaborfilter-variance.
            f.write('%r' %np.mean(mean_counter))
            f.write(',')
            f.write('%r' %np.var(mean_counter))
f.write(',')
f.write('%s' %composer)
f.write('\n')
counter += 1
# 7
dirList = glob.glob("/home/usuario/Escritorio/SMC/2 term/Music Information Retrieval/Classical Composer Identification/datasets/mozart/*.wav")
dirimg = '/home/usuario/Escritorio/SMC/2 term/Music Information Retrieval/Classical Composer Identification/code_10/pictures/mozart'
dirname = str(dirimg) +'/*.png'
piclist = glob.glob(dirname)
counter = 0
for audio_file in dirList:
    # Selecting the matching spectrogram
for item in piclist:
if item.split('/')[-1].split('.')[0] == audio_file.split('/')[-1].split('.')[0]:
picname = str(dirimg)+'/'+str(audio_file.split('/')[-1].split('.')[0]) + '.png'
flat = []
rol = []
cen = []
flu = []
ene = []
zer = []
mfccs = []
stft = []
sil = []
mean_counter = []
# Loading audio
audio = MonoLoader(filename = audio_file)()
            # Feature extraction
for frame in FrameGenerator(audio, frameSize = 1024, hopSize = 512, startFromZero=True):
bands = melbands(spectrum(frame))
stft.append(fft(frame))
flat.append(flatness(bands))
rol.append(rolloff(bands))
cen.append(centroid(bands))
flu.append(flux(bands))
ene.append(energy(bands))
zer.append(zero(frame))
mfcc_bands, mfcc_coeffs = mfcc(spectrum(w(frame)))
mfccs.append(mfcc_coeffs)
sil.append(silence(frame))
rate = collections.Counter()
rate.update(sil)
rate = rate.most_common(1)
composer = 'mozart'
# Gabor filter analysis
if __name__ == '__main__':
import sys
print __doc__
try:
img_fn = sys.argv[1]
except:
img_fn = picname
img = cv2.imread(img_fn)
if img is None:
print 'Failed to load image file:', img_fn
sys.exit(1)
filters = build_filters()
res1 = process(img, filters)
for i in range(len(res1)-1):
for j in range(len(res1[i])-1):
mean_counter.append(np.mean(res1[i][j]))
f.write('%s' %audio_file.split('/')[-1].split('.')[0].split('mozart')[0])
f.write(',')
            # Mean of each MFCC coefficient across all frames ('mfccs' holds one
            # 13-dimensional vector per frame; 'mfccs[i]' alone would instead average
            # the 13 coefficients of frame i).
            mfcc_means = np.mean(mfccs, axis=0)
            for coeff in mfcc_means:
                f.write('%r' %coeff)
                f.write(',')
f.write('%r' %np.mean(flat))
f.write(',')
f.write('%r' %np.var(flat))
f.write(',')
f.write('%r' %np.mean(rol))
f.write(',')
f.write('%r' %np.var(rol))
f.write(',')
f.write('%r' %np.mean(cen))
f.write(',')
f.write('%r' %np.var(cen))
f.write(',')
f.write('%r' %np.mean(flu))
f.write(',')
f.write('%r' %np.var(flu))
f.write(',')
f.write('%r' %np.mean(ene))
f.write(',')
f.write('%r' %np.var(ene))
f.write(',')
f.write('%r' %np.mean(zer))
f.write(',')
f.write('%r' %np.var(zer))
f.write(',')
f.write('%r' %np.std(flat))
f.write(',')
f.write('%r' %stats.hmean(flat))
f.write(',')
f.write('%r' %rate[0][1])
f.write(',')
            # Written in the order declared in the header:
            # gaborfilter-mean, then gaborfilter-variance.
            f.write('%r' %np.mean(mean_counter))
            f.write(',')
            f.write('%r' %np.var(mean_counter))
f.write(',')
f.write('%s' %composer)
f.write('\n')
counter += 1
# 8
dirList = glob.glob("/home/usuario/Escritorio/SMC/2 term/Music Information Retrieval/Classical Composer Identification/datasets/vivaldi/*.wav")
dirimg = '/home/usuario/Escritorio/SMC/2 term/Music Information Retrieval/Classical Composer Identification/code_10/pictures/vivaldi'
dirname = str(dirimg) +'/*.png'
piclist = glob.glob(dirname)
counter = 0
for audio_file in dirList:
    # Selecting the matching spectrogram
for item in piclist:
if item.split('/')[-1].split('.')[0] == audio_file.split('/')[-1].split('.')[0]:
picname = str(dirimg)+'/'+str(audio_file.split('/')[-1].split('.')[0]) + '.png'
flat = []
rol = []
cen = []
flu = []
ene = []
zer = []
mfccs = []
stft = []
sil = []
mean_counter = []
# Loading audio
audio = MonoLoader(filename = audio_file)()
            # Feature extraction
for frame in FrameGenerator(audio, frameSize = 1024, hopSize = 512, startFromZero=True):
bands = melbands(spectrum(frame))
stft.append(fft(frame))
flat.append(flatness(bands))
rol.append(rolloff(bands))
cen.append(centroid(bands))
flu.append(flux(bands))
ene.append(energy(bands))
zer.append(zero(frame))
mfcc_bands, mfcc_coeffs = mfcc(spectrum(w(frame)))
mfccs.append(mfcc_coeffs)
sil.append(silence(frame))
rate = collections.Counter()
rate.update(sil)
rate = rate.most_common(1)
composer = 'vivaldi'
# Gabor filter analysis
if __name__ == '__main__':
import sys
print __doc__
try:
img_fn = sys.argv[1]
except:
img_fn = picname
img = cv2.imread(img_fn)
if img is None:
print 'Failed to load image file:', img_fn
sys.exit(1)
filters = build_filters()
res1 = process(img, filters)
for i in range(len(res1)-1):
for j in range(len(res1[i])-1):
mean_counter.append(np.mean(res1[i][j]))
f.write('%s' %audio_file.split('/')[-1].split('.')[0].split('vivaldi')[0])
f.write(',')
            # Mean of each MFCC coefficient across all frames ('mfccs' holds one
            # 13-dimensional vector per frame; 'mfccs[i]' alone would instead average
            # the 13 coefficients of frame i).
            mfcc_means = np.mean(mfccs, axis=0)
            for coeff in mfcc_means:
                f.write('%r' %coeff)
                f.write(',')
f.write('%r' %np.mean(flat))
f.write(',')
f.write('%r' %np.var(flat))
f.write(',')
f.write('%r' %np.mean(rol))
f.write(',')
f.write('%r' %np.var(rol))
f.write(',')
f.write('%r' %np.mean(cen))
f.write(',')
f.write('%r' %np.var(cen))
f.write(',')
f.write('%r' %np.mean(flu))
f.write(',')
f.write('%r' %np.var(flu))
f.write(',')
f.write('%r' %np.mean(ene))
f.write(',')
f.write('%r' %np.var(ene))
f.write(',')
f.write('%r' %np.mean(zer))
f.write(',')
f.write('%r' %np.var(zer))
f.write(',')
f.write('%r' %np.std(flat))
f.write(',')
f.write('%r' %stats.hmean(flat))
f.write(',')
f.write('%r' %rate[0][1])
f.write(',')
            # Written in the order declared in the header:
            # gaborfilter-mean, then gaborfilter-variance.
            f.write('%r' %np.mean(mean_counter))
            f.write(',')
            f.write('%r' %np.var(mean_counter))
f.write(',')
f.write('%s' %composer)
f.write('\n')
counter += 1
f.write('%\n')
f.write('%\n')
f.write('%\n')
f.close()
| gpl-3.0 |
TRESCLOUD/odoo | addons/account_bank_statement_extensions/report/__init__.py | 415 | 1128 | # -*- encoding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
#
# Copyright (c) 2011 Noviat nv/sa (www.noviat.be). All rights reserved.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import bank_statement_balance_report
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
serviceagility/boto | boto/cognito/identity/layer1.py | 15 | 12034 | # Copyright (c) 2014 Amazon.com, Inc. or its affiliates. All Rights Reserved
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
#
import boto
from boto.compat import json
from boto.connection import AWSQueryConnection
from boto.regioninfo import RegionInfo
from boto.exception import JSONResponseError
from boto.cognito.identity import exceptions
class CognitoIdentityConnection(AWSQueryConnection):
"""
Amazon Cognito
Amazon Cognito is a web service that facilitates the delivery of
scoped, temporary credentials to mobile devices or other untrusted
environments. Amazon Cognito uniquely identifies a device or user
and supplies the user with a consistent identity throughout the
lifetime of an application.
Amazon Cognito lets users authenticate with third-party identity
providers (Facebook, Google, or Login with Amazon). As a
developer, you decide which identity providers to trust. You can
also choose to support unauthenticated access from your
application. Your users are provided with Cognito tokens that
uniquely identify their device and any information provided about
third-party logins.
"""
APIVersion = "2014-06-30"
DefaultRegionName = "us-east-1"
DefaultRegionEndpoint = "cognito-identity.us-east-1.amazonaws.com"
ServiceName = "CognitoIdentity"
TargetPrefix = "AWSCognitoIdentityService"
ResponseError = JSONResponseError
_faults = {
"LimitExceededException": exceptions.LimitExceededException,
"ResourceConflictException": exceptions.ResourceConflictException,
"TooManyRequestsException": exceptions.TooManyRequestsException,
"InvalidParameterException": exceptions.InvalidParameterException,
"ResourceNotFoundException": exceptions.ResourceNotFoundException,
"InternalErrorException": exceptions.InternalErrorException,
"NotAuthorizedException": exceptions.NotAuthorizedException,
}
def __init__(self, **kwargs):
region = kwargs.pop('region', None)
if not region:
region = RegionInfo(self, self.DefaultRegionName,
self.DefaultRegionEndpoint)
if 'host' not in kwargs or kwargs['host'] is None:
kwargs['host'] = region.endpoint
super(CognitoIdentityConnection, self).__init__(**kwargs)
self.region = region
def _required_auth_capability(self):
return ['hmac-v4']
def create_identity_pool(self, identity_pool_name,
allow_unauthenticated_identities,
supported_login_providers=None):
"""
Creates a new identity pool. The identity pool is a store of
user identity information that is specific to your AWS
account.
:type identity_pool_name: string
:param identity_pool_name: A string that you provide.
:type allow_unauthenticated_identities: boolean
:param allow_unauthenticated_identities: TRUE if the identity pool
supports unauthenticated logins.
:type supported_login_providers: map
:param supported_login_providers: Optional key:value pairs mapping
provider names to provider app IDs.
"""
params = {
'IdentityPoolName': identity_pool_name,
'AllowUnauthenticatedIdentities': allow_unauthenticated_identities,
}
if supported_login_providers is not None:
params['SupportedLoginProviders'] = supported_login_providers
return self.make_request(action='CreateIdentityPool',
body=json.dumps(params))
def delete_identity_pool(self, identity_pool_id):
"""
Deletes a user pool. Once a pool is deleted, users will not be
able to authenticate with the pool.
:type identity_pool_id: string
:param identity_pool_id: An identity pool ID in the format REGION:GUID.
"""
params = {'IdentityPoolId': identity_pool_id, }
return self.make_request(action='DeleteIdentityPool',
body=json.dumps(params))
def describe_identity_pool(self, identity_pool_id):
"""
Gets details about a particular identity pool, including the
pool name, ID description, creation date, and current number
of users.
:type identity_pool_id: string
:param identity_pool_id: An identity pool ID in the format REGION:GUID.
"""
params = {'IdentityPoolId': identity_pool_id, }
return self.make_request(action='DescribeIdentityPool',
body=json.dumps(params))
def get_id(self, account_id, identity_pool_id, logins=None):
"""
Generates (or retrieves) a Cognito ID. Supplying multiple
logins will create an implicit linked account.
:type account_id: string
:param account_id: A standard AWS account ID (9+ digits).
:type identity_pool_id: string
:param identity_pool_id: An identity pool ID in the format REGION:GUID.
:type logins: map
:param logins: A set of optional name/value pairs that map provider
names to provider tokens.
"""
params = {
'AccountId': account_id,
'IdentityPoolId': identity_pool_id,
}
if logins is not None:
params['Logins'] = logins
return self.make_request(action='GetId',
body=json.dumps(params))
def get_open_id_token(self, identity_id, logins=None):
"""
Gets an OpenID token, using a known Cognito ID. This known
Cognito ID is returned from GetId. You can optionally add
additional logins for the identity. Supplying multiple logins
creates an implicit link.
:type identity_id: string
:param identity_id: A unique identifier in the format REGION:GUID.
:type logins: map
:param logins: A set of optional name/value pairs that map provider
names to provider tokens.
"""
params = {'IdentityId': identity_id, }
if logins is not None:
params['Logins'] = logins
return self.make_request(action='GetOpenIdToken',
body=json.dumps(params))
def list_identities(self, identity_pool_id, max_results, next_token=None):
"""
Lists the identities in a pool.
:type identity_pool_id: string
:param identity_pool_id: An identity pool ID in the format REGION:GUID.
:type max_results: integer
:param max_results: The maximum number of identities to return.
:type next_token: string
:param next_token: A pagination token.
"""
params = {
'IdentityPoolId': identity_pool_id,
'MaxResults': max_results,
}
if next_token is not None:
params['NextToken'] = next_token
return self.make_request(action='ListIdentities',
body=json.dumps(params))
def list_identity_pools(self, max_results, next_token=None):
"""
Lists all of the Cognito identity pools registered for your
account.
:type max_results: integer
:param max_results: The maximum number of identities to return.
:type next_token: string
:param next_token: A pagination token.
"""
params = {'MaxResults': max_results, }
if next_token is not None:
params['NextToken'] = next_token
return self.make_request(action='ListIdentityPools',
body=json.dumps(params))
def unlink_identity(self, identity_id, logins, logins_to_remove):
"""
Unlinks a federated identity from an existing account.
Unlinked logins will be considered new identities next time
they are seen. Removing the last linked login will make this
identity inaccessible.
:type identity_id: string
:param identity_id: A unique identifier in the format REGION:GUID.
:type logins: map
:param logins: A set of optional name/value pairs that map provider
names to provider tokens.
:type logins_to_remove: list
:param logins_to_remove: Provider names to unlink from this identity.
"""
params = {
'IdentityId': identity_id,
'Logins': logins,
'LoginsToRemove': logins_to_remove,
}
return self.make_request(action='UnlinkIdentity',
body=json.dumps(params))
def update_identity_pool(self, identity_pool_id, identity_pool_name,
allow_unauthenticated_identities,
supported_login_providers=None):
"""
Updates a user pool.
:type identity_pool_id: string
:param identity_pool_id: An identity pool ID in the format REGION:GUID.
:type identity_pool_name: string
:param identity_pool_name: A string that you provide.
:type allow_unauthenticated_identities: boolean
:param allow_unauthenticated_identities: TRUE if the identity pool
supports unauthenticated logins.
:type supported_login_providers: map
:param supported_login_providers: Optional key:value pairs mapping
provider names to provider app IDs.
"""
params = {
'IdentityPoolId': identity_pool_id,
'IdentityPoolName': identity_pool_name,
'AllowUnauthenticatedIdentities': allow_unauthenticated_identities,
}
if supported_login_providers is not None:
params['SupportedLoginProviders'] = supported_login_providers
return self.make_request(action='UpdateIdentityPool',
body=json.dumps(params))
def make_request(self, action, body):
headers = {
'X-Amz-Target': '%s.%s' % (self.TargetPrefix, action),
'Host': self.region.endpoint,
'Content-Type': 'application/x-amz-json-1.1',
'Content-Length': str(len(body)),
}
http_request = self.build_base_http_request(
method='POST', path='/', auth_path='/', params={},
headers=headers, data=body)
response = self._mexe(http_request, sender=None,
override_num_retries=10)
response_body = response.read().decode('utf-8')
boto.log.debug(response_body)
if response.status == 200:
if response_body:
return json.loads(response_body)
else:
json_body = json.loads(response_body)
fault_name = json_body.get('__type', None)
exception_class = self._faults.get(fault_name, self.ResponseError)
raise exception_class(response.status, response.reason,
body=json_body)
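# --- Illustrative usage sketch (not part of boto itself) ---
# Typical flow against this connection class; the account ID and pool name are
# placeholders, and AWS credentials are assumed to be configured for boto.
def _example_cognito_flow():
    conn = CognitoIdentityConnection()
    pool = conn.create_identity_pool(
        identity_pool_name='MyTestPool',
        allow_unauthenticated_identities=True)
    identity = conn.get_id(account_id='123456789012',
                           identity_pool_id=pool['IdentityPoolId'])
    return conn.get_open_id_token(identity_id=identity['IdentityId'])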
| mit |
facelessuser/backrefs | tests/test_wordbreak.py | 1 | 2754 | """Test `Word Break`."""
import unittest
from backrefs import uniprops
import re
class TestWordBreak(unittest.TestCase):
"""Test `Word Break` access."""
def test_table_integrity(self):
"""Test that there is parity between Unicode and ASCII tables."""
re_key = re.compile(r'^\^?[a-z0-9./]+$')
keys1 = set(uniprops.unidata.unicode_word_break.keys())
keys2 = set(uniprops.unidata.ascii_word_break.keys())
        # Ensure all keys are lowercase (we only need to check the Unicode keys,
        # since the ASCII keys must match them below)
for k in keys1:
self.assertTrue(re_key.match(k) is not None)
# Ensure the same keys are in both the Unicode table as the ASCII table
self.assertEqual(keys1, keys2)
# Ensure each positive key has an inverse key
for key in keys1:
if not key.startswith('^'):
self.assertTrue('^' + key in keys1)
def test_wordbreak(self):
"""Test `Word Break` properties."""
for k, v in uniprops.unidata.unicode_word_break.items():
result = uniprops.get_unicode_property('wordbreak', k)
self.assertEqual(result, v)
def test_wordbreak_ascii(self):
"""Test `Word Break` ASCII properties."""
for k, v in uniprops.unidata.ascii_word_break.items():
result = uniprops.get_unicode_property('wordbreak', k, mode=uniprops.MODE_NORMAL)
self.assertEqual(result, v)
def test_wordbreak_binary(self):
"""Test `Word Break` ASCII properties."""
for k, v in uniprops.unidata.ascii_word_break.items():
result = uniprops.get_unicode_property('wordbreak', k, mode=uniprops.MODE_ASCII)
self.assertEqual(result, uniprops.fmt_string(v, True))
def test_bad_wordbreak(self):
"""Test `Word Break` property with bad value."""
with self.assertRaises(ValueError):
uniprops.get_unicode_property('wordbreak', 'bad')
def test_alias(self):
"""Test aliases."""
alias = None
for k, v in uniprops.unidata.alias.unicode_alias['_'].items():
if v == 'wordbreak':
alias = k
break
self.assertTrue(alias is not None)
# Ensure alias works
for k, v in uniprops.unidata.unicode_word_break.items():
result = uniprops.get_unicode_property(alias, k)
self.assertEqual(result, v)
break
# Test aliases for values
for k, v in uniprops.unidata.alias.unicode_alias['wordbreak'].items():
result1 = uniprops.get_unicode_property(alias, k)
result2 = uniprops.get_unicode_property(alias, v)
self.assertEqual(result1, result2)
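    def _demo_character_class(self):
        """Illustrative helper, not a test: assumes each table value is a
        bracket-expression fragment usable inside a regex character class."""
        fragment = uniprops.get_unicode_property('wordbreak', 'aletter')
        return re.compile('[' + fragment + ']')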
| mit |
Shelnutt2/android_kernel_lge_gee_3.4 | tools/perf/scripts/python/futex-contention.py | 11261 | 1486 | # futex contention
# (c) 2010, Arnaldo Carvalho de Melo <acme@redhat.com>
# Licensed under the terms of the GNU GPL License version 2
#
# Translation of:
#
# http://sourceware.org/systemtap/wiki/WSFutexContention
#
# to perf python scripting.
#
# Measures futex contention
import os, sys
sys.path.append(os.environ['PERF_EXEC_PATH'] + '/scripts/python/Perf-Trace-Util/lib/Perf/Trace')
from Util import *
thread_thislock = {}
thread_blocktime = {}
lock_waits = {} # long-lived stats on (tid,lock) blockage elapsed time
process_names = {} # long-lived pid-to-execname mapping
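# Shapes of the bookkeeping structures above:
# thread_thislock[tid] -> futex address the thread is currently waiting on
# thread_blocktime[tid] -> timestamp (ns) when the thread entered FUTEX_WAIT
# lock_waits[(tid, addr)] -> (min, max, avg, count) wait-time stats kept by add_stats()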
def syscalls__sys_enter_futex(event, ctxt, cpu, s, ns, tid, comm,
nr, uaddr, op, val, utime, uaddr2, val3):
cmd = op & FUTEX_CMD_MASK
if cmd != FUTEX_WAIT:
return # we don't care about originators of WAKE events
process_names[tid] = comm
thread_thislock[tid] = uaddr
thread_blocktime[tid] = nsecs(s, ns)
def syscalls__sys_exit_futex(event, ctxt, cpu, s, ns, tid, comm,
nr, ret):
	if tid in thread_blocktime:
elapsed = nsecs(s, ns) - thread_blocktime[tid]
add_stats(lock_waits, (tid, thread_thislock[tid]), elapsed)
del thread_blocktime[tid]
del thread_thislock[tid]
def trace_begin():
print "Press control+C to stop and show the summary"
def trace_end():
for (tid, lock) in lock_waits:
min, max, avg, count = lock_waits[tid, lock]
print "%s[%d] lock %x contended %d times, %d avg ns" % \
(process_names[tid], tid, lock, count, avg)
| gpl-2.0 |
tuxfux-hlp-notes/python-batches | archieves/batch-64/09-modules/myenv/lib/python2.7/site-packages/django/core/files/uploadedfile.py | 471 | 4334 | """
Classes representing uploaded files.
"""
import errno
import os
from io import BytesIO
from django.conf import settings
from django.core.files import temp as tempfile
from django.core.files.base import File
from django.utils.encoding import force_str
__all__ = ('UploadedFile', 'TemporaryUploadedFile', 'InMemoryUploadedFile',
'SimpleUploadedFile')
class UploadedFile(File):
"""
    An abstract uploaded file (``TemporaryUploadedFile`` and
``InMemoryUploadedFile`` are the built-in concrete subclasses).
An ``UploadedFile`` object behaves somewhat like a file object and
represents some file data that the user submitted with a form.
"""
DEFAULT_CHUNK_SIZE = 64 * 2 ** 10
def __init__(self, file=None, name=None, content_type=None, size=None, charset=None, content_type_extra=None):
super(UploadedFile, self).__init__(file, name)
self.size = size
self.content_type = content_type
self.charset = charset
self.content_type_extra = content_type_extra
def __repr__(self):
return force_str("<%s: %s (%s)>" % (
self.__class__.__name__, self.name, self.content_type))
def _get_name(self):
return self._name
def _set_name(self, name):
# Sanitize the file name so that it can't be dangerous.
if name is not None:
# Just use the basename of the file -- anything else is dangerous.
name = os.path.basename(name)
# File names longer than 255 characters can cause problems on older OSes.
if len(name) > 255:
name, ext = os.path.splitext(name)
ext = ext[:255]
name = name[:255 - len(ext)] + ext
self._name = name
name = property(_get_name, _set_name)
class TemporaryUploadedFile(UploadedFile):
"""
A file uploaded to a temporary location (i.e. stream-to-disk).
"""
def __init__(self, name, content_type, size, charset, content_type_extra=None):
if settings.FILE_UPLOAD_TEMP_DIR:
file = tempfile.NamedTemporaryFile(suffix='.upload',
dir=settings.FILE_UPLOAD_TEMP_DIR)
else:
file = tempfile.NamedTemporaryFile(suffix='.upload')
super(TemporaryUploadedFile, self).__init__(file, name, content_type, size, charset, content_type_extra)
def temporary_file_path(self):
"""
Returns the full path of this file.
"""
return self.file.name
def close(self):
try:
return self.file.close()
except OSError as e:
if e.errno != errno.ENOENT:
# Means the file was moved or deleted before the tempfile
# could unlink it. Still sets self.file.close_called and
# calls self.file.file.close() before the exception
raise
class InMemoryUploadedFile(UploadedFile):
"""
A file uploaded into memory (i.e. stream-to-memory).
"""
def __init__(self, file, field_name, name, content_type, size, charset, content_type_extra=None):
super(InMemoryUploadedFile, self).__init__(file, name, content_type, size, charset, content_type_extra)
self.field_name = field_name
def open(self, mode=None):
self.file.seek(0)
def chunks(self, chunk_size=None):
self.file.seek(0)
yield self.read()
def multiple_chunks(self, chunk_size=None):
# Since it's in memory, we'll never have multiple chunks.
return False
class SimpleUploadedFile(InMemoryUploadedFile):
"""
A simple representation of a file, which just has content, size, and a name.
"""
def __init__(self, name, content, content_type='text/plain'):
content = content or b''
super(SimpleUploadedFile, self).__init__(BytesIO(content), None, name,
content_type, len(content), None, None)
@classmethod
def from_dict(cls, file_dict):
"""
Creates a SimpleUploadedFile object from
a dictionary object with the following keys:
- filename
- content-type
- content
"""
return cls(file_dict['filename'],
file_dict['content'],
file_dict.get('content-type', 'text/plain'))
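# Illustrative sketch (not part of Django): building and reading a
# SimpleUploadedFile from a dict, as one might do in a test.
def _demo_simple_uploaded_file():
    upload = SimpleUploadedFile.from_dict({
        'filename': 'hello.txt',
        'content': b'hello world',
    })
    return upload.name, upload.content_type, upload.read()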
| gpl-3.0 |
kbdick/RecycleTracker | recyclecollector/scrap/gdata-2.0.18/samples/apps/marketplace_sample/gdata/tlslite/integration/IntegrationHelper.py | 286 | 1839 |
# NOTE: 'Checker' is referenced below; the import was missing from this copy of
# the file (import path assumed from the package layout).
from gdata.tlslite.Checker import Checker
class IntegrationHelper:
def __init__(self,
username=None, password=None, sharedKey=None,
certChain=None, privateKey=None,
cryptoID=None, protocol=None,
x509Fingerprint=None,
x509TrustList=None, x509CommonName=None,
settings = None):
self.username = None
self.password = None
self.sharedKey = None
self.certChain = None
self.privateKey = None
self.checker = None
#SRP Authentication
if username and password and not \
(sharedKey or certChain or privateKey):
self.username = username
self.password = password
#Shared Key Authentication
elif username and sharedKey and not \
(password or certChain or privateKey):
self.username = username
self.sharedKey = sharedKey
#Certificate Chain Authentication
elif certChain and privateKey and not \
(username or password or sharedKey):
self.certChain = certChain
self.privateKey = privateKey
#No Authentication
elif not password and not username and not \
sharedKey and not certChain and not privateKey:
pass
else:
raise ValueError("Bad parameters")
#Authenticate the server based on its cryptoID or fingerprint
if sharedKey and (cryptoID or protocol or x509Fingerprint):
raise ValueError("Can't use shared keys with other forms of"\
"authentication")
self.checker = Checker(cryptoID, protocol, x509Fingerprint,
x509TrustList, x509CommonName)
self.settings = settings | gpl-3.0 |
FireBladeNooT/Medusa_1_6 | lib/sqlalchemy/orm/mapper.py | 12 | 115133 | # orm/mapper.py
# Copyright (C) 2005-2016 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
"""Logic to map Python classes to and from selectables.
Defines the :class:`~sqlalchemy.orm.mapper.Mapper` class, the central
configurational unit which associates a class with a database table.
This is a semi-private module; the main configurational API of the ORM is
available in :class:`~sqlalchemy.orm.`.
"""
from __future__ import absolute_import
import types
import weakref
from itertools import chain
from collections import deque
from .. import sql, util, log, exc as sa_exc, event, schema, inspection
from ..sql import expression, visitors, operators, util as sql_util
from . import instrumentation, attributes, exc as orm_exc, loading
from . import properties
from . import util as orm_util
from .interfaces import MapperProperty, InspectionAttr, _MappedAttribute
from .base import _class_to_mapper, _state_mapper, class_mapper, \
state_str, _INSTRUMENTOR
from .path_registry import PathRegistry
import sys
_mapper_registry = weakref.WeakKeyDictionary()
_already_compiling = False
_memoized_configured_property = util.group_expirable_memoized_property()
# a constant returned by _get_attr_by_column to indicate
# this mapper is not handling an attribute for a particular
# column
NO_ATTRIBUTE = util.symbol('NO_ATTRIBUTE')
# lock used to synchronize the "mapper configure" step
_CONFIGURE_MUTEX = util.threading.RLock()
@inspection._self_inspects
@log.class_logger
class Mapper(InspectionAttr):
"""Define the correlation of class attributes to database table
columns.
The :class:`.Mapper` object is instantiated using the
:func:`~sqlalchemy.orm.mapper` function. For information
about instantiating new :class:`.Mapper` objects, see
that function's documentation.
When :func:`.mapper` is used
explicitly to link a user defined class with table
metadata, this is referred to as *classical mapping*.
Modern SQLAlchemy usage tends to favor the
:mod:`sqlalchemy.ext.declarative` extension for class
configuration, which
makes usage of :func:`.mapper` behind the scenes.
Given a particular class known to be mapped by the ORM,
the :class:`.Mapper` which maintains it can be acquired
using the :func:`.inspect` function::
from sqlalchemy import inspect
mapper = inspect(MyClass)
A class which was mapped by the :mod:`sqlalchemy.ext.declarative`
extension will also have its mapper available via the ``__mapper__``
attribute.
"""
_new_mappers = False
def __init__(self,
class_,
local_table=None,
properties=None,
primary_key=None,
non_primary=False,
inherits=None,
inherit_condition=None,
inherit_foreign_keys=None,
extension=None,
order_by=False,
always_refresh=False,
version_id_col=None,
version_id_generator=None,
polymorphic_on=None,
_polymorphic_map=None,
polymorphic_identity=None,
concrete=False,
with_polymorphic=None,
allow_partial_pks=True,
batch=True,
column_prefix=None,
include_properties=None,
exclude_properties=None,
passive_updates=True,
confirm_deleted_rows=True,
eager_defaults=False,
legacy_is_orphan=False,
_compiled_cache_size=100,
):
"""Return a new :class:`~.Mapper` object.
This function is typically used behind the scenes
via the Declarative extension. When using Declarative,
many of the usual :func:`.mapper` arguments are handled
by the Declarative extension itself, including ``class_``,
``local_table``, ``properties``, and ``inherits``.
Other options are passed to :func:`.mapper` using
the ``__mapper_args__`` class variable::
class MyClass(Base):
__tablename__ = 'my_table'
id = Column(Integer, primary_key=True)
type = Column(String(50))
alt = Column("some_alt", Integer)
__mapper_args__ = {
'polymorphic_on' : type
}
Explicit use of :func:`.mapper`
is often referred to as *classical mapping*. The above
declarative example is equivalent in classical form to::
my_table = Table("my_table", metadata,
Column('id', Integer, primary_key=True),
Column('type', String(50)),
Column("some_alt", Integer)
)
class MyClass(object):
pass
mapper(MyClass, my_table,
polymorphic_on=my_table.c.type,
properties={
'alt':my_table.c.some_alt
})
.. seealso::
:ref:`classical_mapping` - discussion of direct usage of
:func:`.mapper`
:param class\_: The class to be mapped. When using Declarative,
this argument is automatically passed as the declared class
itself.
:param local_table: The :class:`.Table` or other selectable
to which the class is mapped. May be ``None`` if
this mapper inherits from another mapper using single-table
inheritance. When using Declarative, this argument is
automatically passed by the extension, based on what
is configured via the ``__table__`` argument or via the
:class:`.Table` produced as a result of the ``__tablename__``
and :class:`.Column` arguments present.
:param always_refresh: If True, all query operations for this mapped
class will overwrite all data within object instances that already
exist within the session, erasing any in-memory changes with
whatever information was loaded from the database. Usage of this
flag is highly discouraged; as an alternative, see the method
:meth:`.Query.populate_existing`.
:param allow_partial_pks: Defaults to True. Indicates that a
composite primary key with some NULL values should be considered as
possibly existing within the database. This affects whether a
mapper will assign an incoming row to an existing identity, as well
as if :meth:`.Session.merge` will check the database first for a
particular primary key value. A "partial primary key" can occur if
one has mapped to an OUTER JOIN, for example.
:param batch: Defaults to ``True``, indicating that save operations
of multiple entities can be batched together for efficiency.
Setting to False indicates
that an instance will be fully saved before saving the next
instance. This is used in the extremely rare case that a
:class:`.MapperEvents` listener requires being called
in between individual row persistence operations.
:param column_prefix: A string which will be prepended
to the mapped attribute name when :class:`.Column`
objects are automatically assigned as attributes to the
mapped class. Does not affect explicitly specified
column-based properties.
See the section :ref:`column_prefix` for an example.
:param concrete: If True, indicates this mapper should use concrete
table inheritance with its parent mapper.
See the section :ref:`concrete_inheritance` for an example.
:param confirm_deleted_rows: defaults to True; when a DELETE occurs
of one more rows based on specific primary keys, a warning is
emitted when the number of rows matched does not equal the number
of rows expected. This parameter may be set to False to handle the
case where database ON DELETE CASCADE rules may be deleting some of
those rows automatically. The warning may be changed to an
exception in a future release.
.. versionadded:: 0.9.4 - added
:paramref:`.mapper.confirm_deleted_rows` as well as conditional
matched row checking on delete.
:param eager_defaults: if True, the ORM will immediately fetch the
value of server-generated default values after an INSERT or UPDATE,
rather than leaving them as expired to be fetched on next access.
This can be used for event schemes where the server-generated values
are needed immediately before the flush completes. By default,
this scheme will emit an individual ``SELECT`` statement per row
inserted or updated, which note can add significant performance
overhead. However, if the
target database supports :term:`RETURNING`, the default values will
be returned inline with the INSERT or UPDATE statement, which can
greatly enhance performance for an application that needs frequent
access to just-generated server defaults.
.. versionchanged:: 0.9.0 The ``eager_defaults`` option can now
make use of :term:`RETURNING` for backends which support it.
:param exclude_properties: A list or set of string column names to
be excluded from mapping.
See :ref:`include_exclude_cols` for an example.
:param extension: A :class:`.MapperExtension` instance or
list of :class:`.MapperExtension` instances which will be applied
to all operations by this :class:`.Mapper`. **Deprecated.**
Please see :class:`.MapperEvents`.
:param include_properties: An inclusive list or set of string column
names to map.
See :ref:`include_exclude_cols` for an example.
:param inherits: A mapped class or the corresponding :class:`.Mapper`
of one indicating a superclass to which this :class:`.Mapper`
should *inherit* from. The mapped class here must be a subclass
of the other mapper's class. When using Declarative, this argument
is passed automatically as a result of the natural class
hierarchy of the declared classes.
.. seealso::
:ref:`inheritance_toplevel`
:param inherit_condition: For joined table inheritance, a SQL
expression which will
define how the two tables are joined; defaults to a natural join
between the two tables.
:param inherit_foreign_keys: When ``inherit_condition`` is used and
the columns present are missing a :class:`.ForeignKey`
configuration, this parameter can be used to specify which columns
are "foreign". In most cases can be left as ``None``.
:param legacy_is_orphan: Boolean, defaults to ``False``.
When ``True``, specifies that "legacy" orphan consideration
is to be applied to objects mapped by this mapper, which means
that a pending (that is, not persistent) object is auto-expunged
from an owning :class:`.Session` only when it is de-associated
from *all* parents that specify a ``delete-orphan`` cascade towards
this mapper. The new default behavior is that the object is
auto-expunged when it is de-associated with *any* of its parents
that specify ``delete-orphan`` cascade. This behavior is more
consistent with that of a persistent object, and allows behavior to
be consistent in more scenarios independently of whether or not an
orphanable object has been flushed yet or not.
See the change note and example at :ref:`legacy_is_orphan_addition`
for more detail on this change.
.. versionadded:: 0.8 - the consideration of a pending object as
an "orphan" has been modified to more closely match the
behavior as that of persistent objects, which is that the object
is expunged from the :class:`.Session` as soon as it is
de-associated from any of its orphan-enabled parents. Previously,
the pending object would be expunged only if de-associated
from all of its orphan-enabled parents. The new flag
``legacy_is_orphan`` is added to :func:`.orm.mapper` which
re-establishes the legacy behavior.
:param non_primary: Specify that this :class:`.Mapper` is in addition
to the "primary" mapper, that is, the one used for persistence.
The :class:`.Mapper` created here may be used for ad-hoc
mapping of the class to an alternate selectable, for loading
only.
:paramref:`.Mapper.non_primary` is not an often used option, but
is useful in some specific :func:`.relationship` cases.
.. seealso::
:ref:`relationship_non_primary_mapper`
:param order_by: A single :class:`.Column` or list of :class:`.Column`
objects for which selection operations should use as the default
ordering for entities. By default mappers have no pre-defined
ordering.
:param passive_updates: Indicates UPDATE behavior of foreign key
columns when a primary key column changes on a joined-table
inheritance mapping. Defaults to ``True``.
When True, it is assumed that ON UPDATE CASCADE is configured on
the foreign key in the database, and that the database will handle
propagation of an UPDATE from a source column to dependent columns
on joined-table rows.
When False, it is assumed that the database does not enforce
referential integrity and will not be issuing its own CASCADE
operation for an update. The unit of work process will
emit an UPDATE statement for the dependent columns during a
primary key change.
.. seealso::
:ref:`passive_updates` - description of a similar feature as
used with :func:`.relationship`
:param polymorphic_on: Specifies the column, attribute, or
SQL expression used to determine the target class for an
incoming row, when inheriting classes are present.
This value is commonly a :class:`.Column` object that's
present in the mapped :class:`.Table`::
class Employee(Base):
__tablename__ = 'employee'
id = Column(Integer, primary_key=True)
discriminator = Column(String(50))
__mapper_args__ = {
"polymorphic_on":discriminator,
"polymorphic_identity":"employee"
}
It may also be specified
as a SQL expression, as in this example where we
use the :func:`.case` construct to provide a conditional
approach::
class Employee(Base):
__tablename__ = 'employee'
id = Column(Integer, primary_key=True)
discriminator = Column(String(50))
__mapper_args__ = {
"polymorphic_on":case([
(discriminator == "EN", "engineer"),
(discriminator == "MA", "manager"),
], else_="employee"),
"polymorphic_identity":"employee"
}
It may also refer to any attribute
configured with :func:`.column_property`, or to the
string name of one::
class Employee(Base):
__tablename__ = 'employee'
id = Column(Integer, primary_key=True)
discriminator = Column(String(50))
employee_type = column_property(
case([
(discriminator == "EN", "engineer"),
(discriminator == "MA", "manager"),
], else_="employee")
)
__mapper_args__ = {
"polymorphic_on":employee_type,
"polymorphic_identity":"employee"
}
.. versionchanged:: 0.7.4
``polymorphic_on`` may be specified as a SQL expression,
or refer to any attribute configured with
:func:`.column_property`, or to the string name of one.
When setting ``polymorphic_on`` to reference an
attribute or expression that's not present in the
locally mapped :class:`.Table`, yet the value
of the discriminator should be persisted to the database,
the value of the
discriminator is not automatically set on new
instances; this must be handled by the user,
either through manual means or via event listeners.
A typical approach to establishing such a listener
looks like::
from sqlalchemy import event
from sqlalchemy.orm import object_mapper
@event.listens_for(Employee, "init", propagate=True)
def set_identity(instance, *arg, **kw):
mapper = object_mapper(instance)
instance.discriminator = mapper.polymorphic_identity
Where above, we assign the value of ``polymorphic_identity``
for the mapped class to the ``discriminator`` attribute,
thus persisting the value to the ``discriminator`` column
in the database.
.. warning::
Currently, **only one discriminator column may be set**, typically
on the base-most class in the hierarchy. "Cascading" polymorphic
columns are not yet supported.
.. seealso::
:ref:`inheritance_toplevel`
:param polymorphic_identity: Specifies the value which
identifies this particular class as returned by the
column expression referred to by the ``polymorphic_on``
setting. As rows are received, the value corresponding
to the ``polymorphic_on`` column expression is compared
to this value, indicating which subclass should
be used for the newly reconstructed object.
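For example, continuing the hypothetical ``Employee`` mapping
shown above for ``polymorphic_on``, a subclass would supply
its own identity value::

    class Engineer(Employee):
        __mapper_args__ = {
            "polymorphic_identity": "engineer"
        }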
:param properties: A dictionary mapping the string names of object
attributes to :class:`.MapperProperty` instances, which define the
persistence behavior of that attribute. Note that :class:`.Column`
objects present in
the mapped :class:`.Table` are automatically placed into
``ColumnProperty`` instances upon mapping, unless overridden.
When using Declarative, this argument is passed automatically,
based on all those :class:`.MapperProperty` instances declared
in the declared class body.
:param primary_key: A list of :class:`.Column` objects which define
the primary key to be used against this mapper's selectable unit.
This is normally simply the primary key of the ``local_table``, but
can be overridden here.
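E.g., a sketch assuming a hypothetical ``user_table`` whose
``id`` column lacks a formal PRIMARY KEY constraint::

    mapper(User, user_table, primary_key=[user_table.c.id])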
:param version_id_col: A :class:`.Column`
that will be used to keep a running version id of rows
in the table. This is used to detect concurrent updates or
the presence of stale data in a flush. If an UPDATE statement
does not match the last known version id, a
:class:`~sqlalchemy.orm.exc.StaleDataError` exception is
raised.
By default, the column must be of :class:`.Integer` type,
unless ``version_id_generator`` specifies an alternative version
generator.
.. seealso::
:ref:`mapper_version_counter` - discussion of version counting
and rationale.
:param version_id_generator: Define how new version ids should
be generated. Defaults to ``None``, which indicates that
a simple integer counting scheme be employed. To provide a custom
versioning scheme, provide a callable function of the form::
def generate_version(version):
return next_version
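For example, a sketch of a UUID-based scheme, assuming a
hypothetical ``version_uuid`` column::

    import uuid

    class User(Base):
        __tablename__ = 'user'
        id = Column(Integer, primary_key=True)
        version_uuid = Column(String(32))
        __mapper_args__ = {
            'version_id_col': version_uuid,
            'version_id_generator': lambda version: uuid.uuid4().hex
        }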
Alternatively, server-side versioning functions such as triggers,
or programmatic versioning schemes outside of the version id
generator may be used, by specifying the value ``False``.
Please see :ref:`server_side_version_counter` for a discussion
of important points when using this option.
.. versionadded:: 0.9.0 ``version_id_generator`` supports
server-side version number generation.
.. seealso::
:ref:`custom_version_counter`
:ref:`server_side_version_counter`
:param with_polymorphic: A tuple in the form ``(<classes>,
<selectable>)`` indicating the default style of "polymorphic"
loading, that is, which tables are queried at once. <classes> is
any single or list of mappers and/or classes indicating the
inherited classes that should be loaded at once. The special value
``'*'`` may be used to indicate all descending classes should be
loaded immediately. The second tuple argument <selectable>
indicates a selectable that will be used to query for multiple
classes.
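E.g., a hypothetical joined-table inheritance mapping might
request that all subclasses be loaded at once::

    __mapper_args__ = {
        'with_polymorphic': '*'
    }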
.. seealso::
:ref:`with_polymorphic` - discussion of polymorphic querying
techniques.
"""
self.class_ = util.assert_arg_type(class_, type, 'class_')
self.class_manager = None
self._primary_key_argument = util.to_list(primary_key)
self.non_primary = non_primary
if order_by is not False:
self.order_by = util.to_list(order_by)
else:
self.order_by = order_by
self.always_refresh = always_refresh
if isinstance(version_id_col, MapperProperty):
self.version_id_prop = version_id_col
self.version_id_col = None
else:
self.version_id_col = version_id_col
if version_id_generator is False:
self.version_id_generator = False
elif version_id_generator is None:
self.version_id_generator = lambda x: (x or 0) + 1
else:
self.version_id_generator = version_id_generator
self.concrete = concrete
self.single = False
self.inherits = inherits
self.local_table = local_table
self.inherit_condition = inherit_condition
self.inherit_foreign_keys = inherit_foreign_keys
self._init_properties = properties or {}
self._delete_orphans = []
self.batch = batch
self.eager_defaults = eager_defaults
self.column_prefix = column_prefix
self.polymorphic_on = expression._clause_element_as_expr(
polymorphic_on)
self._dependency_processors = []
self.validators = util.immutabledict()
self.passive_updates = passive_updates
self.legacy_is_orphan = legacy_is_orphan
self._clause_adapter = None
self._requires_row_aliasing = False
self._inherits_equated_pairs = None
self._memoized_values = {}
self._compiled_cache_size = _compiled_cache_size
self._reconstructor = None
self._deprecated_extensions = util.to_list(extension or [])
self.allow_partial_pks = allow_partial_pks
if self.inherits and not self.concrete:
self.confirm_deleted_rows = False
else:
self.confirm_deleted_rows = confirm_deleted_rows
self._set_with_polymorphic(with_polymorphic)
if isinstance(self.local_table, expression.SelectBase):
raise sa_exc.InvalidRequestError(
"When mapping against a select() construct, map against "
"an alias() of the construct instead."
"This because several databases don't allow a "
"SELECT from a subquery that does not have an alias."
)
if self.with_polymorphic and \
isinstance(self.with_polymorphic[1],
expression.SelectBase):
self.with_polymorphic = (self.with_polymorphic[0],
self.with_polymorphic[1].alias())
# our 'polymorphic identity', a string name that when located in a
# result set row indicates this Mapper should be used to construct
# the object instance for that row.
self.polymorphic_identity = polymorphic_identity
# a dictionary of 'polymorphic identity' names, associating those
# names with Mappers that will be used to construct object instances
# upon a select operation.
if _polymorphic_map is None:
self.polymorphic_map = {}
else:
self.polymorphic_map = _polymorphic_map
if include_properties is not None:
self.include_properties = util.to_set(include_properties)
else:
self.include_properties = None
if exclude_properties:
self.exclude_properties = util.to_set(exclude_properties)
else:
self.exclude_properties = None
self.configured = False
# prevent this mapper from being constructed
# while a configure_mappers() is occurring (and defer a
# configure_mappers() until construction succeeds)
_CONFIGURE_MUTEX.acquire()
try:
self.dispatch._events._new_mapper_instance(class_, self)
self._configure_inheritance()
self._configure_legacy_instrument_class()
self._configure_class_instrumentation()
self._configure_listeners()
self._configure_properties()
self._configure_polymorphic_setter()
self._configure_pks()
Mapper._new_mappers = True
self._log("constructed")
self._expire_memoizations()
finally:
_CONFIGURE_MUTEX.release()
# major attributes initialized at the classlevel so that
# they can be Sphinx-documented.
is_mapper = True
"""Part of the inspection API."""
@property
def mapper(self):
"""Part of the inspection API.
Returns self.
"""
return self
@property
def entity(self):
"""Part of the inspection API.
Returns self.class\_.
"""
return self.class_
local_table = None
"""The :class:`.Selectable` which this :class:`.Mapper` manages.
Typically is an instance of :class:`.Table` or :class:`.Alias`.
May also be ``None``.
The "local" table is the
selectable that the :class:`.Mapper` is directly responsible for
managing from an attribute access and flush perspective. For
non-inheriting mappers, the local table is the same as the
"mapped" table. For joined-table inheritance mappers, local_table
will be the particular sub-table of the overall "join" which
this :class:`.Mapper` represents. If this mapper is a
single-table inheriting mapper, local_table will be ``None``.
.. seealso::
:attr:`~.Mapper.mapped_table`.
"""
mapped_table = None
"""The :class:`.Selectable` to which this :class:`.Mapper` is mapped.
Typically an instance of :class:`.Table`, :class:`.Join`, or
:class:`.Alias`.
The "mapped" table is the selectable that
the mapper selects from during queries. For non-inheriting
mappers, the mapped table is the same as the "local" table.
For joined-table inheritance mappers, mapped_table references the
full :class:`.Join` representing full rows for this particular
subclass. For single-table inheritance mappers, mapped_table
references the base table.
.. seealso::
:attr:`~.Mapper.local_table`.
"""
inherits = None
"""References the :class:`.Mapper` which this :class:`.Mapper`
inherits from, if any.
This is a *read only* attribute determined during mapper construction.
Behavior is undefined if directly modified.
"""
configured = None
"""Represent ``True`` if this :class:`.Mapper` has been configured.
This is a *read only* attribute determined during mapper construction.
Behavior is undefined if directly modified.
.. seealso::
:func:`.configure_mappers`.
"""
concrete = None
"""Represent ``True`` if this :class:`.Mapper` is a concrete
inheritance mapper.
This is a *read only* attribute determined during mapper construction.
Behavior is undefined if directly modified.
"""
tables = None
"""An iterable containing the collection of :class:`.Table` objects
which this :class:`.Mapper` is aware of.
If the mapper is mapped to a :class:`.Join`, or an :class:`.Alias`
representing a :class:`.Select`, the individual :class:`.Table`
objects that comprise the full construct will be represented here.
This is a *read only* attribute determined during mapper construction.
Behavior is undefined if directly modified.
"""
primary_key = None
"""An iterable containing the collection of :class:`.Column` objects
which comprise the 'primary key' of the mapped table, from the
perspective of this :class:`.Mapper`.
This list is against the selectable in :attr:`~.Mapper.mapped_table`. In
the case of inheriting mappers, some columns may be managed by a
superclass mapper. For example, in the case of a :class:`.Join`, the
primary key is determined by all of the primary key columns across all
tables referenced by the :class:`.Join`.
The list is also not necessarily the same as the primary key column
collection associated with the underlying tables; the :class:`.Mapper`
features a ``primary_key`` argument that can override what the
:class:`.Mapper` considers as primary key columns.
This is a *read only* attribute determined during mapper construction.
Behavior is undefined if directly modified.
"""
class_ = None
"""The Python class which this :class:`.Mapper` maps.
This is a *read only* attribute determined during mapper construction.
Behavior is undefined if directly modified.
"""
class_manager = None
"""The :class:`.ClassManager` which maintains event listeners
and class-bound descriptors for this :class:`.Mapper`.
This is a *read only* attribute determined during mapper construction.
Behavior is undefined if directly modified.
"""
single = None
"""Represent ``True`` if this :class:`.Mapper` is a single table
inheritance mapper.
:attr:`~.Mapper.local_table` will be ``None`` if this flag is set.
This is a *read only* attribute determined during mapper construction.
Behavior is undefined if directly modified.
"""
non_primary = None
"""Represent ``True`` if this :class:`.Mapper` is a "non-primary"
mapper, e.g. a mapper that is used only to select rows but not for
persistence management.
This is a *read only* attribute determined during mapper construction.
Behavior is undefined if directly modified.
"""
polymorphic_on = None
"""The :class:`.Column` or SQL expression specified as the
``polymorphic_on`` argument
for this :class:`.Mapper`, within an inheritance scenario.
This attribute is normally a :class:`.Column` instance but
may also be an expression, such as one derived from
:func:`.cast`.
This is a *read only* attribute determined during mapper construction.
Behavior is undefined if directly modified.
"""
polymorphic_map = None
"""A mapping of "polymorphic identity" identifiers mapped to
:class:`.Mapper` instances, within an inheritance scenario.
The identifiers can be of any type which is comparable to the
type of column represented by :attr:`~.Mapper.polymorphic_on`.
An inheritance chain of mappers will all reference the same
polymorphic map object. The object is used to correlate incoming
result rows to target mappers.
This is a *read only* attribute determined during mapper construction.
Behavior is undefined if directly modified.
"""
polymorphic_identity = None
"""Represent an identifier which is matched against the
:attr:`~.Mapper.polymorphic_on` column during result row loading.
Used only with inheritance, this object can be of any type which is
comparable to the type of column represented by
:attr:`~.Mapper.polymorphic_on`.
This is a *read only* attribute determined during mapper construction.
Behavior is undefined if directly modified.
"""
base_mapper = None
"""The base-most :class:`.Mapper` in an inheritance chain.
In a non-inheriting scenario, this attribute will always be this
:class:`.Mapper`. In an inheritance scenario, it references
the :class:`.Mapper` which is parent to all other :class:`.Mapper`
objects in the inheritance chain.
This is a *read only* attribute determined during mapper construction.
Behavior is undefined if directly modified.
"""
columns = None
"""A collection of :class:`.Column` or other scalar expression
objects maintained by this :class:`.Mapper`.
The collection behaves the same as that of the ``c`` attribute on
any :class:`.Table` object, except that only those columns included in
this mapping are present, and are keyed based on the attribute name
defined in the mapping, not necessarily the ``key`` attribute of the
:class:`.Column` itself. Additionally, scalar expressions mapped
by :func:`.column_property` are also present here.
This is a *read only* attribute determined during mapper construction.
Behavior is undefined if directly modified.
"""
validators = None
"""An immutable dictionary of attributes which have been decorated
using the :func:`~.orm.validates` decorator.
The dictionary contains string attribute names as keys
mapped to the actual validation method.
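For example, a sketch of a validator declared on a hypothetical
mapped class; the resulting entry appears in this dictionary
keyed under ``'email'``::

    from sqlalchemy.orm import validates

    class User(Base):
        __tablename__ = 'user'
        id = Column(Integer, primary_key=True)
        email = Column(String)

        @validates('email')
        def validate_email(self, key, address):
            # any validation logic may be applied here
            assert '@' in address
            return address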
"""
c = None
"""A synonym for :attr:`~.Mapper.columns`."""
@util.memoized_property
def _path_registry(self):
return PathRegistry.per_mapper(self)
def _configure_inheritance(self):
"""Configure settings related to inherting and/or inherited mappers
being present."""
# a set of all mappers which inherit from this one.
self._inheriting_mappers = util.WeakSequence()
if self.inherits:
if isinstance(self.inherits, type):
self.inherits = class_mapper(self.inherits, configure=False)
if not issubclass(self.class_, self.inherits.class_):
raise sa_exc.ArgumentError(
"Class '%s' does not inherit from '%s'" %
(self.class_.__name__, self.inherits.class_.__name__))
if self.non_primary != self.inherits.non_primary:
np = "primary" if not self.non_primary else "non-primary"
raise sa_exc.ArgumentError(
"Inheritance of %s mapper for class '%s' is "
"only allowed from a %s mapper" %
(np, self.class_.__name__, np))
# inherit_condition is optional.
if self.local_table is None:
self.local_table = self.inherits.local_table
self.mapped_table = self.inherits.mapped_table
self.single = True
elif self.local_table is not self.inherits.local_table:
if self.concrete:
self.mapped_table = self.local_table
for mapper in self.iterate_to_root():
if mapper.polymorphic_on is not None:
mapper._requires_row_aliasing = True
else:
if self.inherit_condition is None:
# figure out inherit condition from our table to the
# immediate table of the inherited mapper, not its
# full table which could pull in other stuff we don't
# want (allows test/inheritance.InheritTest4 to pass)
self.inherit_condition = sql_util.join_condition(
self.inherits.local_table,
self.local_table)
self.mapped_table = sql.join(
self.inherits.mapped_table,
self.local_table,
self.inherit_condition)
fks = util.to_set(self.inherit_foreign_keys)
self._inherits_equated_pairs = \
sql_util.criterion_as_pairs(
self.mapped_table.onclause,
consider_as_foreign_keys=fks)
else:
self.mapped_table = self.local_table
if self.polymorphic_identity is not None and not self.concrete:
self._identity_class = self.inherits._identity_class
else:
self._identity_class = self.class_
if self.version_id_col is None:
self.version_id_col = self.inherits.version_id_col
self.version_id_generator = self.inherits.version_id_generator
elif self.inherits.version_id_col is not None and \
self.version_id_col is not self.inherits.version_id_col:
util.warn(
"Inheriting version_id_col '%s' does not match inherited "
"version_id_col '%s' and will not automatically populate "
"the inherited versioning column. "
"version_id_col should only be specified on "
"the base-most mapper that includes versioning." %
(self.version_id_col.description,
self.inherits.version_id_col.description)
)
if self.order_by is False and \
not self.concrete and \
self.inherits.order_by is not False:
self.order_by = self.inherits.order_by
self.polymorphic_map = self.inherits.polymorphic_map
self.batch = self.inherits.batch
self.inherits._inheriting_mappers.append(self)
self.base_mapper = self.inherits.base_mapper
self.passive_updates = self.inherits.passive_updates
self._all_tables = self.inherits._all_tables
if self.polymorphic_identity is not None:
if self.polymorphic_identity in self.polymorphic_map:
util.warn(
"Reassigning polymorphic association for identity %r "
"from %r to %r: Check for duplicate use of %r as "
"value for polymorphic_identity." %
(self.polymorphic_identity,
self.polymorphic_map[self.polymorphic_identity],
self, self.polymorphic_identity)
)
self.polymorphic_map[self.polymorphic_identity] = self
else:
self._all_tables = set()
self.base_mapper = self
self.mapped_table = self.local_table
if self.polymorphic_identity is not None:
self.polymorphic_map[self.polymorphic_identity] = self
self._identity_class = self.class_
if self.mapped_table is None:
raise sa_exc.ArgumentError(
"Mapper '%s' does not have a mapped_table specified."
% self)
def _set_with_polymorphic(self, with_polymorphic):
if with_polymorphic == '*':
self.with_polymorphic = ('*', None)
elif isinstance(with_polymorphic, (tuple, list)):
if isinstance(
with_polymorphic[0], util.string_types + (tuple, list)):
self.with_polymorphic = with_polymorphic
else:
self.with_polymorphic = (with_polymorphic, None)
elif with_polymorphic is not None:
raise sa_exc.ArgumentError("Invalid setting for with_polymorphic")
else:
self.with_polymorphic = None
if isinstance(self.local_table, expression.SelectBase):
raise sa_exc.InvalidRequestError(
"When mapping against a select() construct, map against "
"an alias() of the construct instead."
"This because several databases don't allow a "
"SELECT from a subquery that does not have an alias."
)
if self.with_polymorphic and \
isinstance(self.with_polymorphic[1],
expression.SelectBase):
self.with_polymorphic = (self.with_polymorphic[0],
self.with_polymorphic[1].alias())
if self.configured:
self._expire_memoizations()
def _set_concrete_base(self, mapper):
"""Set the given :class:`.Mapper` as the 'inherits' for this
:class:`.Mapper`, assuming this :class:`.Mapper` is concrete
and does not already have an inherits."""
assert self.concrete
assert not self.inherits
assert isinstance(mapper, Mapper)
self.inherits = mapper
self.inherits.polymorphic_map.update(self.polymorphic_map)
self.polymorphic_map = self.inherits.polymorphic_map
for mapper in self.iterate_to_root():
if mapper.polymorphic_on is not None:
mapper._requires_row_aliasing = True
self.batch = self.inherits.batch
for mp in self.self_and_descendants:
mp.base_mapper = self.inherits.base_mapper
self.inherits._inheriting_mappers.append(self)
self.passive_updates = self.inherits.passive_updates
self._all_tables = self.inherits._all_tables
for key, prop in mapper._props.items():
if key not in self._props and \
not self._should_exclude(key, key, local=False,
column=None):
self._adapt_inherited_property(key, prop, False)
def _set_polymorphic_on(self, polymorphic_on):
self.polymorphic_on = polymorphic_on
self._configure_polymorphic_setter(True)
def _configure_legacy_instrument_class(self):
if self.inherits:
self.dispatch._update(self.inherits.dispatch)
super_extensions = set(
chain(*[m._deprecated_extensions
for m in self.inherits.iterate_to_root()]))
else:
super_extensions = set()
for ext in self._deprecated_extensions:
if ext not in super_extensions:
ext._adapt_instrument_class(self, ext)
def _configure_listeners(self):
if self.inherits:
super_extensions = set(
chain(*[m._deprecated_extensions
for m in self.inherits.iterate_to_root()]))
else:
super_extensions = set()
for ext in self._deprecated_extensions:
if ext not in super_extensions:
ext._adapt_listener(self, ext)
def _configure_class_instrumentation(self):
"""If this mapper is to be a primary mapper (i.e. the
non_primary flag is not set), associate this Mapper with the
given class_ and entity name.
Subsequent calls to ``class_mapper()`` for the class_/entity
name combination will return this mapper. Also decorate the
`__init__` method on the mapped class to include optional
auto-session attachment logic.
"""
manager = attributes.manager_of_class(self.class_)
if self.non_primary:
if not manager or not manager.is_mapped:
raise sa_exc.InvalidRequestError(
"Class %s has no primary mapper configured. Configure "
"a primary mapper first before setting up a non primary "
"Mapper." % self.class_)
self.class_manager = manager
self._identity_class = manager.mapper._identity_class
_mapper_registry[self] = True
return
if manager is not None:
assert manager.class_ is self.class_
if manager.is_mapped:
raise sa_exc.ArgumentError(
"Class '%s' already has a primary mapper defined. "
"Use non_primary=True to "
"create a non primary Mapper. clear_mappers() will "
"remove *all* current mappers from all classes." %
self.class_)
# else:
# a ClassManager may already exist as
# ClassManager.instrument_attribute() creates
# new managers for each subclass if they don't yet exist.
_mapper_registry[self] = True
# note: this *must be called before instrumentation.register_class*
# to maintain the documented behavior of instrument_class
self.dispatch.instrument_class(self, self.class_)
if manager is None:
manager = instrumentation.register_class(self.class_)
self.class_manager = manager
manager.mapper = self
manager.deferred_scalar_loader = util.partial(
loading.load_scalar_attributes, self)
# The remaining members can be added by any mapper,
# whether or not an entity name (e_name) is set.
if manager.info.get(_INSTRUMENTOR, False):
return
event.listen(manager, 'first_init', _event_on_first_init, raw=True)
event.listen(manager, 'init', _event_on_init, raw=True)
for key, method in util.iterate_attributes(self.class_):
if isinstance(method, types.FunctionType):
if hasattr(method, '__sa_reconstructor__'):
self._reconstructor = method
event.listen(manager, 'load', _event_on_load, raw=True)
elif hasattr(method, '__sa_validators__'):
validation_opts = method.__sa_validation_opts__
for name in method.__sa_validators__:
self.validators = self.validators.union(
{name: (method, validation_opts)}
)
manager.info[_INSTRUMENTOR] = self
@classmethod
def _configure_all(cls):
"""Class-level path to the :func:`.configure_mappers` call.
"""
configure_mappers()
def dispose(self):
# Disable any attribute-based compilation.
self.configured = True
if hasattr(self, '_configure_failed'):
del self._configure_failed
if not self.non_primary and \
self.class_manager is not None and \
self.class_manager.is_mapped and \
self.class_manager.mapper is self:
instrumentation.unregister_class(self.class_)
def _configure_pks(self):
self.tables = sql_util.find_tables(self.mapped_table)
self._pks_by_table = {}
self._cols_by_table = {}
all_cols = util.column_set(chain(*[
col.proxy_set for col in
self._columntoproperty]))
pk_cols = util.column_set(c for c in all_cols if c.primary_key)
# identify primary key columns which are also mapped by this mapper.
tables = set(self.tables + [self.mapped_table])
self._all_tables.update(tables)
for t in tables:
if t.primary_key and pk_cols.issuperset(t.primary_key):
# ordering is important since it determines the ordering of
# mapper.primary_key (and therefore query.get())
self._pks_by_table[t] = \
util.ordered_column_set(t.primary_key).\
intersection(pk_cols)
self._cols_by_table[t] = \
util.ordered_column_set(t.c).\
intersection(all_cols)
# if explicit PK argument sent, add those columns to the
# primary key mappings
if self._primary_key_argument:
for k in self._primary_key_argument:
if k.table not in self._pks_by_table:
self._pks_by_table[k.table] = util.OrderedSet()
self._pks_by_table[k.table].add(k)
# otherwise, see that we got a full PK for the mapped table
elif self.mapped_table not in self._pks_by_table or \
len(self._pks_by_table[self.mapped_table]) == 0:
raise sa_exc.ArgumentError(
"Mapper %s could not assemble any primary "
"key columns for mapped table '%s'" %
(self, self.mapped_table.description))
elif self.local_table not in self._pks_by_table and \
isinstance(self.local_table, schema.Table):
util.warn("Could not assemble any primary "
"keys for locally mapped table '%s' - "
"no rows will be persisted in this Table."
% self.local_table.description)
if self.inherits and \
not self.concrete and \
not self._primary_key_argument:
# if inheriting, the "primary key" for this mapper is
# that of the inheriting (unless concrete or explicit)
self.primary_key = self.inherits.primary_key
else:
# determine primary key from argument or mapped_table pks -
# reduce to the minimal set of columns
if self._primary_key_argument:
primary_key = sql_util.reduce_columns(
[self.mapped_table.corresponding_column(c) for c in
self._primary_key_argument],
ignore_nonexistent_tables=True)
else:
primary_key = sql_util.reduce_columns(
self._pks_by_table[self.mapped_table],
ignore_nonexistent_tables=True)
if len(primary_key) == 0:
raise sa_exc.ArgumentError(
"Mapper %s could not assemble any primary "
"key columns for mapped table '%s'" %
(self, self.mapped_table.description))
self.primary_key = tuple(primary_key)
self._log("Identified primary key columns: %s", primary_key)
# determine cols that aren't expressed within our tables; mark these
# as "read only" properties which are refreshed upon INSERT/UPDATE
self._readonly_props = set(
self._columntoproperty[col]
for col in self._columntoproperty
if self._columntoproperty[col] not in self._identity_key_props and
(not hasattr(col, 'table') or
col.table not in self._cols_by_table))
def _configure_properties(self):
# Column and other ClauseElement objects which are mapped
self.columns = self.c = util.OrderedProperties()
# object attribute names mapped to MapperProperty objects
self._props = util.OrderedDict()
# table columns mapped to lists of MapperProperty objects
# using a list allows a single column to be defined as
# populating multiple object attributes
self._columntoproperty = _ColumnMapping(self)
# load custom properties
if self._init_properties:
for key, prop in self._init_properties.items():
self._configure_property(key, prop, False)
# pull properties from the inherited mapper if any.
if self.inherits:
for key, prop in self.inherits._props.items():
if key not in self._props and \
not self._should_exclude(key, key, local=False,
column=None):
self._adapt_inherited_property(key, prop, False)
# create properties for each column in the mapped table,
# for those columns which don't already map to a property
for column in self.mapped_table.columns:
if column in self._columntoproperty:
continue
column_key = (self.column_prefix or '') + column.key
if self._should_exclude(
column.key, column_key,
local=self.local_table.c.contains_column(column),
column=column
):
continue
# adjust the "key" used for this column to that
# of the inheriting mapper
for mapper in self.iterate_to_root():
if column in mapper._columntoproperty:
column_key = mapper._columntoproperty[column].key
self._configure_property(column_key,
column,
init=False,
setparent=True)
def _configure_polymorphic_setter(self, init=False):
"""Configure an attribute on the mapper representing the
'polymorphic_on' column, if applicable, and not
already generated by _configure_properties (which is typical).
Also create a setter function which will assign this
attribute to the value of the 'polymorphic_identity'
upon instance construction, also if applicable. This
routine will run when an instance is created.
"""
setter = False
if self.polymorphic_on is not None:
setter = True
if isinstance(self.polymorphic_on, util.string_types):
# polymorphic_on specified as a string - link
# it to mapped ColumnProperty
try:
self.polymorphic_on = self._props[self.polymorphic_on]
except KeyError:
raise sa_exc.ArgumentError(
"Can't determine polymorphic_on "
"value '%s' - no attribute is "
"mapped to this name." % self.polymorphic_on)
if self.polymorphic_on in self._columntoproperty:
# polymorphic_on is a column that is already mapped
# to a ColumnProperty
prop = self._columntoproperty[self.polymorphic_on]
polymorphic_key = prop.key
self.polymorphic_on = prop.columns[0]
elif isinstance(self.polymorphic_on, MapperProperty):
# polymorphic_on is directly a MapperProperty,
# ensure it's a ColumnProperty
if not isinstance(self.polymorphic_on,
properties.ColumnProperty):
raise sa_exc.ArgumentError(
"Only direct column-mapped "
"property or SQL expression "
"can be passed for polymorphic_on")
prop = self.polymorphic_on
self.polymorphic_on = prop.columns[0]
polymorphic_key = prop.key
elif not expression._is_column(self.polymorphic_on):
# polymorphic_on is not a Column and not a ColumnProperty;
# not supported right now.
raise sa_exc.ArgumentError(
"Only direct column-mapped "
"property or SQL expression "
"can be passed for polymorphic_on"
)
else:
# polymorphic_on is a Column or SQL expression and
# doesn't appear to be mapped. this means it can be 1.
# only present in the with_polymorphic selectable or
# 2. a totally standalone SQL expression which we'd
# hope is compatible with this mapper's mapped_table
col = self.mapped_table.corresponding_column(
self.polymorphic_on)
if col is None:
# polymorphic_on doesn't derive from any
# column/expression present in the mapped
# table. we will make a "hidden" ColumnProperty
# for it. Just check that if it's directly a
# schema.Column and we have with_polymorphic, it's
# likely a user error if the schema.Column isn't
# represented somehow in either mapped_table or
# with_polymorphic. Otherwise as of 0.7.4 we
# just go with it and assume the user wants it
# that way (i.e. a CASE statement)
setter = False
instrument = False
col = self.polymorphic_on
if isinstance(col, schema.Column) and (
self.with_polymorphic is None or
self.with_polymorphic[1].
corresponding_column(col) is None):
raise sa_exc.InvalidRequestError(
"Could not map polymorphic_on column "
"'%s' to the mapped table - polymorphic "
"loads will not function properly"
% col.description)
else:
# column/expression that polymorphic_on derives from
# is present in our mapped table
# and is probably mapped, but polymorphic_on itself
# is not. This happens when
# the polymorphic_on is only directly present in the
# with_polymorphic selectable, as when using
# polymorphic_union.
# we'll make a separate ColumnProperty for it.
instrument = True
key = getattr(col, 'key', None)
if key:
if self._should_exclude(col.key, col.key, False, col):
raise sa_exc.InvalidRequestError(
"Cannot exclude or override the "
"discriminator column %r" %
col.key)
else:
self.polymorphic_on = col = \
col.label("_sa_polymorphic_on")
key = col.key
self._configure_property(
key,
properties.ColumnProperty(col,
_instrument=instrument),
init=init, setparent=True)
polymorphic_key = key
else:
# no polymorphic_on was set.
# check inheriting mappers for one.
for mapper in self.iterate_to_root():
# determine if polymorphic_on of the parent
# should be propagated here. If the col
# is present in our mapped table, or if our mapped
# table is the same as the parent (i.e. single table
# inheritance), we can use it
if mapper.polymorphic_on is not None:
if self.mapped_table is mapper.mapped_table:
self.polymorphic_on = mapper.polymorphic_on
else:
self.polymorphic_on = \
self.mapped_table.corresponding_column(
mapper.polymorphic_on)
# we can use the parent mapper's _set_polymorphic_identity
# directly; it ensures the polymorphic_identity of the
# instance's mapper is used, so it is portable to subclasses.
if self.polymorphic_on is not None:
self._set_polymorphic_identity = \
mapper._set_polymorphic_identity
self._validate_polymorphic_identity = \
mapper._validate_polymorphic_identity
else:
self._set_polymorphic_identity = None
return
if setter:
def _set_polymorphic_identity(state):
dict_ = state.dict
state.get_impl(polymorphic_key).set(
state, dict_,
state.manager.mapper.polymorphic_identity,
None)
def _validate_polymorphic_identity(mapper, state, dict_):
if polymorphic_key in dict_ and \
dict_[polymorphic_key] not in \
mapper._acceptable_polymorphic_identities:
util.warn_limited(
"Flushing object %s with "
"incompatible polymorphic identity %r; the "
"object may not refresh and/or load correctly",
(state_str(state), dict_[polymorphic_key])
)
self._set_polymorphic_identity = _set_polymorphic_identity
self._validate_polymorphic_identity = \
_validate_polymorphic_identity
else:
self._set_polymorphic_identity = None
_validate_polymorphic_identity = None
@_memoized_configured_property
def _version_id_prop(self):
if self.version_id_col is not None:
return self._columntoproperty[self.version_id_col]
else:
return None
@_memoized_configured_property
def _acceptable_polymorphic_identities(self):
identities = set()
stack = deque([self])
while stack:
item = stack.popleft()
if item.mapped_table is self.mapped_table:
identities.add(item.polymorphic_identity)
stack.extend(item._inheriting_mappers)
return identities
@_memoized_configured_property
def _prop_set(self):
return frozenset(self._props.values())
def _adapt_inherited_property(self, key, prop, init):
if not self.concrete:
self._configure_property(key, prop, init=False, setparent=False)
elif key not in self._props:
self._configure_property(
key,
properties.ConcreteInheritedProperty(),
init=init, setparent=True)
def _configure_property(self, key, prop, init=True, setparent=True):
self._log("_configure_property(%s, %s)", key, prop.__class__.__name__)
if not isinstance(prop, MapperProperty):
prop = self._property_from_column(key, prop)
if isinstance(prop, properties.ColumnProperty):
col = self.mapped_table.corresponding_column(prop.columns[0])
# if the column is not present in the mapped table,
# test if a column has been added after the fact to the
# parent table (or their parent, etc.) [ticket:1570]
if col is None and self.inherits:
path = [self]
for m in self.inherits.iterate_to_root():
col = m.local_table.corresponding_column(prop.columns[0])
if col is not None:
for m2 in path:
m2.mapped_table._reset_exported()
col = self.mapped_table.corresponding_column(
prop.columns[0])
break
path.append(m)
# subquery expression, column not present in the mapped
# selectable.
if col is None:
col = prop.columns[0]
# column is coming in after _readonly_props was
# initialized; check for 'readonly'
if hasattr(self, '_readonly_props') and \
(not hasattr(col, 'table') or
col.table not in self._cols_by_table):
self._readonly_props.add(prop)
else:
# if column is coming in after _cols_by_table was
# initialized, ensure the col is in the right set
if hasattr(self, '_cols_by_table') and \
col.table in self._cols_by_table and \
col not in self._cols_by_table[col.table]:
self._cols_by_table[col.table].add(col)
# if this properties.ColumnProperty represents the "polymorphic
# discriminator" column, mark it. We'll need this when rendering
# columns in SELECT statements.
if not hasattr(prop, '_is_polymorphic_discriminator'):
prop._is_polymorphic_discriminator = \
(col is self.polymorphic_on or
prop.columns[0] is self.polymorphic_on)
self.columns[key] = col
for col in prop.columns + prop._orig_columns:
for col in col.proxy_set:
self._columntoproperty[col] = prop
prop.key = key
if setparent:
prop.set_parent(self, init)
if key in self._props and \
getattr(self._props[key], '_mapped_by_synonym', False):
syn = self._props[key]._mapped_by_synonym
raise sa_exc.ArgumentError(
"Can't call map_column=True for synonym %r=%r, "
"a ColumnProperty already exists keyed to the name "
"%r for column %r" % (syn, key, key, syn)
)
if key in self._props and \
not isinstance(prop, properties.ColumnProperty) and \
not isinstance(self._props[key], properties.ColumnProperty):
util.warn("Property %s on %s being replaced with new "
"property %s; the old property will be discarded" % (
self._props[key],
self,
prop,
))
oldprop = self._props[key]
self._path_registry.pop(oldprop, None)
self._props[key] = prop
if not self.non_primary:
prop.instrument_class(self)
for mapper in self._inheriting_mappers:
mapper._adapt_inherited_property(key, prop, init)
if init:
prop.init()
prop.post_instrument_class(self)
if self.configured:
self._expire_memoizations()
def _property_from_column(self, key, prop):
"""generate/update a :class:`.ColumnProprerty` given a
:class:`.Column` object. """
# we were passed a Column or a list of Columns;
# generate a properties.ColumnProperty
columns = util.to_list(prop)
column = columns[0]
if not expression._is_column(column):
raise sa_exc.ArgumentError(
"%s=%r is not an instance of MapperProperty or Column"
% (key, prop))
prop = self._props.get(key, None)
if isinstance(prop, properties.ColumnProperty):
if (
not self._inherits_equated_pairs or
(prop.columns[0], column) not in self._inherits_equated_pairs
) and \
not prop.columns[0].shares_lineage(column) and \
prop.columns[0] is not self.version_id_col and \
column is not self.version_id_col:
warn_only = prop.parent is not self
msg = ("Implicitly combining column %s with column "
"%s under attribute '%s'. Please configure one "
"or more attributes for these same-named columns "
"explicitly." % (prop.columns[-1], column, key))
if warn_only:
util.warn(msg)
else:
raise sa_exc.InvalidRequestError(msg)
# existing properties.ColumnProperty from an inheriting
# mapper. make a copy and append our column to it
prop = prop.copy()
prop.columns.insert(0, column)
self._log("inserting column to existing list "
"in properties.ColumnProperty %s" % (key))
return prop
elif prop is None or isinstance(prop,
properties.ConcreteInheritedProperty):
mapped_column = []
for c in columns:
mc = self.mapped_table.corresponding_column(c)
if mc is None:
mc = self.local_table.corresponding_column(c)
if mc is not None:
# if the column is in the local table but not the
# mapped table, this corresponds to adding a
# column after the fact to the local table.
# [ticket:1523]
self.mapped_table._reset_exported()
mc = self.mapped_table.corresponding_column(c)
if mc is None:
raise sa_exc.ArgumentError(
"When configuring property '%s' on %s, "
"column '%s' is not represented in the mapper's "
"table. Use the `column_property()` function to "
"force this column to be mapped as a read-only "
"attribute." % (key, self, c))
mapped_column.append(mc)
return properties.ColumnProperty(*mapped_column)
else:
raise sa_exc.ArgumentError(
"WARNING: when configuring property '%s' on %s, "
"column '%s' conflicts with property '%r'. "
"To resolve this, map the column to the class under a "
"different name in the 'properties' dictionary. Or, "
"to remove all awareness of the column entirely "
"(including its availability as a foreign key), "
"use the 'include_properties' or 'exclude_properties' "
"mapper arguments to control specifically which table "
"columns get mapped." %
(key, self, column.key, prop))
def _post_configure_properties(self):
"""Call the ``init()`` method on all ``MapperProperties``
attached to this mapper.
This is a deferred configuration step which is intended
to execute once all mappers have been constructed.
"""
self._log("_post_configure_properties() started")
props = list(self._props.items())
for key, prop in props:
self._log("initialize prop %s", key)
if prop.parent is self and not prop._configure_started:
prop.init()
if prop._configure_finished:
prop.post_instrument_class(self)
self._log("_post_configure_properties() complete")
self.configured = True
def add_properties(self, dict_of_properties):
"""Add the given dictionary of properties to this mapper,
using `add_property`.
"""
for key, value in dict_of_properties.items():
self.add_property(key, value)
def add_property(self, key, prop):
"""Add an individual MapperProperty to this mapper.
If the mapper has not been configured yet, just adds the
property to the initial properties dictionary sent to the
constructor. If this Mapper has already been configured, then
the given MapperProperty is configured immediately.
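E.g., a sketch adding a deferred column to a hypothetical,
already-mapped ``User`` class::

    from sqlalchemy.orm import deferred

    User.__mapper__.add_property(
        'bio', deferred(user_table.c.bio))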
"""
self._init_properties[key] = prop
self._configure_property(key, prop, init=self.configured)
def _expire_memoizations(self):
for mapper in self.iterate_to_root():
_memoized_configured_property.expire_instance(mapper)
@property
def _log_desc(self):
return "(" + self.class_.__name__ + \
"|" + \
(self.local_table is not None and
self.local_table.description or
str(self.local_table)) +\
(self.non_primary and
"|non-primary" or "") + ")"
def _log(self, msg, *args):
self.logger.info(
"%s " + msg, *((self._log_desc,) + args)
)
def _log_debug(self, msg, *args):
self.logger.debug(
"%s " + msg, *((self._log_desc,) + args)
)
def __repr__(self):
return '<Mapper at 0x%x; %s>' % (
id(self), self.class_.__name__)
def __str__(self):
return "Mapper|%s|%s%s" % (
self.class_.__name__,
self.local_table is not None and
self.local_table.description or None,
self.non_primary and "|non-primary" or ""
)
def _is_orphan(self, state):
orphan_possible = False
for mapper in self.iterate_to_root():
for (key, cls) in mapper._delete_orphans:
orphan_possible = True
has_parent = attributes.manager_of_class(cls).has_parent(
state, key, optimistic=state.has_identity)
if self.legacy_is_orphan and has_parent:
return False
elif not self.legacy_is_orphan and not has_parent:
return True
if self.legacy_is_orphan:
return orphan_possible
else:
return False
def has_property(self, key):
return key in self._props
def get_property(self, key, _configure_mappers=True):
"""return a MapperProperty associated with the given key.
"""
if _configure_mappers and Mapper._new_mappers:
configure_mappers()
try:
return self._props[key]
except KeyError:
raise sa_exc.InvalidRequestError(
"Mapper '%s' has no property '%s'" % (self, key))
def get_property_by_column(self, column):
"""Given a :class:`.Column` object, return the
:class:`.MapperProperty` which maps this column."""
return self._columntoproperty[column]
@property
def iterate_properties(self):
"""return an iterator of all MapperProperty objects."""
if Mapper._new_mappers:
configure_mappers()
return iter(self._props.values())
def _mappers_from_spec(self, spec, selectable):
"""given a with_polymorphic() argument, return the set of mappers it
represents.
Trims the list of mappers to just those represented within the given
selectable, if present. This helps some more legacy-ish mappings.
"""
if spec == '*':
mappers = list(self.self_and_descendants)
elif spec:
mappers = set()
for m in util.to_list(spec):
m = _class_to_mapper(m)
if not m.isa(self):
raise sa_exc.InvalidRequestError(
"%r does not inherit from %r" %
(m, self))
if selectable is None:
mappers.update(m.iterate_to_root())
else:
mappers.add(m)
mappers = [m for m in self.self_and_descendants if m in mappers]
else:
mappers = []
if selectable is not None:
tables = set(sql_util.find_tables(selectable,
include_aliases=True))
mappers = [m for m in mappers if m.local_table in tables]
return mappers
def _selectable_from_mappers(self, mappers, innerjoin):
"""given a list of mappers (assumed to be within this mapper's
inheritance hierarchy), construct an outerjoin amongst those mappers'
mapped tables.
"""
from_obj = self.mapped_table
for m in mappers:
if m is self:
continue
if m.concrete:
raise sa_exc.InvalidRequestError(
"'with_polymorphic()' requires 'selectable' argument "
"when concrete-inheriting mappers are used.")
elif not m.single:
if innerjoin:
from_obj = from_obj.join(m.local_table,
m.inherit_condition)
else:
from_obj = from_obj.outerjoin(m.local_table,
m.inherit_condition)
return from_obj
@_memoized_configured_property
def _single_table_criterion(self):
if self.single and \
self.inherits and \
self.polymorphic_on is not None:
return self.polymorphic_on.in_(
m.polymorphic_identity
for m in self.self_and_descendants)
else:
return None
@_memoized_configured_property
def _with_polymorphic_mappers(self):
if Mapper._new_mappers:
configure_mappers()
if not self.with_polymorphic:
return []
return self._mappers_from_spec(*self.with_polymorphic)
@_memoized_configured_property
def _with_polymorphic_selectable(self):
if not self.with_polymorphic:
return self.mapped_table
spec, selectable = self.with_polymorphic
if selectable is not None:
return selectable
else:
return self._selectable_from_mappers(
self._mappers_from_spec(spec, selectable),
False)
with_polymorphic_mappers = _with_polymorphic_mappers
"""The list of :class:`.Mapper` objects included in the
default "polymorphic" query.
"""
@_memoized_configured_property
def _insert_cols_as_none(self):
return dict(
(
table,
frozenset(
col.key for col in columns
if not col.primary_key and
not col.server_default and not col.default)
)
for table, columns in self._cols_by_table.items()
)
@_memoized_configured_property
def _propkey_to_col(self):
return dict(
(
table,
dict(
(self._columntoproperty[col].key, col)
for col in columns
)
)
for table, columns in self._cols_by_table.items()
)
@_memoized_configured_property
def _pk_keys_by_table(self):
return dict(
(
table,
frozenset([col.key for col in pks])
)
for table, pks in self._pks_by_table.items()
)
@_memoized_configured_property
def _server_default_cols(self):
return dict(
(
table,
frozenset([
col.key for col in columns
if col.server_default is not None])
)
for table, columns in self._cols_by_table.items()
)
@_memoized_configured_property
def _server_onupdate_default_cols(self):
return dict(
(
table,
frozenset([
col.key for col in columns
if col.server_onupdate is not None])
)
for table, columns in self._cols_by_table.items()
)
@property
def selectable(self):
"""The :func:`.select` construct this :class:`.Mapper` selects from
by default.
Normally, this is equivalent to :attr:`.mapped_table`, unless
the ``with_polymorphic`` feature is in use, in which case the
full "polymorphic" selectable is returned.
"""
return self._with_polymorphic_selectable
def _with_polymorphic_args(self, spec=None, selectable=False,
innerjoin=False):
if self.with_polymorphic:
if not spec:
spec = self.with_polymorphic[0]
if selectable is False:
selectable = self.with_polymorphic[1]
elif selectable is False:
selectable = None
mappers = self._mappers_from_spec(spec, selectable)
if selectable is not None:
return mappers, selectable
else:
return mappers, self._selectable_from_mappers(mappers,
innerjoin)
@_memoized_configured_property
def _polymorphic_properties(self):
return list(self._iterate_polymorphic_properties(
self._with_polymorphic_mappers))
def _iterate_polymorphic_properties(self, mappers=None):
"""Return an iterator of MapperProperty objects which will render into
a SELECT."""
if mappers is None:
mappers = self._with_polymorphic_mappers
if not mappers:
for c in self.iterate_properties:
yield c
else:
# in the polymorphic case, filter out discriminator columns
# from other mappers, as these are sometimes dependent on that
# mapper's polymorphic selectable (which we don't want rendered)
for c in util.unique_list(
chain(*[
list(mapper.iterate_properties) for mapper in
[self] + mappers
])
):
if getattr(c, '_is_polymorphic_discriminator', False) and \
(self.polymorphic_on is None or
c.columns[0] is not self.polymorphic_on):
continue
yield c
@util.memoized_property
def attrs(self):
"""A namespace of all :class:`.MapperProperty` objects
associated with this mapper.
This is an object that provides each property based on
its key name. For instance, the mapper for a
``User`` class which has ``User.name`` attribute would
provide ``mapper.attrs.name``, which would be the
:class:`.ColumnProperty` representing the ``name``
column. The namespace object can also be iterated,
which would yield each :class:`.MapperProperty`.
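E.g., a minimal sketch using the :func:`.inspect` function
against a hypothetical ``User`` class::

    from sqlalchemy import inspect

    mapper = inspect(User)
    name_prop = mapper.attrs.name   # ColumnProperty for 'name'
    for prop in mapper.attrs:       # iterate all MapperProperty objects
        print(prop.key)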
:class:`.Mapper` has several pre-filtered views
of this attribute which limit the types of properties
returned, including :attr:`.synonyms`, :attr:`.column_attrs`,
:attr:`.relationships`, and :attr:`.composites`.
.. warning::
The :attr:`.Mapper.attrs` accessor namespace is an
instance of :class:`.OrderedProperties`. This is
a dictionary-like object which includes a small number of
named methods such as :meth:`.OrderedProperties.items`
and :meth:`.OrderedProperties.values`. When
accessing attributes dynamically, favor using the dict-access
scheme, e.g. ``mapper.attrs[somename]`` over
``getattr(mapper.attrs, somename)`` to avoid name collisions.
.. seealso::
:attr:`.Mapper.all_orm_descriptors`
"""
if Mapper._new_mappers:
configure_mappers()
return util.ImmutableProperties(self._props)
@util.memoized_property
def all_orm_descriptors(self):
"""A namespace of all :class:`.InspectionAttr` attributes associated
with the mapped class.
These attributes are in all cases Python :term:`descriptors`
associated with the mapped class or its superclasses.
This namespace includes attributes that are mapped to the class
as well as attributes declared by extension modules.
It includes any Python descriptor type that inherits from
:class:`.InspectionAttr`. This includes
:class:`.QueryableAttribute`, as well as extension types such as
:class:`.hybrid_property`, :class:`.hybrid_method` and
:class:`.AssociationProxy`.
To distinguish between mapped attributes and extension attributes,
the attribute :attr:`.InspectionAttr.extension_type` will refer
to a constant that distinguishes between different extension types.
When dealing with a :class:`.QueryableAttribute`, the
:attr:`.QueryableAttribute.property` attribute refers to the
:class:`.MapperProperty` property, which is what you get when
referring to the collection of mapped properties via
:attr:`.Mapper.attrs`.
.. warning::
The :attr:`.Mapper.all_orm_descriptors` accessor namespace is an
instance of :class:`.OrderedProperties`. This is
a dictionary-like object which includes a small number of
named methods such as :meth:`.OrderedProperties.items`
and :meth:`.OrderedProperties.values`. When
accessing attributes dynamically, favor using the dict-access
scheme, e.g. ``mapper.all_orm_descriptors[somename]`` over
``getattr(mapper.all_orm_descriptors, somename)`` to avoid name
collisions.
.. versionadded:: 0.8.0
.. seealso::
:attr:`.Mapper.attrs`
"""
return util.ImmutableProperties(
dict(self.class_manager._all_sqla_attributes()))
@_memoized_configured_property
def synonyms(self):
"""Return a namespace of all :class:`.SynonymProperty`
properties maintained by this :class:`.Mapper`.
.. seealso::
:attr:`.Mapper.attrs` - namespace of all :class:`.MapperProperty`
objects.
"""
return self._filter_properties(properties.SynonymProperty)
@_memoized_configured_property
def column_attrs(self):
"""Return a namespace of all :class:`.ColumnProperty`
properties maintained by this :class:`.Mapper`.
.. seealso::
:attr:`.Mapper.attrs` - namespace of all :class:`.MapperProperty`
objects.
"""
return self._filter_properties(properties.ColumnProperty)
@_memoized_configured_property
def relationships(self):
"""A namespace of all :class:`.RelationshipProperty` properties
maintained by this :class:`.Mapper`.
.. warning::
the :attr:`.Mapper.relationships` accessor namespace is an
instance of :class:`.OrderedProperties`. This is
a dictionary-like object which includes a small number of
named methods such as :meth:`.OrderedProperties.items`
and :meth:`.OrderedProperties.values`. When
accessing attributes dynamically, favor using the dict-access
scheme, e.g. ``mapper.relationships[somename]`` over
``getattr(mapper.relationships, somename)`` to avoid name
collisions.
.. seealso::
:attr:`.Mapper.attrs` - namespace of all :class:`.MapperProperty`
objects.
"""
return self._filter_properties(properties.RelationshipProperty)
@_memoized_configured_property
def composites(self):
"""Return a namespace of all :class:`.CompositeProperty`
properties maintained by this :class:`.Mapper`.
.. seealso::
:attr:`.Mapper.attrs` - namespace of all :class:`.MapperProperty`
objects.
"""
return self._filter_properties(properties.CompositeProperty)
def _filter_properties(self, type_):
if Mapper._new_mappers:
configure_mappers()
return util.ImmutableProperties(util.OrderedDict(
(k, v) for k, v in self._props.items()
if isinstance(v, type_)
))
@_memoized_configured_property
def _get_clause(self):
"""create a "get clause" based on the primary key. this is used
by query.get() and many-to-one lazyloads to load this item
by primary key.
"""
params = [(primary_key, sql.bindparam(None, type_=primary_key.type))
for primary_key in self.primary_key]
return sql.and_(*[k == v for (k, v) in params]), \
util.column_dict(params)
@_memoized_configured_property
def _equivalent_columns(self):
"""Create a map of all *equivalent* columns, based on
the determination of column pairs that are equated to
one another based on inherit condition. This is designed
to work with the queries that util.polymorphic_union
comes up with, which often don't include the columns from
the base table directly (including the subclass table columns
only).
The resulting structure is a dictionary of columns mapped
to lists of equivalent columns, i.e.
{
tablea.col1:
set([tableb.col1, tablec.col1]),
tablea.col2:
set([tabled.col2])
}
"""
result = util.column_dict()
def visit_binary(binary):
if binary.operator == operators.eq:
if binary.left in result:
result[binary.left].add(binary.right)
else:
result[binary.left] = util.column_set((binary.right,))
if binary.right in result:
result[binary.right].add(binary.left)
else:
result[binary.right] = util.column_set((binary.left,))
for mapper in self.base_mapper.self_and_descendants:
if mapper.inherit_condition is not None:
visitors.traverse(
mapper.inherit_condition, {},
{'binary': visit_binary})
return result
def _is_userland_descriptor(self, obj):
if isinstance(obj, (_MappedAttribute,
instrumentation.ClassManager,
expression.ColumnElement)):
return False
else:
return True
def _should_exclude(self, name, assigned_name, local, column):
"""determine whether a particular property should be implicitly
present on the class.
This occurs when properties are propagated from an inherited class, or
are applied from the columns present in the mapped table.
"""
# check for class-bound attributes and/or descriptors,
# either local or from an inherited class
if local:
if self.class_.__dict__.get(assigned_name, None) is not None \
and self._is_userland_descriptor(
self.class_.__dict__[assigned_name]):
return True
else:
if getattr(self.class_, assigned_name, None) is not None \
and self._is_userland_descriptor(
getattr(self.class_, assigned_name)):
return True
if self.include_properties is not None and \
name not in self.include_properties and \
(column is None or column not in self.include_properties):
self._log("not including property %s" % (name))
return True
if self.exclude_properties is not None and \
(
name in self.exclude_properties or
(column is not None and column in self.exclude_properties)
):
self._log("excluding property %s" % (name))
return True
return False
def common_parent(self, other):
"""Return true if the given mapper shares a
common inherited parent as this mapper."""
return self.base_mapper is other.base_mapper
def _canload(self, state, allow_subtypes):
s = self.primary_mapper()
if self.polymorphic_on is not None or allow_subtypes:
return _state_mapper(state).isa(s)
else:
return _state_mapper(state) is s
def isa(self, other):
"""Return True if the this mapper inherits from the given mapper."""
m = self
while m and m is not other:
m = m.inherits
return bool(m)
def iterate_to_root(self):
m = self
while m:
yield m
m = m.inherits
@_memoized_configured_property
def self_and_descendants(self):
"""The collection including this mapper and all descendant mappers.
This includes not just the immediately inheriting mappers but
all their inheriting mappers as well.
"""
descendants = []
stack = deque([self])
while stack:
item = stack.popleft()
descendants.append(item)
stack.extend(item._inheriting_mappers)
return util.WeakSequence(descendants)
def polymorphic_iterator(self):
"""Iterate through the collection including this mapper and
all descendant mappers.
This includes not just the immediately inheriting mappers but
all their inheriting mappers as well.
To iterate through an entire hierarchy, use
``mapper.base_mapper.polymorphic_iterator()``.
"""
return iter(self.self_and_descendants)
def primary_mapper(self):
"""Return the primary mapper corresponding to this mapper's class key
(class)."""
return self.class_manager.mapper
@property
def primary_base_mapper(self):
return self.class_manager.mapper.base_mapper
def _result_has_identity_key(self, result, adapter=None):
pk_cols = self.primary_key
if adapter:
pk_cols = [adapter.columns[c] for c in pk_cols]
for col in pk_cols:
if not result._has_key(col):
return False
else:
return True
def identity_key_from_row(self, row, adapter=None):
"""Return an identity-map key for use in storing/retrieving an
item from the identity map.
:param row: A :class:`.RowProxy` instance. The columns which are
mapped by this :class:`.Mapper` should be locatable in the row,
preferably via the :class:`.Column` object directly (as is the case
when a :func:`.select` construct is executed), or via string names of
the form ``<tablename>_<colname>``.
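
        For example, a minimal sketch (``SomeClass`` and the executed
        statement are illustrative; the row must contain this mapper's
        primary key columns)::

            row = connection.execute(mapped_table.select()).first()
            mapper.identity_key_from_row(row)
            # -> (SomeClass, (1,)) for a single integer primary key column
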
"""
pk_cols = self.primary_key
if adapter:
pk_cols = [adapter.columns[c] for c in pk_cols]
return self._identity_class, \
tuple(row[column] for column in pk_cols)
def identity_key_from_primary_key(self, primary_key):
"""Return an identity-map key for use in storing/retrieving an
item from an identity map.
:param primary_key: A list of values indicating the identifier.
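
        For example (``SomeClass`` stands in for the mapped class)::

            mapper.identity_key_from_primary_key([7])
            # -> (SomeClass, (7,))
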
"""
return self._identity_class, tuple(primary_key)
def identity_key_from_instance(self, instance):
"""Return the identity key for the given instance, based on
its primary key attributes.
If the instance's state is expired, calling this method
will result in a database check to see if the object has been deleted.
If the row no longer exists,
:class:`~sqlalchemy.orm.exc.ObjectDeletedError` is raised.
This value is typically also found on the instance state under the
attribute name `key`.
"""
return self.identity_key_from_primary_key(
self.primary_key_from_instance(instance))
def _identity_key_from_state(self, state):
dict_ = state.dict
manager = state.manager
return self._identity_class, tuple([
manager[self._columntoproperty[col].key].
impl.get(state, dict_, attributes.PASSIVE_RETURN_NEVER_SET)
for col in self.primary_key
])
def primary_key_from_instance(self, instance):
"""Return the list of primary key values for the given
instance.
If the instance's state is expired, calling this method
will result in a database check to see if the object has been deleted.
If the row no longer exists,
:class:`~sqlalchemy.orm.exc.ObjectDeletedError` is raised.
"""
state = attributes.instance_state(instance)
return self._primary_key_from_state(state, attributes.PASSIVE_OFF)
def _primary_key_from_state(
self, state, passive=attributes.PASSIVE_RETURN_NEVER_SET):
dict_ = state.dict
manager = state.manager
return [
manager[prop.key].
impl.get(state, dict_, passive)
for prop in self._identity_key_props
]
@_memoized_configured_property
def _identity_key_props(self):
return [self._columntoproperty[col] for col in self.primary_key]
@_memoized_configured_property
def _all_pk_props(self):
collection = set()
for table in self.tables:
collection.update(self._pks_by_table[table])
return collection
@_memoized_configured_property
def _should_undefer_in_wildcard(self):
cols = set(self.primary_key)
if self.polymorphic_on is not None:
cols.add(self.polymorphic_on)
return cols
@_memoized_configured_property
def _primary_key_propkeys(self):
return set([prop.key for prop in self._all_pk_props])
def _get_state_attr_by_column(
self, state, dict_, column,
passive=attributes.PASSIVE_RETURN_NEVER_SET):
prop = self._columntoproperty[column]
return state.manager[prop.key].impl.get(state, dict_, passive=passive)
def _set_committed_state_attr_by_column(self, state, dict_, column, value):
prop = self._columntoproperty[column]
state.manager[prop.key].impl.set_committed_value(state, dict_, value)
def _set_state_attr_by_column(self, state, dict_, column, value):
prop = self._columntoproperty[column]
state.manager[prop.key].impl.set(state, dict_, value, None)
def _get_committed_attr_by_column(self, obj, column):
state = attributes.instance_state(obj)
dict_ = attributes.instance_dict(obj)
return self._get_committed_state_attr_by_column(
state, dict_, column, passive=attributes.PASSIVE_OFF)
def _get_committed_state_attr_by_column(
self, state, dict_, column,
passive=attributes.PASSIVE_RETURN_NEVER_SET):
prop = self._columntoproperty[column]
return state.manager[prop.key].impl.\
get_committed_value(state, dict_, passive=passive)
def _optimized_get_statement(self, state, attribute_names):
"""assemble a WHERE clause which retrieves a given state by primary
key, using a minimized set of tables.
Applies to a joined-table inheritance mapper where the
requested attribute names are only present on joined tables,
not the base table. The WHERE clause attempts to include
only those tables to minimize joins.
"""
props = self._props
tables = set(chain(
*[sql_util.find_tables(c, check_columns=True)
for key in attribute_names
for c in props[key].columns]
))
if self.base_mapper.local_table in tables:
return None
class ColumnsNotAvailable(Exception):
pass
def visit_binary(binary):
leftcol = binary.left
rightcol = binary.right
if leftcol is None or rightcol is None:
return
if leftcol.table not in tables:
leftval = self._get_committed_state_attr_by_column(
state, state.dict,
leftcol,
passive=attributes.PASSIVE_NO_INITIALIZE)
if leftval in orm_util._none_set:
raise ColumnsNotAvailable()
binary.left = sql.bindparam(None, leftval,
type_=binary.right.type)
elif rightcol.table not in tables:
rightval = self._get_committed_state_attr_by_column(
state, state.dict,
rightcol,
passive=attributes.PASSIVE_NO_INITIALIZE)
if rightval in orm_util._none_set:
raise ColumnsNotAvailable()
binary.right = sql.bindparam(None, rightval,
type_=binary.right.type)
allconds = []
try:
start = False
for mapper in reversed(list(self.iterate_to_root())):
if mapper.local_table in tables:
start = True
elif not isinstance(mapper.local_table,
expression.TableClause):
return None
if start and not mapper.single:
allconds.append(visitors.cloned_traverse(
mapper.inherit_condition,
{},
{'binary': visit_binary}
)
)
except ColumnsNotAvailable:
return None
cond = sql.and_(*allconds)
cols = []
for key in attribute_names:
cols.extend(props[key].columns)
return sql.select(cols, cond, use_labels=True)
def cascade_iterator(self, type_, state, halt_on=None):
"""Iterate each element and its mapper in an object graph,
for all relationships that meet the given cascade rule.
:param type_:
The name of the cascade rule (i.e. ``"save-update"``, ``"delete"``,
etc.).
.. note:: the ``"all"`` cascade is not accepted here. For a generic
object traversal function, see :ref:`faq_walk_objects`.
:param state:
          The lead InstanceState. Child items will be processed per
the relationships defined for this object's mapper.
        :return: the method yields individual object instances, each as a
          4-tuple of ``(object, mapper, state, dict)``.
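
        For example, a sketch that walks everything reachable from
        ``someobject`` via the ``"save-update"`` cascade (``someobject`` is
        illustrative)::

            state = attributes.instance_state(someobject)
            for obj, m, st, dct in mapper.cascade_iterator(
                    "save-update", state):
                print(obj)
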
.. seealso::
:ref:`unitofwork_cascades`
:ref:`faq_walk_objects` - illustrates a generic function to
traverse all objects without relying on cascades.
"""
visited_states = set()
prp, mpp = object(), object()
visitables = deque([(deque(self._props.values()), prp,
state, state.dict)])
while visitables:
iterator, item_type, parent_state, parent_dict = visitables[-1]
if not iterator:
visitables.pop()
continue
if item_type is prp:
prop = iterator.popleft()
if type_ not in prop.cascade:
continue
queue = deque(prop.cascade_iterator(
type_, parent_state, parent_dict,
visited_states, halt_on))
if queue:
visitables.append((queue, mpp, None, None))
elif item_type is mpp:
instance, instance_mapper, corresponding_state, \
corresponding_dict = iterator.popleft()
yield instance, instance_mapper, \
corresponding_state, corresponding_dict
visitables.append((deque(instance_mapper._props.values()),
prp, corresponding_state,
corresponding_dict))
@_memoized_configured_property
def _compiled_cache(self):
return util.LRUCache(self._compiled_cache_size)
@_memoized_configured_property
def _sorted_tables(self):
table_to_mapper = {}
for mapper in self.base_mapper.self_and_descendants:
for t in mapper.tables:
table_to_mapper.setdefault(t, mapper)
extra_dependencies = []
for table, mapper in table_to_mapper.items():
super_ = mapper.inherits
if super_:
extra_dependencies.extend([
(super_table, table)
for super_table in super_.tables
])
def skip(fk):
# attempt to skip dependencies that are not
# significant to the inheritance chain
# for two tables that are related by inheritance.
# while that dependency may be important, it's technically
# not what we mean to sort on here.
parent = table_to_mapper.get(fk.parent.table)
dep = table_to_mapper.get(fk.column.table)
if parent is not None and \
dep is not None and \
dep is not parent and \
dep.inherit_condition is not None:
cols = set(sql_util._find_columns(dep.inherit_condition))
if parent.inherit_condition is not None:
cols = cols.union(sql_util._find_columns(
parent.inherit_condition))
return fk.parent not in cols and fk.column not in cols
else:
return fk.parent not in cols
return False
sorted_ = sql_util.sort_tables(table_to_mapper,
skip_fn=skip,
extra_dependencies=extra_dependencies)
ret = util.OrderedDict()
for t in sorted_:
ret[t] = table_to_mapper[t]
return ret
def _memo(self, key, callable_):
if key in self._memoized_values:
return self._memoized_values[key]
else:
self._memoized_values[key] = value = callable_()
return value
@util.memoized_property
def _table_to_equated(self):
"""memoized map of tables to collections of columns to be
synchronized upwards to the base mapper."""
result = util.defaultdict(list)
for table in self._sorted_tables:
cols = set(table.c)
for m in self.iterate_to_root():
if m._inherits_equated_pairs and \
cols.intersection(
util.reduce(set.union,
[l.proxy_set for l, r in
m._inherits_equated_pairs])
):
result[table].append((m, m._inherits_equated_pairs))
return result
def configure_mappers():
"""Initialize the inter-mapper relationships of all mappers that
have been constructed thus far.
This function can be called any number of times, but in
most cases is invoked automatically, the first time mappings are used,
as well as whenever mappings are used and additional not-yet-configured
mappers have been constructed.
    Points at which this occurs include when a mapped class is instantiated
into an instance, as well as when the :meth:`.Session.query` method
is used.
The :func:`.configure_mappers` function provides several event hooks
that can be used to augment its functionality. These methods include:
* :meth:`.MapperEvents.before_configured` - called once before
:func:`.configure_mappers` does any work; this can be used to establish
additional options, properties, or related mappings before the operation
proceeds.
    * :meth:`.MapperEvents.mapper_configured` - called as each individual
:class:`.Mapper` is configured within the process; will include all
mapper state except for backrefs set up by other mappers that are still
to be configured.
* :meth:`.MapperEvents.after_configured` - called once after
:func:`.configure_mappers` is complete; at this stage, all
:class:`.Mapper` objects that are known to SQLAlchemy will be fully
configured. Note that the calling application may still have other
mappings that haven't been produced yet, such as if they are in modules
as yet unimported.
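
    For example, a sketch of attaching one of these hooks through the
    event API (the listener body is illustrative)::

        from sqlalchemy import event
        from sqlalchemy.orm import mapper

        @event.listens_for(mapper, "before_configured")
        def receive_before_configured():
            # establish extra options or mappings before configuration
            pass
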
"""
if not Mapper._new_mappers:
return
_CONFIGURE_MUTEX.acquire()
try:
global _already_compiling
if _already_compiling:
return
_already_compiling = True
try:
# double-check inside mutex
if not Mapper._new_mappers:
return
Mapper.dispatch._for_class(Mapper).before_configured()
# initialize properties on all mappers
# note that _mapper_registry is unordered, which
# may randomly conceal/reveal issues related to
# the order of mapper compilation
for mapper in list(_mapper_registry):
if getattr(mapper, '_configure_failed', False):
e = sa_exc.InvalidRequestError(
"One or more mappers failed to initialize - "
"can't proceed with initialization of other "
"mappers. Original exception was: %s"
% mapper._configure_failed)
e._configure_failed = mapper._configure_failed
raise e
if not mapper.configured:
try:
mapper._post_configure_properties()
mapper._expire_memoizations()
mapper.dispatch.mapper_configured(
mapper, mapper.class_)
except Exception:
exc = sys.exc_info()[1]
if not hasattr(exc, '_configure_failed'):
mapper._configure_failed = exc
raise
Mapper._new_mappers = False
finally:
_already_compiling = False
finally:
_CONFIGURE_MUTEX.release()
Mapper.dispatch._for_class(Mapper).after_configured()
def reconstructor(fn):
"""Decorate a method as the 'reconstructor' hook.
Designates a method as the "reconstructor", an ``__init__``-like
method that will be called by the ORM after the instance has been
loaded from the database or otherwise reconstituted.
The reconstructor will be invoked with no arguments. Scalar
(non-collection) database-mapped attributes of the instance will
be available for use within the function. Eagerly-loaded
collections are generally not yet available and will usually only
contain the first element. ORM state changes made to objects at
this stage will not be recorded for the next flush() operation, so
the activity within a reconstructor should be conservative.
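
    Usage sketch (class and attribute names are illustrative)::

        from sqlalchemy.orm import reconstructor

        class MyMappedClass(object):
            @reconstructor
            def init_on_load(self):
                # re-establish transient, non-mapped state after a load
                self.stuff = []
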
"""
fn.__sa_reconstructor__ = True
return fn
def validates(*names, **kw):
"""Decorate a method as a 'validator' for one or more named properties.
Designates a method as a validator, a method which receives the
name of the attribute as well as a value to be assigned, or in the
case of a collection, the value to be added to the collection.
The function can then raise validation exceptions to halt the
process from continuing (where Python's built-in ``ValueError``
and ``AssertionError`` exceptions are reasonable choices), or can
modify or replace the value before proceeding. The function should
otherwise return the given value.
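
    A minimal sketch (``User``, ``Base`` and the ``email`` attribute are
    illustrative)::

        from sqlalchemy.orm import validates

        class User(Base):
            # ... column definitions ...

            @validates('email')
            def validate_email(self, key, value):
                assert '@' in value
                return value
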
Note that a validator for a collection **cannot** issue a load of that
collection within the validation routine - this usage raises
an assertion to avoid recursion overflows. This is a reentrant
condition which is not supported.
:param \*names: list of attribute names to be validated.
:param include_removes: if True, "remove" events will be
sent as well - the validation function must accept an additional
argument "is_remove" which will be a boolean.
.. versionadded:: 0.7.7
:param include_backrefs: defaults to ``True``; if ``False``, the
validation function will not emit if the originator is an attribute
event related via a backref. This can be used for bi-directional
:func:`.validates` usage where only one validator should emit per
attribute operation.
.. versionadded:: 0.9.0
.. seealso::
:ref:`simple_validators` - usage examples for :func:`.validates`
"""
include_removes = kw.pop('include_removes', False)
include_backrefs = kw.pop('include_backrefs', True)
def wrap(fn):
fn.__sa_validators__ = names
fn.__sa_validation_opts__ = {
"include_removes": include_removes,
"include_backrefs": include_backrefs
}
return fn
return wrap
def _event_on_load(state, ctx):
instrumenting_mapper = state.manager.info[_INSTRUMENTOR]
if instrumenting_mapper._reconstructor:
instrumenting_mapper._reconstructor(state.obj())
def _event_on_first_init(manager, cls):
"""Initial mapper compilation trigger.
    Instrumentation calls this one when InstanceState
    is first generated; it is needed for legacy mutable
    attributes to work.
"""
instrumenting_mapper = manager.info.get(_INSTRUMENTOR)
if instrumenting_mapper:
if Mapper._new_mappers:
configure_mappers()
def _event_on_init(state, args, kwargs):
"""Run init_instance hooks.
This also includes mapper compilation, normally not needed
here but helps with some piecemeal configuration
scenarios (such as in the ORM tutorial).
"""
instrumenting_mapper = state.manager.info.get(_INSTRUMENTOR)
if instrumenting_mapper:
if Mapper._new_mappers:
configure_mappers()
if instrumenting_mapper._set_polymorphic_identity:
instrumenting_mapper._set_polymorphic_identity(state)
class _ColumnMapping(dict):
"""Error reporting helper for mapper._columntoproperty."""
__slots__ = 'mapper',
def __init__(self, mapper):
self.mapper = mapper
def __missing__(self, column):
prop = self.mapper._props.get(column)
if prop:
raise orm_exc.UnmappedColumnError(
"Column '%s.%s' is not available, due to "
"conflicting property '%s':%r" % (
column.table.name, column.name, column.key, prop))
raise orm_exc.UnmappedColumnError(
"No column %s is configured on mapper %s..." %
(column, self.mapper))
| gpl-3.0 |
chadnetzer/numpy-gaurdro | numpy/f2py/crackfortran.py | 2 | 114871 | #!/usr/bin/env python
"""
crackfortran --- read fortran (77,90) code and extract declaration information.
Usage is explained in the comment block below.
Copyright 1999-2004 Pearu Peterson all rights reserved,
Pearu Peterson <pearu@ioc.ee>
Permission to use, modify, and distribute this software is given under the
terms of the NumPy License.
NO WARRANTY IS EXPRESSED OR IMPLIED. USE AT YOUR OWN RISK.
$Date: 2005/09/27 07:13:49 $
Pearu Peterson
"""
__version__ = "$Revision: 1.177 $"[10:-1]
import __version__
f2py_version = __version__.version
"""
Usage of crackfortran:
======================
Command line keys: -quiet,-verbose,-fix,-f77,-f90,-show,-h <pyffilename>
-m <module name for f77 routines>,--ignore-contains
Functions: crackfortran, crack2fortran
The following Fortran statements/constructions are supported
(or will be if needed):
block data,byte,call,character,common,complex,contains,data,
dimension,double complex,double precision,end,external,function,
implicit,integer,intent,interface,intrinsic,
logical,module,optional,parameter,private,public,
program,real,(sequence?),subroutine,type,use,virtual,
include,pythonmodule
Note: 'virtual' is mapped to 'dimension'.
Note: 'implicit integer (z) static (z)' is 'implicit static (z)' (this is a minor bug).
Note: code after 'contains' will be ignored until its scope ends.
Note: 'common' statement is extended: dimensions are moved to variable definitions
Note: f2py directive: <commentchar>f2py<line> is read as <line>
Note: pythonmodule is introduced to represent Python module
Usage:
`postlist=crackfortran(files,funcs)`
`postlist` contains declaration information read from the list of files `files`.
`crack2fortran(postlist)` returns a fortran code to be saved to pyf-file
`postlist` has the following structure:
*** it is a list of dictionaries containing `blocks':
B = {'block','body','vars','parent_block'[,'name','prefix','args','result',
'implicit','externals','interfaced','common','sortvars',
'commonvars','note']}
B['block'] = 'interface' | 'function' | 'subroutine' | 'module' |
'program' | 'block data' | 'type' | 'pythonmodule'
B['body'] --- list containing `subblocks' with the same structure as `blocks'
B['parent_block'] --- dictionary of a parent block:
C['body'][<index>]['parent_block'] is C
B['vars'] --- dictionary of variable definitions
B['sortvars'] --- dictionary of variable definitions sorted by dependence (independent first)
B['name'] --- name of the block (not if B['block']=='interface')
B['prefix'] --- prefix string (only if B['block']=='function')
B['args'] --- list of argument names if B['block']== 'function' | 'subroutine'
B['result'] --- name of the return value (only if B['block']=='function')
B['implicit'] --- dictionary {'a':<variable definition>,'b':...} | None
B['externals'] --- list of variables being external
B['interfaced'] --- list of variables being external and defined
B['common'] --- dictionary of common blocks (list of objects)
B['commonvars'] --- list of variables used in common blocks (dimensions are moved to variable definitions)
B['from'] --- string showing the 'parents' of the current block
B['use'] --- dictionary of modules used in current block:
{<modulename>:{['only':<0|1>],['map':{<local_name1>:<use_name1>,...}]}}
B['note'] --- list of LaTeX comments on the block
B['f2pyenhancements'] --- optional dictionary
{'threadsafe':'','fortranname':<name>,
'callstatement':<C-expr>|<multi-line block>,
'callprotoargument':<C-expr-list>,
'usercode':<multi-line block>|<list of multi-line blocks>,
'pymethoddef:<multi-line block>'
}
B['entry'] --- dictionary {entryname:argslist,..}
B['varnames'] --- list of variable names given in the order of reading the
Fortran code, useful for derived types.
*** Variable definition is a dictionary
D = B['vars'][<variable name>] =
{'typespec'[,'attrspec','kindselector','charselector','=','typename']}
D['typespec'] = 'byte' | 'character' | 'complex' | 'double complex' |
'double precision' | 'integer' | 'logical' | 'real' | 'type'
D['attrspec'] --- list of attributes (e.g. 'dimension(<arrayspec>)',
'external','intent(in|out|inout|hide|c|callback|cache)',
'optional','required', etc)
K = D['kindselector'] = {['*','kind']} (only if D['typespec'] =
'complex' | 'integer' | 'logical' | 'real' )
C = D['charselector'] = {['*','len','kind']}
(only if D['typespec']=='character')
D['='] --- initialization expression string
D['typename'] --- name of the type if D['typespec']=='type'
D['dimension'] --- list of dimension bounds
D['intent'] --- list of intent specifications
D['depend'] --- list of variable names on which current variable depends on
D['check'] --- list of C-expressions; if C-expr returns zero, exception is raised
D['note'] --- list of LaTeX comments on the variable
*** Meaning of kind/char selectors (few examples):
 D['typespec']*K['*']
D['typespec'](kind=K['kind'])
character*C['*']
character(len=C['len'],kind=C['kind'])
(see also fortran type declaration statement formats below)
Fortran 90 type declaration statement format (F77 is subset of F90)
====================================================================
(Main source: IBM XL Fortran 5.1 Language Reference Manual)
type declaration = <typespec> [[<attrspec>]::] <entitydecl>
<typespec> = byte |
character[<charselector>] |
complex[<kindselector>] |
double complex |
double precision |
integer[<kindselector>] |
logical[<kindselector>] |
real[<kindselector>] |
type(<typename>)
<charselector> = * <charlen> |
([len=]<len>[,[kind=]<kind>]) |
(kind=<kind>[,len=<len>])
<kindselector> = * <intlen> |
([kind=]<kind>)
<attrspec> = comma separated list of attributes.
Only the following attributes are used in
building up the interface:
external
(parameter --- affects '=' key)
optional
intent
Other attributes are ignored.
<intentspec> = in | out | inout
<arrayspec> = comma separated list of dimension bounds.
<entitydecl> = <name> [[*<charlen>][(<arrayspec>)] | [(<arrayspec>)]*<charlen>]
[/<init_expr>/ | =<init_expr>] [,<entitydecl>]
In addition, the following attributes are used: check,depend,note
TODO:
* Apply 'parameter' attribute (e.g. 'integer parameter :: i=2' 'real x(i)'
-> 'real x(2)')
The above may be solved by creating appropriate preprocessor program, for example.
"""
#
import sys
import string
import fileinput
import re
import pprint
import os
import copy
from auxfuncs import *
# Global flags:
strictf77=1 # Ignore `!' comments unless line[0]=='!'
sourcecodeform='fix' # 'fix','free'
quiet=0 # Be verbose if 0 (Obsolete: not used any more)
verbose=1 # Be quiet if 0, extra verbose if > 1.
tabchar=4*' '
pyffilename=''
f77modulename=''
skipemptyends=0 # for old F77 programs without 'program' statement
ignorecontains=1
dolowercase=1
debug=[]
## do_analyze = 1
###### global variables
## use reload(crackfortran) to reset these variables
groupcounter=0
grouplist={groupcounter:[]}
neededmodule=-1
expectbegin=1
skipblocksuntil=-1
usermodules=[]
f90modulevars={}
gotnextfile=1
filepositiontext=''
currentfilename=''
skipfunctions=[]
skipfuncs=[]
onlyfuncs=[]
include_paths=[]
previous_context = None
###### Some helper functions
def show(o,f=0):pprint.pprint(o)
errmess=sys.stderr.write
def outmess(line,flag=1):
global filepositiontext
if not verbose: return
if not quiet:
if flag:sys.stdout.write(filepositiontext)
sys.stdout.write(line)
re._MAXCACHE=50
defaultimplicitrules={}
for c in "abcdefghopqrstuvwxyz$_": defaultimplicitrules[c]={'typespec':'real'}
for c in "ijklmn": defaultimplicitrules[c]={'typespec':'integer'}
del c
badnames={}
invbadnames={}
for n in ['int','double','float','char','short','long','void','case','while',
'return','signed','unsigned','if','for','typedef','sizeof','union',
'struct','static','register','new','break','do','goto','switch',
'continue','else','inline','extern','delete','const','auto',
'len','rank','shape','index','slen','size','_i',
'max', 'min',
'flen','fshape',
'string','complex_double','float_double','stdin','stderr','stdout',
'type','default']:
badnames[n]=n+'_bn'
invbadnames[n+'_bn']=n
def rmbadname1(name):
if name in badnames:
errmess('rmbadname1: Replacing "%s" with "%s".\n'%(name,badnames[name]))
return badnames[name]
return name
def rmbadname(names): return map(rmbadname1,names)
def undo_rmbadname1(name):
if name in invbadnames:
errmess('undo_rmbadname1: Replacing "%s" with "%s".\n'\
%(name,invbadnames[name]))
return invbadnames[name]
return name
def undo_rmbadname(names): return map(undo_rmbadname1,names)
def getextension(name):
i=name.rfind('.')
if i==-1: return ''
if '\\' in name[i:]: return ''
if '/' in name[i:]: return ''
return name[i+1:]
is_f_file = re.compile(r'.*[.](for|ftn|f77|f)\Z',re.I).match
_has_f_header = re.compile(r'-[*]-\s*fortran\s*-[*]-',re.I).search
_has_f90_header = re.compile(r'-[*]-\s*f90\s*-[*]-',re.I).search
_has_fix_header = re.compile(r'-[*]-\s*fix\s*-[*]-',re.I).search
_free_f90_start = re.compile(r'[^c*]\s*[^\s\d\t]',re.I).match
def is_free_format(file):
"""Check if file is in free format Fortran."""
# f90 allows both fixed and free format, assuming fixed unless
# signs of free format are detected.
result = 0
f = open(file,'r')
line = f.readline()
n = 15 # the number of non-comment lines to scan for hints
if _has_f_header(line):
n = 0
elif _has_f90_header(line):
n = 0
result = 1
while n>0 and line:
if line[0]!='!' and line.strip():
n -= 1
if (line[0]!='\t' and _free_f90_start(line[:5])) or line[-2:-1]=='&':
result = 1
break
line = f.readline()
f.close()
return result
####### Read fortran (77,90) code
def readfortrancode(ffile,dowithline=show,istop=1):
"""
Read fortran codes from files and
    1) Get rid of comments, line continuations, and empty lines; lowercase the text.
2) Call dowithline(line) on every line.
3) Recursively call itself when statement \"include '<filename>'\" is met.
"""
global gotnextfile,filepositiontext,currentfilename,sourcecodeform,strictf77,\
beginpattern,quiet,verbose,dolowercase,include_paths
if not istop:
saveglobals=gotnextfile,filepositiontext,currentfilename,sourcecodeform,strictf77,\
beginpattern,quiet,verbose,dolowercase
if ffile==[]: return
localdolowercase = dolowercase
cont=0
finalline=''
ll=''
commentline=re.compile(r'(?P<line>([^"]*"[^"]*"[^"!]*|[^\']*\'[^\']*\'[^\'!]*|[^!]*))!{1}(?P<rest>.*)')
includeline=re.compile(r'\s*include\s*(\'|")(?P<name>[^\'"]*)(\'|")',re.I)
cont1=re.compile(r'(?P<line>.*)&\s*\Z')
cont2=re.compile(r'(\s*&|)(?P<line>.*)')
mline_mark = re.compile(r".*?'''")
if istop: dowithline('',-1)
ll,l1='',''
spacedigits=[' ']+map(str,range(10))
filepositiontext=''
fin=fileinput.FileInput(ffile)
while 1:
l=fin.readline()
if not l: break
if fin.isfirstline():
filepositiontext=''
currentfilename=fin.filename()
gotnextfile=1
l1=l
strictf77=0
sourcecodeform='fix'
ext = os.path.splitext(currentfilename)[1]
if is_f_file(currentfilename) and \
not (_has_f90_header(l) or _has_fix_header(l)):
strictf77=1
elif is_free_format(currentfilename) and not _has_fix_header(l):
sourcecodeform='free'
if strictf77: beginpattern=beginpattern77
else: beginpattern=beginpattern90
outmess('\tReading file %s (format:%s%s)\n'\
%(`currentfilename`,sourcecodeform,
strictf77 and ',strict' or ''))
l=l.expandtabs().replace('\xa0',' ')
while not l=='': # Get rid of newline characters
if l[-1] not in "\n\r\f": break
l=l[:-1]
if not strictf77:
r=commentline.match(l)
if r:
l=r.group('line')+' ' # Strip comments starting with `!'
rl=r.group('rest')
if rl[:4].lower()=='f2py': # f2py directive
l = l + 4*' '
r=commentline.match(rl[4:])
if r: l=l+r.group('line')
else: l = l + rl[4:]
if l.strip()=='': # Skip empty line
cont=0
continue
if sourcecodeform=='fix':
if l[0] in ['*','c','!','C','#']:
if l[1:5].lower()=='f2py': # f2py directive
l=' '+l[5:]
else: # Skip comment line
cont=0
continue
elif strictf77:
if len(l)>72: l=l[:72]
if not (l[0] in spacedigits):
raise Exception('readfortrancode: Found non-(space,digit) char '
'in the first column.\n\tAre you sure that '
'this code is in fix form?\n\tline=%s' % `l`)
if (not cont or strictf77) and (len(l)>5 and not l[5]==' '):
# Continuation of a previous line
ll=ll+l[6:]
finalline=''
origfinalline=''
else:
if not strictf77:
# F90 continuation
r=cont1.match(l)
if r: l=r.group('line') # Continuation follows ..
if cont:
ll=ll+cont2.match(l).group('line')
finalline=''
origfinalline=''
else:
l=' '+l[5:] # clean up line beginning from possible digits.
if localdolowercase: finalline=ll.lower()
else: finalline=ll
origfinalline=ll
ll=l
cont=(r is not None)
else:
l=' '+l[5:] # clean up line beginning from possible digits.
if localdolowercase: finalline=ll.lower()
else: finalline=ll
origfinalline =ll
ll=l
elif sourcecodeform=='free':
if not cont and ext=='.pyf' and mline_mark.match(l):
l = l + '\n'
while 1:
lc = fin.readline()
if not lc:
errmess('Unexpected end of file when reading multiline\n')
break
l = l + lc
if mline_mark.match(lc):
break
l = l.rstrip()
r=cont1.match(l)
if r: l=r.group('line') # Continuation follows ..
if cont:
ll=ll+cont2.match(l).group('line')
finalline=''
origfinalline=''
else:
if localdolowercase: finalline=ll.lower()
else: finalline=ll
origfinalline =ll
ll=l
cont=(r is not None)
else:
raise ValueError,"Flag sourcecodeform must be either 'fix' or 'free': %s"%`sourcecodeform`
filepositiontext='Line #%d in %s:"%s"\n\t' % (fin.filelineno()-1,currentfilename,l1)
m=includeline.match(origfinalline)
if m:
fn=m.group('name')
if os.path.isfile(fn):
readfortrancode(fn,dowithline=dowithline,istop=0)
else:
include_dirs = [os.path.dirname(currentfilename)] + include_paths
foundfile = 0
for inc_dir in include_dirs:
fn1 = os.path.join(inc_dir,fn)
if os.path.isfile(fn1):
foundfile = 1
readfortrancode(fn1,dowithline=dowithline,istop=0)
break
if not foundfile:
outmess('readfortrancode: could not find include file %s. Ignoring.\n'%(`fn`))
else:
dowithline(finalline)
l1=ll
if localdolowercase:
finalline=ll.lower()
else: finalline=ll
origfinalline = ll
filepositiontext='Line #%d in %s:"%s"\n\t' % (fin.filelineno()-1,currentfilename,l1)
m=includeline.match(origfinalline)
if m:
fn=m.group('name')
fn1=os.path.join(os.path.dirname(currentfilename),fn)
if os.path.isfile(fn):
readfortrancode(fn,dowithline=dowithline,istop=0)
elif os.path.isfile(fn1):
readfortrancode(fn1,dowithline=dowithline,istop=0)
else:
outmess('readfortrancode: could not find include file %s. Ignoring.\n'%(`fn`))
else:
dowithline(finalline)
filepositiontext=''
fin.close()
if istop: dowithline('',1)
else:
gotnextfile,filepositiontext,currentfilename,sourcecodeform,strictf77,\
beginpattern,quiet,verbose,dolowercase=saveglobals
########### Crack line
beforethisafter=r'\s*(?P<before>%s(?=\s*(\b(%s)\b)))'+ \
r'\s*(?P<this>(\b(%s)\b))'+ \
r'\s*(?P<after>%s)\s*\Z'
##
fortrantypes='character|logical|integer|real|complex|double\s*(precision\s*(complex|)|complex)|type(?=\s*\([\w\s,=(*)]*\))|byte'
typespattern=re.compile(beforethisafter%('',fortrantypes,fortrantypes,'.*'),re.I),'type'
typespattern4implicit=re.compile(beforethisafter%('',fortrantypes+'|static|automatic|undefined',fortrantypes+'|static|automatic|undefined','.*'),re.I)
#
functionpattern=re.compile(beforethisafter%('([a-z]+[\w\s(=*+-/)]*?|)','function','function','.*'),re.I),'begin'
subroutinepattern=re.compile(beforethisafter%('[a-z\s]*?','subroutine','subroutine','.*'),re.I),'begin'
#modulepattern=re.compile(beforethisafter%('[a-z\s]*?','module','module','.*'),re.I),'begin'
#
groupbegins77=r'program|block\s*data'
beginpattern77=re.compile(beforethisafter%('',groupbegins77,groupbegins77,'.*'),re.I),'begin'
groupbegins90=groupbegins77+r'|module(?!\s*procedure)|python\s*module|interface|type(?!\s*\()'
beginpattern90=re.compile(beforethisafter%('',groupbegins90,groupbegins90,'.*'),re.I),'begin'
groupends=r'end|endprogram|endblockdata|endmodule|endpythonmodule|endinterface'
endpattern=re.compile(beforethisafter%('',groupends,groupends,'[\w\s]*'),re.I),'end'
#endifs='end\s*(if|do|where|select|while|forall)'
endifs='(end\s*(if|do|where|select|while|forall))|(module\s*procedure)'
endifpattern=re.compile(beforethisafter%('[\w]*?',endifs,endifs,'[\w\s]*'),re.I),'endif'
#
implicitpattern=re.compile(beforethisafter%('','implicit','implicit','.*'),re.I),'implicit'
dimensionpattern=re.compile(beforethisafter%('','dimension|virtual','dimension|virtual','.*'),re.I),'dimension'
externalpattern=re.compile(beforethisafter%('','external','external','.*'),re.I),'external'
optionalpattern=re.compile(beforethisafter%('','optional','optional','.*'),re.I),'optional'
requiredpattern=re.compile(beforethisafter%('','required','required','.*'),re.I),'required'
publicpattern=re.compile(beforethisafter%('','public','public','.*'),re.I),'public'
privatepattern=re.compile(beforethisafter%('','private','private','.*'),re.I),'private'
intrinsicpattern=re.compile(beforethisafter%('','intrinsic','intrinsic','.*'),re.I),'intrinsic'
intentpattern=re.compile(beforethisafter%('','intent|depend|note|check','intent|depend|note|check','\s*\(.*?\).*'),re.I),'intent'
parameterpattern=re.compile(beforethisafter%('','parameter','parameter','\s*\(.*'),re.I),'parameter'
datapattern=re.compile(beforethisafter%('','data','data','.*'),re.I),'data'
callpattern=re.compile(beforethisafter%('','call','call','.*'),re.I),'call'
entrypattern=re.compile(beforethisafter%('','entry','entry','.*'),re.I),'entry'
callfunpattern=re.compile(beforethisafter%('','callfun','callfun','.*'),re.I),'callfun'
commonpattern=re.compile(beforethisafter%('','common','common','.*'),re.I),'common'
usepattern=re.compile(beforethisafter%('','use','use','.*'),re.I),'use'
containspattern=re.compile(beforethisafter%('','contains','contains',''),re.I),'contains'
formatpattern=re.compile(beforethisafter%('','format','format','.*'),re.I),'format'
## Non-fortran and f2py-specific statements
f2pyenhancementspattern=re.compile(beforethisafter%('','threadsafe|fortranname|callstatement|callprotoargument|usercode|pymethoddef','threadsafe|fortranname|callstatement|callprotoargument|usercode|pymethoddef','.*'),re.I|re.S),'f2pyenhancements'
multilinepattern = re.compile(r"\s*(?P<before>''')(?P<this>.*?)(?P<after>''')\s*\Z",re.S),'multiline'
##
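# Example (illustrative): _simplifyargs('x,y(2),z') -> 'x,y_2_,z';
# parentheses and commas inside arguments are flattened to underscores.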
def _simplifyargs(argsline):
a = []
for n in markoutercomma(argsline).split('@,@'):
for r in '(),':
n = n.replace(r,'_')
a.append(n)
return ','.join(a)
crackline_re_1 = re.compile(r'\s*(?P<result>\b[a-z]+[\w]*\b)\s*[=].*',re.I)
def crackline(line,reset=0):
"""
reset=-1 --- initialize
reset=0 --- crack the line
    reset=1 --- final check whether a mismatch of blocks occurred
Cracked data is saved in grouplist[0].
"""
global beginpattern,groupcounter,groupname,groupcache,grouplist,gotnextfile,\
filepositiontext,currentfilename,neededmodule,expectbegin,skipblocksuntil,\
skipemptyends,previous_context
if ';' in line and not (f2pyenhancementspattern[0].match(line) or
multilinepattern[0].match(line)):
for l in line.split(';'):
assert reset==0,`reset` # XXX: non-zero reset values need testing
crackline(l,reset)
return
if reset<0:
groupcounter=0
groupname={groupcounter:''}
groupcache={groupcounter:{}}
grouplist={groupcounter:[]}
groupcache[groupcounter]['body']=[]
groupcache[groupcounter]['vars']={}
groupcache[groupcounter]['block']=''
groupcache[groupcounter]['name']=''
neededmodule=-1
skipblocksuntil=-1
return
if reset>0:
fl=0
if f77modulename and neededmodule==groupcounter: fl=2
while groupcounter>fl:
outmess('crackline: groupcounter=%s groupname=%s\n'%(`groupcounter`,`groupname`))
outmess('crackline: Mismatch of blocks encountered. Trying to fix it by assuming "end" statement.\n')
grouplist[groupcounter-1].append(groupcache[groupcounter])
grouplist[groupcounter-1][-1]['body']=grouplist[groupcounter]
del grouplist[groupcounter]
groupcounter=groupcounter-1
if f77modulename and neededmodule==groupcounter:
grouplist[groupcounter-1].append(groupcache[groupcounter])
grouplist[groupcounter-1][-1]['body']=grouplist[groupcounter]
del grouplist[groupcounter]
groupcounter=groupcounter-1 # end interface
grouplist[groupcounter-1].append(groupcache[groupcounter])
grouplist[groupcounter-1][-1]['body']=grouplist[groupcounter]
del grouplist[groupcounter]
groupcounter=groupcounter-1 # end module
neededmodule=-1
return
if line=='': return
flag=0
for pat in [dimensionpattern,externalpattern,intentpattern,optionalpattern,
requiredpattern,
parameterpattern,datapattern,publicpattern,privatepattern,
                intrinsicpattern,
endifpattern,endpattern,
formatpattern,
beginpattern,functionpattern,subroutinepattern,
implicitpattern,typespattern,commonpattern,
callpattern,usepattern,containspattern,
entrypattern,
f2pyenhancementspattern,
multilinepattern
]:
m = pat[0].match(line)
if m:
break
flag=flag+1
if not m:
re_1 = crackline_re_1
if 0<=skipblocksuntil<=groupcounter:return
if 'externals' in groupcache[groupcounter]:
for name in groupcache[groupcounter]['externals']:
if name in invbadnames:
name=invbadnames[name]
if 'interfaced' in groupcache[groupcounter] and name in groupcache[groupcounter]['interfaced']:
continue
m1=re.match(r'(?P<before>[^"]*)\b%s\b\s*@\(@(?P<args>[^@]*)@\)@.*\Z'%name,markouterparen(line),re.I)
if m1:
m2 = re_1.match(m1.group('before'))
a = _simplifyargs(m1.group('args'))
if m2:
line='callfun %s(%s) result (%s)'%(name,a,m2.group('result'))
else: line='callfun %s(%s)'%(name,a)
m = callfunpattern[0].match(line)
if not m:
outmess('crackline: could not resolve function call for line=%s.\n'%`line`)
return
analyzeline(m,'callfun',line)
return
if verbose>1:
previous_context = None
outmess('crackline:%d: No pattern for line\n'%(groupcounter))
return
elif pat[1]=='end':
if 0<=skipblocksuntil<groupcounter:
groupcounter=groupcounter-1
if skipblocksuntil<=groupcounter: return
if groupcounter<=0:
raise Exception('crackline: groupcounter(=%s) is nonpositive. '
'Check the blocks.' \
% (groupcounter))
m1 = beginpattern[0].match((line))
if (m1) and (not m1.group('this')==groupname[groupcounter]):
raise Exception('crackline: End group %s does not match with '
'previous Begin group %s\n\t%s' % \
(`m1.group('this')`, `groupname[groupcounter]`,
filepositiontext)
)
if skipblocksuntil==groupcounter:
skipblocksuntil=-1
grouplist[groupcounter-1].append(groupcache[groupcounter])
grouplist[groupcounter-1][-1]['body']=grouplist[groupcounter]
del grouplist[groupcounter]
groupcounter=groupcounter-1
if not skipemptyends:
expectbegin=1
elif pat[1] == 'begin':
if 0<=skipblocksuntil<=groupcounter:
groupcounter=groupcounter+1
return
gotnextfile=0
analyzeline(m,pat[1],line)
expectbegin=0
elif pat[1]=='endif':
pass
elif pat[1]=='contains':
if ignorecontains: return
if 0<=skipblocksuntil<=groupcounter: return
skipblocksuntil=groupcounter
else:
if 0<=skipblocksuntil<=groupcounter:return
analyzeline(m,pat[1],line)
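# Example (illustrative): markouterparen('call foo(a, b(c))') returns
# 'call foo@(@a, b(c)@)@'; only the outermost parentheses are marked.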
def markouterparen(line):
l='';f=0
for c in line:
if c=='(':
f=f+1
if f==1: l=l+'@(@'; continue
elif c==')':
f=f-1
if f==0: l=l+'@)@'; continue
l=l+c
return l
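# Example (illustrative): markoutercomma('a,b(c,d),e') returns
# 'a@,@b(c,d)@,@e'; only commas outside parentheses/quotes are marked.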
def markoutercomma(line,comma=','):
l='';f=0
cc=''
for c in line:
if (not cc or cc==')') and c=='(':
f=f+1
cc = ')'
elif not cc and c=='\'' and (not l or l[-1]!='\\'):
f=f+1
cc = '\''
elif c==cc:
f=f-1
if f==0:
cc=''
elif c==comma and f==0:
l=l+'@'+comma+'@'
continue
l=l+c
assert not f,`f,line,l,cc`
return l
def unmarkouterparen(line):
r = line.replace('@(@','(').replace('@)@',')')
return r
def appenddecl(decl,decl2,force=1):
if not decl: decl={}
if not decl2: return decl
if decl is decl2: return decl
for k in decl2.keys():
if k=='typespec':
if force or k not in decl:
decl[k]=decl2[k]
elif k=='attrspec':
for l in decl2[k]:
decl=setattrspec(decl,l,force)
elif k=='kindselector':
decl=setkindselector(decl,decl2[k],force)
elif k=='charselector':
decl=setcharselector(decl,decl2[k],force)
elif k in ['=','typename']:
if force or k not in decl:
decl[k]=decl2[k]
elif k=='note':
pass
elif k in ['intent','check','dimension','optional','required']:
errmess('appenddecl: "%s" not implemented.\n'%k)
else:
raise Exception('appenddecl: Unknown variable definition key:' + \
str(k))
return decl
selectpattern=re.compile(r'\s*(?P<this>(@\(@.*?@\)@|[*][\d*]+|[*]\s*@\(@.*?@\)@|))(?P<after>.*)\Z',re.I)
nameargspattern=re.compile(r'\s*(?P<name>\b[\w$]+\b)\s*(@\(@\s*(?P<args>[\w\s,]*)\s*@\)@|)\s*(result(\s*@\(@\s*(?P<result>\b[\w$]+\b)\s*@\)@|))*\s*\Z',re.I)
callnameargspattern=re.compile(r'\s*(?P<name>\b[\w$]+\b)\s*@\(@\s*(?P<args>.*)\s*@\)@\s*\Z',re.I)
real16pattern = re.compile(r'([-+]?(?:\d+(?:\.\d*)?|\d*\.\d+))[dD]((?:[-+]?\d+)?)')
real8pattern = re.compile(r'([-+]?((?:\d+(?:\.\d*)?|\d*\.\d+))[eE]((?:[-+]?\d+)?)|(\d+\.\d*))')
_intentcallbackpattern = re.compile(r'intent\s*\(.*?\bcallback\b',re.I)
def _is_intent_callback(vdecl):
for a in vdecl.get('attrspec',[]):
if _intentcallbackpattern.match(a):
return 1
return 0
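# Example (illustrative): _resolvenameargspattern('myfunc(a,b) result(c)')
# returns ('myfunc', 'a,b', 'c'); without a result clause, result is None.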
def _resolvenameargspattern(line):
line = markouterparen(line)
m1=nameargspattern.match(line)
if m1: return m1.group('name'),m1.group('args'),m1.group('result')
m1=callnameargspattern.match(line)
if m1: return m1.group('name'),m1.group('args'),None
return None,[],None
def analyzeline(m,case,line):
global groupcounter,groupname,groupcache,grouplist,filepositiontext,\
currentfilename,f77modulename,neededinterface,neededmodule,expectbegin,\
gotnextfile,previous_context
block=m.group('this')
if case != 'multiline':
previous_context = None
if expectbegin and case not in ['begin','call','callfun','type'] \
and not skipemptyends and groupcounter<1:
newname=os.path.basename(currentfilename).split('.')[0]
outmess('analyzeline: no group yet. Creating program group with name "%s".\n'%newname)
gotnextfile=0
groupcounter=groupcounter+1
groupname[groupcounter]='program'
groupcache[groupcounter]={}
grouplist[groupcounter]=[]
groupcache[groupcounter]['body']=[]
groupcache[groupcounter]['vars']={}
groupcache[groupcounter]['block']='program'
groupcache[groupcounter]['name']=newname
groupcache[groupcounter]['from']='fromsky'
expectbegin=0
if case in ['begin','call','callfun']:
# Crack line => block,name,args,result
block = block.lower()
if re.match(r'block\s*data',block,re.I): block='block data'
if re.match(r'python\s*module',block,re.I): block='python module'
name,args,result = _resolvenameargspattern(m.group('after'))
if name is None:
if block=='block data':
name = '_BLOCK_DATA_'
else:
name = ''
if block not in ['interface','block data']:
outmess('analyzeline: No name/args pattern found for line.\n')
previous_context = (block,name,groupcounter)
if args: args=rmbadname([x.strip() for x in markoutercomma(args).split('@,@')])
else: args=[]
if '' in args:
while '' in args:
args.remove('')
outmess('analyzeline: argument list is malformed (missing argument).\n')
# end of crack line => block,name,args,result
needmodule=0
needinterface=0
if case in ['call','callfun']:
needinterface=1
if 'args' not in groupcache[groupcounter]:
return
if name not in groupcache[groupcounter]['args']:
return
for it in grouplist[groupcounter]:
if it['name']==name:
return
if name in groupcache[groupcounter]['interfaced']:
return
block={'call':'subroutine','callfun':'function'}[case]
if f77modulename and neededmodule==-1 and groupcounter<=1:
neededmodule=groupcounter+2
needmodule=1
needinterface=1
# Create new block(s)
groupcounter=groupcounter+1
groupcache[groupcounter]={}
grouplist[groupcounter]=[]
if needmodule:
if verbose>1:
outmess('analyzeline: Creating module block %s\n'%`f77modulename`,0)
groupname[groupcounter]='module'
groupcache[groupcounter]['block']='python module'
groupcache[groupcounter]['name']=f77modulename
groupcache[groupcounter]['from']=''
groupcache[groupcounter]['body']=[]
groupcache[groupcounter]['externals']=[]
groupcache[groupcounter]['interfaced']=[]
groupcache[groupcounter]['vars']={}
groupcounter=groupcounter+1
groupcache[groupcounter]={}
grouplist[groupcounter]=[]
if needinterface:
if verbose>1:
outmess('analyzeline: Creating additional interface block (groupcounter=%s).\n' % (groupcounter),0)
groupname[groupcounter]='interface'
groupcache[groupcounter]['block']='interface'
groupcache[groupcounter]['name']='unknown_interface'
groupcache[groupcounter]['from']='%s:%s'%(groupcache[groupcounter-1]['from'],groupcache[groupcounter-1]['name'])
groupcache[groupcounter]['body']=[]
groupcache[groupcounter]['externals']=[]
groupcache[groupcounter]['interfaced']=[]
groupcache[groupcounter]['vars']={}
groupcounter=groupcounter+1
groupcache[groupcounter]={}
grouplist[groupcounter]=[]
groupname[groupcounter]=block
groupcache[groupcounter]['block']=block
if not name: name='unknown_'+block
groupcache[groupcounter]['prefix']=m.group('before')
groupcache[groupcounter]['name']=rmbadname1(name)
groupcache[groupcounter]['result']=result
if groupcounter==1:
groupcache[groupcounter]['from']=currentfilename
else:
if f77modulename and groupcounter==3:
groupcache[groupcounter]['from']='%s:%s'%(groupcache[groupcounter-1]['from'],currentfilename)
else:
groupcache[groupcounter]['from']='%s:%s'%(groupcache[groupcounter-1]['from'],groupcache[groupcounter-1]['name'])
for k in groupcache[groupcounter].keys():
if not groupcache[groupcounter][k]: del groupcache[groupcounter][k]
groupcache[groupcounter]['args']=args
groupcache[groupcounter]['body']=[]
groupcache[groupcounter]['externals']=[]
groupcache[groupcounter]['interfaced']=[]
groupcache[groupcounter]['vars']={}
groupcache[groupcounter]['entry']={}
# end of creation
if block=='type':
groupcache[groupcounter]['varnames'] = []
if case in ['call','callfun']: # set parents variables
if name not in groupcache[groupcounter-2]['externals']:
groupcache[groupcounter-2]['externals'].append(name)
groupcache[groupcounter]['vars']=copy.deepcopy(groupcache[groupcounter-2]['vars'])
#try: del groupcache[groupcounter]['vars'][groupcache[groupcounter-2]['name']]
#except: pass
try: del groupcache[groupcounter]['vars'][name][groupcache[groupcounter]['vars'][name]['attrspec'].index('external')]
except: pass
if block in ['function','subroutine']: # set global attributes
try: groupcache[groupcounter]['vars'][name]=appenddecl(groupcache[groupcounter]['vars'][name],groupcache[groupcounter-2]['vars'][''])
except: pass
if case=='callfun': # return type
if result and result in groupcache[groupcounter]['vars']:
if not name==result:
groupcache[groupcounter]['vars'][name]=appenddecl(groupcache[groupcounter]['vars'][name],groupcache[groupcounter]['vars'][result])
#if groupcounter>1: # name is interfaced
try: groupcache[groupcounter-2]['interfaced'].append(name)
except: pass
if block=='function':
t=typespattern[0].match(m.group('before')+' '+name)
if t:
typespec,selector,attr,edecl=cracktypespec0(t.group('this'),t.group('after'))
updatevars(typespec,selector,attr,edecl)
if case in ['call','callfun']:
grouplist[groupcounter-1].append(groupcache[groupcounter])
grouplist[groupcounter-1][-1]['body']=grouplist[groupcounter]
del grouplist[groupcounter]
groupcounter=groupcounter-1 # end routine
grouplist[groupcounter-1].append(groupcache[groupcounter])
grouplist[groupcounter-1][-1]['body']=grouplist[groupcounter]
del grouplist[groupcounter]
groupcounter=groupcounter-1 # end interface
elif case=='entry':
name,args,result=_resolvenameargspattern(m.group('after'))
if name is not None:
if args:
args=rmbadname([x.strip() for x in markoutercomma(args).split('@,@')])
else: args=[]
assert result is None,`result`
groupcache[groupcounter]['entry'][name] = args
previous_context = ('entry',name,groupcounter)
elif case=='type':
typespec,selector,attr,edecl=cracktypespec0(block,m.group('after'))
last_name = updatevars(typespec,selector,attr,edecl)
if last_name is not None:
previous_context = ('variable',last_name,groupcounter)
    elif case in ['dimension','intent','optional','required','external','public','private','intrinsic']:
edecl=groupcache[groupcounter]['vars']
ll=m.group('after').strip()
i=ll.find('::')
if i<0 and case=='intent':
i=markouterparen(ll).find('@)@')-2
ll=ll[:i+1]+'::'+ll[i+1:]
i=ll.find('::')
if ll[i:]=='::' and 'args' in groupcache[groupcounter]:
outmess('All arguments will have attribute %s%s\n'%(m.group('this'),ll[:i]))
ll = ll + ','.join(groupcache[groupcounter]['args'])
if i<0:i=0;pl=''
else: pl=ll[:i].strip();ll=ll[i+2:]
ch = markoutercomma(pl).split('@,@')
if len(ch)>1:
pl = ch[0]
outmess('analyzeline: cannot handle multiple attributes without type specification. Ignoring %r.\n' % (','.join(ch[1:])))
last_name = None
for e in [x.strip() for x in markoutercomma(ll).split('@,@')]:
m1=namepattern.match(e)
if not m1:
if case in ['public','private']: k=''
else:
print m.groupdict()
outmess('analyzeline: no name pattern found in %s statement for %s. Skipping.\n'%(case,`e`))
continue
else:
k=rmbadname1(m1.group('name'))
if k not in edecl:
edecl[k]={}
if case=='dimension':
ap=case+m1.group('after')
if case=='intent':
ap=m.group('this')+pl
if _intentcallbackpattern.match(ap):
if k not in groupcache[groupcounter]['args']:
if groupcounter>1:
outmess('analyzeline: appending intent(callback) %s'\
' to %s arguments\n' % (k,groupcache[groupcounter]['name']))
if '__user__' not in groupcache[groupcounter-2]['name']:
outmess('analyzeline: missing __user__ module (could be nothing)\n')
groupcache[groupcounter]['args'].append(k)
else:
errmess('analyzeline: intent(callback) %s is ignored' % (k))
else:
errmess('analyzeline: intent(callback) %s is already'\
' in argument list' % (k))
            if case in ['optional','required','public','external','private','intrinsic']:
ap=case
if 'attrspec' in edecl[k]:
edecl[k]['attrspec'].append(ap)
else:
edecl[k]['attrspec']=[ap]
if case=='external':
if groupcache[groupcounter]['block']=='program':
outmess('analyzeline: ignoring program arguments\n')
continue
if k not in groupcache[groupcounter]['args']:
#outmess('analyzeline: ignoring external %s (not in arguments list)\n'%(`k`))
continue
if 'externals' not in groupcache[groupcounter]:
groupcache[groupcounter]['externals']=[]
groupcache[groupcounter]['externals'].append(k)
last_name = k
groupcache[groupcounter]['vars']=edecl
if last_name is not None:
previous_context = ('variable',last_name,groupcounter)
elif case=='parameter':
edecl=groupcache[groupcounter]['vars']
ll=m.group('after').strip()[1:-1]
last_name = None
for e in markoutercomma(ll).split('@,@'):
try:
k,initexpr=[x.strip() for x in e.split('=')]
except:
outmess('analyzeline: could not extract name,expr in parameter statement "%s" of "%s"\n'%(e,ll));continue
params = get_parameters(edecl)
k=rmbadname1(k)
if k not in edecl:
edecl[k]={}
if '=' in edecl[k] and (not edecl[k]['=']==initexpr):
outmess('analyzeline: Overwriting the value of parameter "%s" ("%s") with "%s".\n'%(k,edecl[k]['='],initexpr))
t = determineexprtype(initexpr,params)
if t:
if t.get('typespec')=='real':
tt = list(initexpr)
for m in real16pattern.finditer(initexpr):
tt[m.start():m.end()] = list(\
initexpr[m.start():m.end()].lower().replace('d', 'e'))
initexpr = ''.join(tt)
elif t.get('typespec')=='complex':
initexpr = initexpr[1:].lower().replace('d','e').\
replace(',','+1j*(')
try:
v = eval(initexpr,{},params)
except (SyntaxError,NameError,TypeError),msg:
errmess('analyzeline: Failed to evaluate %r. Ignoring: %s\n'\
% (initexpr, msg))
continue
edecl[k]['='] = repr(v)
if 'attrspec' in edecl[k]:
edecl[k]['attrspec'].append('parameter')
else: edecl[k]['attrspec']=['parameter']
last_name = k
groupcache[groupcounter]['vars']=edecl
if last_name is not None:
previous_context = ('variable',last_name,groupcounter)
elif case=='implicit':
if m.group('after').strip().lower()=='none':
groupcache[groupcounter]['implicit']=None
elif m.group('after'):
if 'implicit' in groupcache[groupcounter]:
impl=groupcache[groupcounter]['implicit']
else: impl={}
if impl is None:
outmess('analyzeline: Overwriting earlier "implicit none" statement.\n')
impl={}
for e in markoutercomma(m.group('after')).split('@,@'):
decl={}
m1=re.match(r'\s*(?P<this>.*?)\s*(\(\s*(?P<after>[a-z-, ]+)\s*\)\s*|)\Z',e,re.I)
if not m1:
outmess('analyzeline: could not extract info of implicit statement part "%s"\n'%(e));continue
m2=typespattern4implicit.match(m1.group('this'))
if not m2:
outmess('analyzeline: could not extract types pattern of implicit statement part "%s"\n'%(e));continue
typespec,selector,attr,edecl=cracktypespec0(m2.group('this'),m2.group('after'))
kindselect,charselect,typename=cracktypespec(typespec,selector)
decl['typespec']=typespec
decl['kindselector']=kindselect
decl['charselector']=charselect
decl['typename']=typename
for k in decl.keys():
if not decl[k]: del decl[k]
for r in markoutercomma(m1.group('after')).split('@,@'):
if '-' in r:
try: begc,endc=[x.strip() for x in r.split('-')]
except:
outmess('analyzeline: expected "<char>-<char>" instead of "%s" in range list of implicit statement\n'%r);continue
else: begc=endc=r.strip()
if not len(begc)==len(endc)==1:
outmess('analyzeline: expected "<char>-<char>" instead of "%s" in range list of implicit statement (2)\n'%r);continue
for o in range(ord(begc),ord(endc)+1):
impl[chr(o)]=decl
groupcache[groupcounter]['implicit']=impl
elif case=='data':
ll=[]
dl='';il='';f=0;fc=1;inp=0
for c in m.group('after'):
if not inp:
if c=="'": fc=not fc
if c=='/' and fc: f=f+1;continue
if c=='(': inp = inp + 1
elif c==')': inp = inp - 1
if f==0: dl=dl+c
elif f==1: il=il+c
elif f==2:
dl = dl.strip()
if dl.startswith(','):
dl = dl[1:].strip()
ll.append([dl,il])
dl=c;il='';f=0
if f==2:
dl = dl.strip()
if dl.startswith(','):
dl = dl[1:].strip()
ll.append([dl,il])
vars={}
if 'vars' in groupcache[groupcounter]:
vars=groupcache[groupcounter]['vars']
last_name = None
for l in ll:
l=[x.strip() for x in l]
if l[0][0]==',':l[0]=l[0][1:]
if l[0][0]=='(':
outmess('analyzeline: implied-DO list "%s" is not supported. Skipping.\n'%l[0])
continue
#if '(' in l[0]:
# #outmess('analyzeline: ignoring this data statement.\n')
# continue
i=0;j=0;llen=len(l[1])
for v in rmbadname([x.strip() for x in markoutercomma(l[0]).split('@,@')]):
if v[0]=='(':
outmess('analyzeline: implied-DO list "%s" is not supported. Skipping.\n'%v)
# XXX: subsequent init expressions may get wrong values.
# Ignoring since data statements are irrelevant for wrapping.
continue
fc=0
while (i<llen) and (fc or not l[1][i]==','):
if l[1][i]=="'": fc=not fc
i=i+1
i=i+1
#v,l[1][j:i-1]=name,initvalue
if v not in vars:
vars[v]={}
if '=' in vars[v] and not vars[v]['=']==l[1][j:i-1]:
outmess('analyzeline: changing init expression of "%s" ("%s") to "%s"\n'%(v,vars[v]['='],l[1][j:i-1]))
vars[v]['=']=l[1][j:i-1]
j=i
last_name = v
groupcache[groupcounter]['vars']=vars
if last_name is not None:
previous_context = ('variable',last_name,groupcounter)
elif case=='common':
line=m.group('after').strip()
if not line[0]=='/':line='//'+line
cl=[]
f=0;bn='';ol=''
for c in line:
if c=='/':f=f+1;continue
if f>=3:
bn = bn.strip()
if not bn: bn='_BLNK_'
cl.append([bn,ol])
f=f-2;bn='';ol=''
if f%2: bn=bn+c
else: ol=ol+c
bn = bn.strip()
if not bn: bn='_BLNK_'
cl.append([bn,ol])
commonkey={}
if 'common' in groupcache[groupcounter]:
commonkey=groupcache[groupcounter]['common']
for c in cl:
if c[0] in commonkey:
outmess('analyzeline: previously defined common block encountered. Skipping.\n')
continue
commonkey[c[0]]=[]
for i in [x.strip() for x in markoutercomma(c[1]).split('@,@')]:
if i: commonkey[c[0]].append(i)
groupcache[groupcounter]['common']=commonkey
previous_context = ('common',bn,groupcounter)
elif case=='use':
m1=re.match(r'\A\s*(?P<name>\b[\w]+\b)\s*((,(\s*\bonly\b\s*:|(?P<notonly>))\s*(?P<list>.*))|)\s*\Z',m.group('after'),re.I)
if m1:
mm=m1.groupdict()
if 'use' not in groupcache[groupcounter]:
groupcache[groupcounter]['use']={}
name=m1.group('name')
groupcache[groupcounter]['use'][name]={}
isonly=0
if 'list' in mm and mm['list'] is not None:
if 'notonly' in mm and mm['notonly'] is None:
isonly=1
groupcache[groupcounter]['use'][name]['only']=isonly
ll=[x.strip() for x in mm['list'].split(',')]
rl={}
for l in ll:
if '=' in l:
m2=re.match(r'\A\s*(?P<local>\b[\w]+\b)\s*=\s*>\s*(?P<use>\b[\w]+\b)\s*\Z',l,re.I)
if m2: rl[m2.group('local').strip()]=m2.group('use').strip()
else:
outmess('analyzeline: No local=>use pattern found in %s\n'%`l`)
else:
rl[l]=l
groupcache[groupcounter]['use'][name]['map']=rl
else:
pass
else:
print m.groupdict()
outmess('analyzeline: Could not crack the use statement.\n')
elif case in ['f2pyenhancements']:
if 'f2pyenhancements' not in groupcache[groupcounter]:
groupcache[groupcounter]['f2pyenhancements'] = {}
d = groupcache[groupcounter]['f2pyenhancements']
if m.group('this')=='usercode' and 'usercode' in d:
if type(d['usercode']) is type(''):
d['usercode'] = [d['usercode']]
d['usercode'].append(m.group('after'))
else:
d[m.group('this')] = m.group('after')
elif case=='multiline':
if previous_context is None:
if verbose:
outmess('analyzeline: No context for multiline block.\n')
return
gc = groupcounter
#gc = previous_context[2]
appendmultiline(groupcache[gc],
previous_context[:2],
m.group('this'))
else:
if verbose>1:
print m.groupdict()
outmess('analyzeline: No code implemented for line.\n')
def appendmultiline(group, context_name,ml):
if 'f2pymultilines' not in group:
group['f2pymultilines'] = {}
d = group['f2pymultilines']
if context_name not in d:
d[context_name] = []
d[context_name].append(ml)
return
def cracktypespec0(typespec,ll):
selector=None
attr=None
if re.match(r'double\s*complex',typespec,re.I): typespec='double complex'
elif re.match(r'double\s*precision',typespec,re.I): typespec='double precision'
else: typespec=typespec.strip().lower()
m1=selectpattern.match(markouterparen(ll))
if not m1:
outmess('cracktypespec0: no kind/char_selector pattern found for line.\n')
return
d=m1.groupdict()
for k in d.keys(): d[k]=unmarkouterparen(d[k])
if typespec in ['complex','integer','logical','real','character','type']:
selector=d['this']
ll=d['after']
i=ll.find('::')
if i>=0:
attr=ll[:i].strip()
ll=ll[i+2:]
return typespec,selector,attr,ll
#####
namepattern=re.compile(r'\s*(?P<name>\b[\w]+\b)\s*(?P<after>.*)\s*\Z',re.I)
kindselector=re.compile(r'\s*(\(\s*(kind\s*=)?\s*(?P<kind>.*)\s*\)|[*]\s*(?P<kind2>.*?))\s*\Z',re.I)
charselector=re.compile(r'\s*(\((?P<lenkind>.*)\)|[*]\s*(?P<charlen>.*))\s*\Z',re.I)
lenkindpattern=re.compile(r'\s*(kind\s*=\s*(?P<kind>.*?)\s*(@,@\s*len\s*=\s*(?P<len>.*)|)|(len\s*=\s*|)(?P<len2>.*?)\s*(@,@\s*(kind\s*=\s*|)(?P<kind2>.*)|))\s*\Z',re.I)
lenarraypattern=re.compile(r'\s*(@\(@\s*(?!/)\s*(?P<array>.*?)\s*@\)@\s*[*]\s*(?P<len>.*?)|([*]\s*(?P<len2>.*?)|)\s*(@\(@\s*(?!/)\s*(?P<array2>.*?)\s*@\)@|))\s*(=\s*(?P<init>.*?)|(@\(@|)/\s*(?P<init2>.*?)\s*/(@\)@|)|)\s*\Z',re.I)
def removespaces(expr):
expr=expr.strip()
if len(expr)<=1: return expr
expr2=expr[0]
for i in range(1,len(expr)-1):
if expr[i]==' ' and \
((expr[i+1] in "()[]{}=+-/* ") or (expr[i-1] in "()[]{}=+-/* ")): continue
expr2=expr2+expr[i]
expr2=expr2+expr[-1]
return expr2
def markinnerspaces(line):
l='';f=0
cc='\''
cc1='"'
cb=''
for c in line:
if cb=='\\' and c in ['\\','\'','"']:
l=l+c;
cb=c
continue
if f%2==0 and c in ['\'','"']: cc=c; cc1={'\'':'"','"':'\''}[c] # outside a string: remember which quote char opens it
if c==cc: f=f+1 # count occurrences of the active quote char; odd f means inside a string
elif c==' ' and f%2==1: l=l+'@_@'; continue
l=l+c;cb=c
return l
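# Hedged usage sketch (the sample string is illustrative, not from a real
# Fortran source): markinnerspaces("call foo('a b', x)") marks the space
# inside the quoted string, giving "call foo('a@_@b', x)"; the marker is
# restored to a space later in updatevars.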
def updatevars(typespec,selector,attrspec,entitydecl):
global groupcache,groupcounter
last_name = None
kindselect,charselect,typename=cracktypespec(typespec,selector)
if attrspec:
attrspec=[x.strip() for x in markoutercomma(attrspec).split('@,@')]
l = []
c = re.compile(r'(?P<start>[a-zA-Z]+)')
for a in attrspec:
m = c.match(a)
if m:
s = m.group('start').lower()
a = s + a[len(s):]
l.append(a)
attrspec = l
el=[x.strip() for x in markoutercomma(entitydecl).split('@,@')]
el1=[]
for e in el:
for e1 in [x.strip() for x in markoutercomma(removespaces(markinnerspaces(e)),comma=' ').split('@ @')]:
if e1: el1.append(e1.replace('@_@',' '))
for e in el1:
m=namepattern.match(e)
if not m:
outmess('updatevars: no name pattern found for entity=%s. Skipping.\n'%(`e`))
continue
ename=rmbadname1(m.group('name'))
edecl={}
if ename in groupcache[groupcounter]['vars']:
edecl=groupcache[groupcounter]['vars'][ename].copy()
not_has_typespec = 'typespec' not in edecl
if not_has_typespec:
edecl['typespec']=typespec
elif typespec and (not typespec==edecl['typespec']):
outmess('updatevars: attempt to change the type of "%s" ("%s") to "%s". Ignoring.\n' % (ename,edecl['typespec'],typespec))
if 'kindselector' not in edecl:
edecl['kindselector']=copy.copy(kindselect)
elif kindselect:
for k in kindselect.keys():
if k in edecl['kindselector'] and (not kindselect[k]==edecl['kindselector'][k]):
outmess('updatevars: attempt to change the kindselector "%s" of "%s" ("%s") to "%s". Ignoring.\n' % (k,ename,edecl['kindselector'][k],kindselect[k]))
else: edecl['kindselector'][k]=copy.copy(kindselect[k])
if 'charselector' not in edecl and charselect:
if not_has_typespec:
edecl['charselector']=charselect
else:
errmess('updatevars:%s: attempt to change empty charselector to %r. Ignoring.\n' \
%(ename,charselect))
elif charselect:
for k in charselect.keys():
if k in edecl['charselector'] and (not charselect[k]==edecl['charselector'][k]):
outmess('updatevars: attempt to change the charselector "%s" of "%s" ("%s") to "%s". Ignoring.\n' % (k,ename,edecl['charselector'][k],charselect[k]))
else: edecl['charselector'][k]=copy.copy(charselect[k])
if 'typename' not in edecl:
edecl['typename']=typename
elif typename and (not edecl['typename']==typename):
outmess('updatevars: attempt to change the typename of "%s" ("%s") to "%s". Ignoring.\n' % (ename,edecl['typename'],typename))
if 'attrspec' not in edecl:
edecl['attrspec']=copy.copy(attrspec)
elif attrspec:
for a in attrspec:
if a not in edecl['attrspec']:
edecl['attrspec'].append(a)
else:
edecl['typespec']=copy.copy(typespec)
edecl['kindselector']=copy.copy(kindselect)
edecl['charselector']=copy.copy(charselect)
edecl['typename']=typename
edecl['attrspec']=copy.copy(attrspec)
if m.group('after'):
m1=lenarraypattern.match(markouterparen(m.group('after')))
if m1:
d1=m1.groupdict()
for lk in ['len','array','init']:
if d1[lk+'2'] is not None: d1[lk]=d1[lk+'2']; del d1[lk+'2']
for k in d1.keys():
if d1[k] is not None: d1[k]=unmarkouterparen(d1[k])
else: del d1[k]
if 'len' in d1 and 'array' in d1:
if d1['len']=='':
d1['len']=d1['array']
del d1['array']
else:
d1['array']=d1['array']+','+d1['len']
del d1['len']
errmess('updatevars: "%s %s" is mapped to "%s %s(%s)"\n'%(typespec,e,typespec,ename,d1['array']))
if 'array' in d1:
dm = 'dimension(%s)'%d1['array']
if 'attrspec' not in edecl or (not edecl['attrspec']):
edecl['attrspec']=[dm]
else:
edecl['attrspec'].append(dm)
for dm1 in edecl['attrspec']:
if dm1[:9]=='dimension' and dm1!=dm:
del edecl['attrspec'][-1]
errmess('updatevars:%s: attempt to change %r to %r. Ignoring.\n' \
% (ename,dm1,dm))
break
if 'len' in d1:
if typespec in ['complex','integer','logical','real']:
if ('kindselector' not in edecl) or (not edecl['kindselector']):
edecl['kindselector']={}
edecl['kindselector']['*']=d1['len']
elif typespec == 'character':
if ('charselector' not in edecl) or (not edecl['charselector']):
edecl['charselector']={}
if 'len' in edecl['charselector']:
del edecl['charselector']['len']
edecl['charselector']['*']=d1['len']
if 'init' in d1:
if '=' in edecl and (not edecl['=']==d1['init']):
outmess('updatevars: attempt to change the init expression of "%s" ("%s") to "%s". Ignoring.\n' % (ename,edecl['='],d1['init']))
else:
edecl['=']=d1['init']
else:
outmess('updatevars: could not crack entity declaration "%s". Ignoring.\n'%(ename+m.group('after')))
for k in edecl.keys():
if not edecl[k]:
del edecl[k]
groupcache[groupcounter]['vars'][ename]=edecl
if 'varnames' in groupcache[groupcounter]:
groupcache[groupcounter]['varnames'].append(ename)
last_name = ename
return last_name
def cracktypespec(typespec,selector):
kindselect=None
charselect=None
typename=None
if selector:
if typespec in ['complex','integer','logical','real']:
kindselect=kindselector.match(selector)
if not kindselect:
outmess('cracktypespec: no kindselector pattern found for %s\n'%(`selector`))
return
kindselect=kindselect.groupdict()
kindselect['*']=kindselect['kind2']
del kindselect['kind2']
for k in kindselect.keys():
if not kindselect[k]: del kindselect[k]
for k,i in kindselect.items():
kindselect[k] = rmbadname1(i)
elif typespec=='character':
charselect=charselector.match(selector)
if not charselect:
outmess('cracktypespec: no charselector pattern found for %s\n'%(`selector`))
return
charselect=charselect.groupdict()
charselect['*']=charselect['charlen']
del charselect['charlen']
if charselect['lenkind']:
lenkind=lenkindpattern.match(markoutercomma(charselect['lenkind']))
lenkind=lenkind.groupdict()
for lk in ['len','kind']:
if lenkind[lk+'2']:
lenkind[lk]=lenkind[lk+'2']
charselect[lk]=lenkind[lk]
del lenkind[lk+'2']
del charselect['lenkind']
for k in charselect.keys():
if not charselect[k]: del charselect[k]
for k,i in charselect.items():
charselect[k] = rmbadname1(i)
elif typespec=='type':
typename=re.match(r'\s*\(\s*(?P<name>\w+)\s*\)',selector,re.I)
if typename: typename=typename.group('name')
else: outmess('cracktypespec: no typename found in %s\n'%(`typespec+selector`))
else:
outmess('cracktypespec: no selector used for %s\n'%(`selector`))
return kindselect,charselect,typename
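# Hedged usage examples (selectors are illustrative):
# cracktypespec('integer', '(kind=8)') -> ({'kind': '8'}, None, None)
# cracktypespec('character', '(len=10)') -> (None, {'len': '10'}, None)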
######
def setattrspec(decl,attr,force=0):
if not decl:
decl={}
if not attr:
return decl
if 'attrspec' not in decl:
decl['attrspec']=[attr]
return decl
if force: decl['attrspec'].append(attr)
if attr in decl['attrspec']: return decl
if attr=='static' and 'automatic' not in decl['attrspec']:
decl['attrspec'].append(attr)
elif attr=='automatic' and 'static' not in decl['attrspec']:
decl['attrspec'].append(attr)
elif attr=='public' and 'private' not in decl['attrspec']:
decl['attrspec'].append(attr)
elif attr=='private' and 'public' not in decl['attrspec']:
decl['attrspec'].append(attr)
elif attr not in ['static','automatic','public','private']: # a conflicting mutually-exclusive attribute is dropped
decl['attrspec'].append(attr)
return decl
def setkindselector(decl,sel,force=0):
if not decl:
decl={}
if not sel:
return decl
if 'kindselector' not in decl:
decl['kindselector']=sel
return decl
for k in sel.keys():
if force or k not in decl['kindselector']:
decl['kindselector'][k]=sel[k]
return decl
def setcharselector(decl,sel,force=0):
if not decl:
decl={}
if not sel:
return decl
if 'charselector' not in decl:
decl['charselector']=sel
return decl
for k in sel.keys():
if force or k not in decl['charselector']:
decl['charselector'][k]=sel[k]
return decl
def getblockname(block,unknown='unknown'):
if 'name' in block:
return block['name']
return unknown
###### post processing
def setmesstext(block):
global filepositiontext
try:
filepositiontext='In: %s:%s\n'%(block['from'],block['name'])
except:
pass
def get_usedict(block):
usedict = {}
if 'parent_block' in block:
usedict = get_usedict(block['parent_block'])
if 'use' in block:
usedict.update(block['use'])
return usedict
def get_useparameters(block, param_map=None):
global f90modulevars
if param_map is None:
param_map = {}
usedict = get_usedict(block)
if not usedict:
return param_map
for usename,mapping in usedict.items():
usename = usename.lower()
if usename not in f90modulevars:
continue
mvars = f90modulevars[usename]
params = get_parameters(mvars)
if not params:
continue
# XXX: apply mapping
if mapping:
errmess('get_useparameters: mapping for %s not implemented.\n' % (mapping))
for k,v in params.items():
if k in param_map:
outmess('get_useparameters: overriding parameter %s with'\
' value from module %s\n' % (`k`,`usename`))
param_map[k] = v
return param_map
def postcrack2(block,tab='',param_map=None):
global f90modulevars
if not f90modulevars:
return block
if type(block)==types.ListType:
ret = []
for g in block:
g = postcrack2(g,tab=tab+'\t',param_map=param_map)
ret.append(g)
return ret
setmesstext(block)
outmess('%sBlock: %s\n'%(tab,block['name']),0)
if param_map is None:
param_map = get_useparameters(block)
if param_map is not None and 'vars' in block:
vars = block['vars']
for n in vars.keys():
var = vars[n]
if 'kindselector' in var:
kind = var['kindselector']
if 'kind' in kind:
val = kind['kind']
if val in param_map:
kind['kind'] = param_map[val]
new_body = []
for b in block['body']:
b = postcrack2(b,tab=tab+'\t',param_map=param_map)
new_body.append(b)
block['body'] = new_body
return block
def postcrack(block,args=None,tab=''):
"""
TODO:
function return values
determine expression types if in argument list
"""
global usermodules,onlyfunctions
if type(block)==types.ListType:
gret=[]
uret=[]
for g in block:
setmesstext(g)
g=postcrack(g,tab=tab+'\t')
if 'name' in g and '__user__' in g['name']: # sort user routines to appear first
uret.append(g)
else:
gret.append(g)
return uret+gret
setmesstext(block)
if (not type(block)==types.DictType) and 'block' not in block:
raise Exception('postcrack: Expected block dictionary instead of ' + \
str(block))
if 'name' in block and not block['name']=='unknown_interface':
outmess('%sBlock: %s\n'%(tab,block['name']),0)
blocktype=block['block']
block=analyzeargs(block)
block=analyzecommon(block)
block['vars']=analyzevars(block)
block['sortvars']=sortvarnames(block['vars'])
if 'args' in block and block['args']:
args=block['args']
block['body']=analyzebody(block,args,tab=tab)
userisdefined=[]
## fromuser = []
if 'use' in block:
useblock=block['use']
for k in useblock.keys():
if '__user__' in k:
userisdefined.append(k)
## if 'map' in useblock[k]:
## for n in useblock[k]['map'].values():
## if n not in fromuser: fromuser.append(n)
else: useblock={}
name=''
if 'name' in block:
name=block['name']
if 'externals' in block and block['externals']:# and not userisdefined: # Build a __user__ module
interfaced=[]
if 'interfaced' in block:
interfaced=block['interfaced']
mvars=copy.copy(block['vars'])
if name:
mname=name+'__user__routines'
else:
mname='unknown__user__routines'
if mname in userisdefined:
i=1
while '%s_%i'%(mname,i) in userisdefined: i=i+1
mname='%s_%i'%(mname,i)
interface={'block':'interface','body':[],'vars':{},'name':name+'_user_interface'}
for e in block['externals']:
## if e in fromuser:
## outmess(' Skipping %s that is defined explicitly in another use statement\n'%(`e`))
## continue
if e in interfaced:
edef=[]
j=-1
for b in block['body']:
j=j+1
if b['block']=='interface':
i=-1
for bb in b['body']:
i=i+1
if 'name' in bb and bb['name']==e:
edef=copy.copy(bb)
del b['body'][i]
break
if edef:
if not b['body']: del block['body'][j]
del interfaced[interfaced.index(e)]
break
interface['body'].append(edef)
else:
if e in mvars and not isexternal(mvars[e]):
interface['vars'][e]=mvars[e]
if interface['vars'] or interface['body']:
block['interfaced']=interfaced
mblock={'block':'python module','body':[interface],'vars':{},'name':mname,'interfaced':block['externals']}
useblock[mname]={}
usermodules.append(mblock)
if useblock:
block['use']=useblock
return block
def sortvarnames(vars):
indep = []
dep = []
for v in vars.keys():
if 'depend' in vars[v] and vars[v]['depend']:
dep.append(v)
#print '%s depends on %s'%(v,vars[v]['depend'])
else: indep.append(v)
n = len(dep)
i = 0
while dep: #XXX: How to catch dependence cycles correctly?
v = dep[0]
fl = 0
for w in dep[1:]:
if w in vars[v]['depend']:
fl = 1
break
if fl:
dep = dep[1:]+[v]
i = i + 1
if i>n:
errmess('sortvarnames: failed to compute dependencies because'
' of cyclic dependencies between '
+', '.join(dep)+'\n')
indep = indep + dep
break
else:
indep.append(v)
dep = dep[1:]
n = len(dep)
i = 0
#print indep
return indep
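# Hedged usage sketch (hypothetical vars dict):
# sortvarnames({'a': {'depend': ['n']}, 'n': {}}) -> ['n', 'a']
# i.e. independent names first, then dependents in a resolvable order.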
def analyzecommon(block):
if not hascommon(block): return block
commonvars=[]
for k in block['common'].keys():
comvars=[]
for e in block['common'][k]:
m=re.match(r'\A\s*\b(?P<name>.*?)\b\s*(\((?P<dims>.*?)\)|)\s*\Z',e,re.I)
if m:
dims=[]
if m.group('dims'):
dims=[x.strip() for x in markoutercomma(m.group('dims')).split('@,@')]
n=m.group('name').strip()
if n in block['vars']:
if 'attrspec' in block['vars'][n]:
block['vars'][n]['attrspec'].append('dimension(%s)'%(','.join(dims)))
else:
block['vars'][n]['attrspec']=['dimension(%s)'%(','.join(dims))]
else:
if dims:
block['vars'][n]={'attrspec':['dimension(%s)'%(','.join(dims))]}
else: block['vars'][n]={}
if n not in commonvars: commonvars.append(n)
else:
n=e
errmess('analyzecommon: failed to extract "<name>[(<dims>)]" from "%s" in common /%s/.\n'%(e,k))
comvars.append(n)
block['common'][k]=comvars
if 'commonvars' not in block:
block['commonvars']=commonvars
else:
block['commonvars']=block['commonvars']+commonvars
return block
def analyzebody(block,args,tab=''):
global usermodules,skipfuncs,onlyfuncs,f90modulevars
setmesstext(block)
body=[]
for b in block['body']:
b['parent_block'] = block
if b['block'] in ['function','subroutine']:
if args is not None and b['name'] not in args:
continue
else:
as_=b['args']
if b['name'] in skipfuncs:
continue
if onlyfuncs and b['name'] not in onlyfuncs:
continue
else: as_=args
b=postcrack(b,as_,tab=tab+'\t')
if b['block']=='interface' and not b['body']:
if 'f2pyenhancements' not in b:
continue
if b['block'].replace(' ','')=='pythonmodule':
usermodules.append(b)
else:
if b['block']=='module':
f90modulevars[b['name']] = b['vars']
body.append(b)
return body
def buildimplicitrules(block):
setmesstext(block)
implicitrules=defaultimplicitrules
attrrules={}
if 'implicit' in block:
if block['implicit'] is None:
implicitrules=None
if verbose>1:
outmess('buildimplicitrules: no implicit rules for routine %s.\n'%`block['name']`)
else:
for k in block['implicit'].keys():
if block['implicit'][k].get('typespec') not in ['static','automatic']:
implicitrules[k]=block['implicit'][k]
else:
attrrules[k]=block['implicit'][k]['typespec']
return implicitrules,attrrules
def myeval(e,g=None,l=None):
r = eval(e,g,l)
if type(r) in [type(0),type(0.0)]:
return r
raise ValueError('r=%r' % (r))
getlincoef_re_1 = re.compile(r'\A\b\w+\b\Z',re.I)
def getlincoef(e,xset): # e = a*x+b ; x in xset
try:
c = int(myeval(e,{},{}))
return 0,c,None
except: pass
if getlincoef_re_1.match(e):
return 1,0,e
len_e = len(e)
for x in xset:
if len(x)>len_e: continue
if re.search(r'\w\s*\([^)]*\b'+x+r'\b', e):
# skip function calls having x as an argument, e.g max(1, x)
continue
re_1 = re.compile(r'(?P<before>.*?)\b'+x+r'\b(?P<after>.*)',re.I)
m = re_1.match(e)
if m:
try:
m1 = re_1.match(e)
while m1:
ee = '%s(%s)%s'%(m1.group('before'),0,m1.group('after'))
m1 = re_1.match(ee)
b = myeval(ee,{},{})
m1 = re_1.match(e)
while m1:
ee = '%s(%s)%s'%(m1.group('before'),1,m1.group('after'))
m1 = re_1.match(ee)
a = myeval(ee,{},{}) - b
m1 = re_1.match(e)
while m1:
ee = '%s(%s)%s'%(m1.group('before'),0.5,m1.group('after'))
m1 = re_1.match(ee)
c = myeval(ee,{},{})
# computing another point to be sure that expression is linear
m1 = re_1.match(e)
while m1:
ee = '%s(%s)%s'%(m1.group('before'),1.5,m1.group('after'))
m1 = re_1.match(ee)
c2 = myeval(ee,{},{})
if (a*0.5+b==c and a*1.5+b==c2):
return a,b,x
except: pass
break
return None,None,None
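# A minimal, self-contained sketch of the probing technique used by
# getlincoef above: if e is linear in x, substituting x=0 and x=1 recovers
# b and a, and two extra sample points reject non-linear expressions.
# Illustrative only; probe_linear is not part of f2py and assumes x is a
# plain variable name (no regex metacharacters).
def probe_linear(e, x):
    subst = lambda val: myeval(re.sub(r'\b%s\b' % x, '(%s)' % val, e), {}, {})
    b = subst(0)            # value at x=0 gives the constant term
    a = subst(1) - b        # value at x=1 minus b gives the slope
    if subst(0.5) == a*0.5 + b and subst(1.5) == a*1.5 + b:
        return a, b
    return None
# e.g. probe_linear('2*n+3', 'n') -> (2, 3); probe_linear('n*n', 'n') -> None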
_varname_match = re.compile(r'\A[a-z]\w*\Z').match
def getarrlen(dl,args,star='*'):
edl = []
try: edl.append(myeval(dl[0],{},{}))
except: edl.append(dl[0])
try: edl.append(myeval(dl[1],{},{}))
except: edl.append(dl[1])
if type(edl[0]) is type(0):
p1 = 1-edl[0]
if p1==0: d = str(dl[1])
elif p1<0: d = '%s-%s'%(dl[1],-p1)
else: d = '%s+%s'%(dl[1],p1)
elif type(edl[1]) is type(0):
p1 = 1+edl[1]
if p1==0: d='-(%s)' % (dl[0])
else: d='%s-(%s)' % (p1,dl[0])
else: d = '%s-(%s)+1'%(dl[1],dl[0])
try: return `myeval(d,{},{})`,None,None
except: pass
d1,d2=getlincoef(dl[0],args),getlincoef(dl[1],args)
if None not in [d1[0],d2[0]]:
if (d1[0],d2[0])==(0,0):
return `d2[1]-d1[1]+1`,None,None
b = d2[1] - d1[1] + 1
d1 = (d1[0],0,d1[2])
d2 = (d2[0],b,d2[2])
if d1[0]==0 and d2[2] in args:
if b<0: return '%s * %s - %s'%(d2[0],d2[2],-b),d2[2],'+%s)/(%s)'%(-b,d2[0])
elif b: return '%s * %s + %s'%(d2[0],d2[2],b),d2[2],'-%s)/(%s)'%(b,d2[0])
else: return '%s * %s'%(d2[0],d2[2]),d2[2],')/(%s)'%(d2[0])
if d2[0]==0 and d1[2] in args:
if b<0: return '%s * %s - %s'%(-d1[0],d1[2],-b),d1[2],'+%s)/(%s)'%(-b,-d1[0])
elif b: return '%s * %s + %s'%(-d1[0],d1[2],b),d1[2],'-%s)/(%s)'%(b,-d1[0])
else: return '%s * %s'%(-d1[0],d1[2]),d1[2],')/(%s)'%(-d1[0])
if d1[2]==d2[2] and d1[2] in args:
a = d2[0] - d1[0]
if not a: return `b`,None,None
if b<0: return '%s * %s - %s'%(a,d1[2],-b),d2[2],'+%s)/(%s)'%(-b,a)
elif b: return '%s * %s + %s'%(a,d1[2],b),d2[2],'-%s)/(%s)'%(b,a)
else: return '%s * %s'%(a,d1[2]),d2[2],')/(%s)'%(a)
if d1[0]==d2[0]==1:
c = str(d1[2])
if c not in args:
if _varname_match(c):
outmess('\tgetarrlen:variable "%s" undefined\n' % (c))
c = '(%s)'%c
if b==0: d='%s-%s' % (d2[2],c)
elif b<0: d='%s-%s-%s' % (d2[2],c,-b)
else: d='%s-%s+%s' % (d2[2],c,b)
elif d1[0]==0:
c2 = str(d2[2])
if c2 not in args:
if _varname_match(c2):
outmess('\tgetarrlen:variable "%s" undefined\n' % (c2))
c2 = '(%s)'%c2
if d2[0]==1: pass
elif d2[0]==-1: c2='-%s' %c2
else: c2='%s*%s'%(d2[0],c2)
if b==0: d=c2
elif b<0: d='%s-%s' % (c2,-b)
else: d='%s+%s' % (c2,b)
elif d2[0]==0:
c1 = str(d1[2])
if c1 not in args:
if _varname_match(c1):
outmess('\tgetarrlen:variable "%s" undefined\n' % (c1))
c1 = '(%s)'%c1
if d1[0]==1: c1='-%s'%c1
elif d1[0]==-1: c1='+%s'%c1
elif d1[0]<0: c1='+%s*%s'%(-d1[0],c1)
else: c1 = '-%s*%s' % (d1[0],c1)
if b==0: d=c1
elif b<0: d='%s-%s' % (c1,-b)
else: d='%s+%s' % (c1,b)
else:
c1 = str(d1[2])
if c1 not in args:
if _varname_match(c1):
outmess('\tgetarrlen:variable "%s" undefined\n' % (c1))
c1 = '(%s)'%c1
if d1[0]==1: c1='-%s'%c1
elif d1[0]==-1: c1='+%s'%c1
elif d1[0]<0: c1='+%s*%s'%(-d1[0],c1)
else: c1 = '-%s*%s' % (d1[0],c1)
c2 = str(d2[2])
if c2 not in args:
if _varname_match(c2):
outmess('\tgetarrlen:variable "%s" undefined\n' % (c2))
c2 = '(%s)'%c2
if d2[0]==1: pass
elif d2[0]==-1: c2='-%s' %c2
else: c2='%s*%s'%(d2[0],c2)
if b==0: d='%s%s' % (c2,c1)
elif b<0: d='%s%s-%s' % (c2,c1,-b)
else: d='%s%s+%s' % (c2,c1,b)
return d,None,None
word_pattern = re.compile(r'\b[a-z][\w$]*\b',re.I)
def _get_depend_dict(name, vars, deps):
if name in vars:
words = vars[name].get('depend',[])
if '=' in vars[name] and not isstring(vars[name]):
for word in word_pattern.findall(vars[name]['=']):
if word not in words and word in vars:
words.append(word)
for word in words[:]:
for w in deps.get(word,[]) \
or _get_depend_dict(word, vars, deps):
if w not in words:
words.append(w)
else:
outmess('_get_depend_dict: no dependence info for %s\n' % (`name`))
words = []
deps[name] = words
return words
def _calc_depend_dict(vars):
names = vars.keys()
depend_dict = {}
for n in names:
_get_depend_dict(n, vars, depend_dict)
return depend_dict
def get_sorted_names(vars):
"""
"""
depend_dict = _calc_depend_dict(vars)
names = []
for name in depend_dict.keys():
if not depend_dict[name]:
names.append(name)
del depend_dict[name]
while depend_dict:
for name, lst in depend_dict.items():
new_lst = [n for n in lst if n in depend_dict]
if not new_lst:
names.append(name)
del depend_dict[name]
else:
depend_dict[name] = new_lst
return [name for name in names if name in vars]
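# Hedged usage sketch (hypothetical vars dict): names are returned so that
# anything a name depends on comes first, e.g.
# get_sorted_names({'m': {'=': 'n+1', 'depend': ['n']}, 'n': {}}) -> ['n', 'm']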
def _kind_func(string):
#XXX: return something sensible.
if string[0] in "'\"":
string = string[1:-1]
if real16pattern.match(string):
return 8
elif real8pattern.match(string):
return 4
return 'kind('+string+')'
def _selected_int_kind_func(r):
#XXX: This should be processor dependent
m = 10**r
if m<=2**8: return 1
if m<=2**16: return 2
if m<=2**32: return 4
if m<=2**64: return 8
if m<=2**128: return 16
return -1
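# e.g. _selected_int_kind_func(4) -> 2 (a 16-bit integer can hold 10**4)
# and _selected_int_kind_func(9) -> 4, mirroring SELECTED_INT_KIND on
# common processors; the byte sizes here are illustrative assumptions.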
def get_parameters(vars, global_params={}):
params = copy.copy(global_params)
g_params = copy.copy(global_params)
for name,func in [('kind',_kind_func),
('selected_int_kind',_selected_int_kind_func),
]:
if name not in g_params:
g_params[name] = func
param_names = []
for n in get_sorted_names(vars):
if 'attrspec' in vars[n] and 'parameter' in vars[n]['attrspec']:
param_names.append(n)
kind_re = re.compile(r'\bkind\s*\(\s*(?P<value>.*)\s*\)',re.I)
selected_int_kind_re = re.compile(r'\bselected_int_kind\s*\(\s*(?P<value>.*)\s*\)',re.I)
for n in param_names:
if '=' in vars[n]:
v = vars[n]['=']
if islogical(vars[n]):
v = v.lower()
for repl in [
('.false.','False'),
('.true.','True'),
#TODO: test .eq., .neq., etc replacements.
]:
v = v.replace(*repl)
v = kind_re.sub(r'kind("\1")',v)
v = selected_int_kind_re.sub(r'selected_int_kind(\1)',v)
if isinteger(vars[n]) and not selected_int_kind_re.match(v):
v = v.split('_')[0]
if isdouble(vars[n]):
tt = list(v)
for m in real16pattern.finditer(v):
tt[m.start():m.end()] = list(\
v[m.start():m.end()].lower().replace('d', 'e'))
v = ''.join(tt)
if iscomplex(vars[n]):
if v[0]=='(' and v[-1]==')':
l = markoutercomma(v[1:-1]).split('@,@')
print n,params # XXX: debug output; complex parameter literals are not specially handled here
try:
params[n] = eval(v,g_params,params)
except Exception,msg:
params[n] = v
#print params
outmess('get_parameters: got "%s" on %s\n' % (msg,`v`))
if isstring(vars[n]) and type(params[n]) is type(0):
params[n] = chr(params[n])
nl = n.lower()
if nl!=n:
params[nl] = params[n]
else:
print vars[n]
outmess('get_parameters:parameter %s does not have value?!\n'%(`n`))
return params
def _eval_length(length,params):
if length in ['(:)','(*)','*']:
return '(*)'
return _eval_scalar(length,params)
_is_kind_number = re.compile(r'\d+_').match
def _eval_scalar(value,params):
if _is_kind_number(value):
value = value.split('_')[0]
try:
value = str(eval(value,{},params))
except (NameError, SyntaxError):
return value
except Exception,msg:
errmess('"%s" in evaluating %r '\
'(available names: %s)\n' \
% (msg,value,params.keys()))
return value
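# Hedged usage examples (the params dict is illustrative):
# _eval_scalar('2_8', {}) -> '2' (the kind suffix is stripped first)
# _eval_scalar('n+1', {'n': 3}) -> '4'
# _eval_scalar('m+1', {}) -> 'm+1' (unknown names are returned unevaluated)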
def analyzevars(block):
global f90modulevars
setmesstext(block)
implicitrules,attrrules=buildimplicitrules(block)
vars=copy.copy(block['vars'])
if block['block']=='function' and block['name'] not in vars:
vars[block['name']]={}
if '' in block['vars']:
del vars['']
if 'attrspec' in block['vars']['']:
gen=block['vars']['']['attrspec']
for n in vars.keys():
for k in ['public','private']:
if k in gen:
vars[n]=setattrspec(vars[n],k)
svars=[]
args = block['args']
for a in args:
try:
vars[a]
svars.append(a)
except KeyError:
pass
for n in vars.keys():
if n not in args: svars.append(n)
params = get_parameters(vars, get_useparameters(block))
dep_matches = {}
name_match = re.compile(r'\w[\w\d_$]*').match
for v in vars.keys():
m = name_match(v)
if m:
n = v[m.start():m.end()]
try:
dep_matches[n]
except KeyError:
dep_matches[n] = re.compile(r'.*\b%s\b'%(v),re.I).match
for n in svars:
if n[0] in attrrules.keys():
vars[n]=setattrspec(vars[n],attrrules[n[0]])
if 'typespec' not in vars[n]:
if not('attrspec' in vars[n] and 'external' in vars[n]['attrspec']):
if implicitrules:
ln0 = n[0].lower()
for k in implicitrules[ln0].keys():
if k=='typespec' and implicitrules[ln0][k]=='undefined':
continue
if k not in vars[n]:
vars[n][k]=implicitrules[ln0][k]
elif k=='attrspec':
for l in implicitrules[ln0][k]:
vars[n]=setattrspec(vars[n],l)
elif n in block['args']:
outmess('analyzevars: typespec of variable %s is not defined in routine %s.\n'%(`n`,block['name']))
if 'charselector' in vars[n]:
if 'len' in vars[n]['charselector']:
l = vars[n]['charselector']['len']
try:
l = str(eval(l,{},params))
except:
pass
vars[n]['charselector']['len'] = l
if 'kindselector' in vars[n]:
if 'kind' in vars[n]['kindselector']:
l = vars[n]['kindselector']['kind']
try:
l = str(eval(l,{},params))
except:
pass
vars[n]['kindselector']['kind'] = l
savelindims = {}
if 'attrspec' in vars[n]:
attr=vars[n]['attrspec']
attr.reverse()
vars[n]['attrspec']=[]
dim,intent,depend,check,note=None,None,None,None,None
for a in attr:
if a[:9]=='dimension': dim=(a[9:].strip())[1:-1]
elif a[:6]=='intent': intent=(a[6:].strip())[1:-1]
elif a[:6]=='depend': depend=(a[6:].strip())[1:-1]
elif a[:5]=='check': check=(a[5:].strip())[1:-1]
elif a[:4]=='note': note=(a[4:].strip())[1:-1]
else: vars[n]=setattrspec(vars[n],a)
if intent:
if 'intent' not in vars[n]:
vars[n]['intent']=[]
for c in [x.strip() for x in markoutercomma(intent).split('@,@')]:
if not c in vars[n]['intent']:
vars[n]['intent'].append(c)
intent=None
if note:
note=note.replace('\\n\\n','\n\n')
note=note.replace('\\n ','\n')
if 'note' not in vars[n]:
vars[n]['note']=[note]
else:
vars[n]['note'].append(note)
note=None
if depend is not None:
if 'depend' not in vars[n]:
vars[n]['depend']=[]
for c in rmbadname([x.strip() for x in markoutercomma(depend).split('@,@')]):
if c not in vars[n]['depend']:
vars[n]['depend'].append(c)
depend=None
if check is not None:
if 'check' not in vars[n]:
vars[n]['check']=[]
for c in [x.strip() for x in markoutercomma(check).split('@,@')]:
if not c in vars[n]['check']:
vars[n]['check'].append(c)
check=None
if dim and 'dimension' not in vars[n]:
vars[n]['dimension']=[]
for d in rmbadname([x.strip() for x in markoutercomma(dim).split('@,@')]):
star = '*'
if d==':':
star=':'
if d in params:
d = str(params[d])
for p in params.keys():
m = re.match(r'(?P<before>.*?)\b'+p+r'\b(?P<after>.*)',d,re.I)
if m:
#outmess('analyzevars:replacing parameter %s in %s (dimension of %s) with %s\n'%(`p`,`d`,`n`,`params[p]`))
d = m.group('before')+str(params[p])+m.group('after')
if d==star:
dl = [star]
else:
dl=markoutercomma(d,':').split('@:@')
if len(dl)==2 and '*' in dl: # e.g. dimension(5:*)
dl = ['*']
d = '*'
if len(dl)==1 and not dl[0]==star: dl = ['1',dl[0]]
if len(dl)==2:
d,v,di = getarrlen(dl,block['vars'].keys())
if d[:4] == '1 * ': d = d[4:]
if di and di[-4:] == '/(1)': di = di[:-4]
if v: savelindims[d] = v,di
vars[n]['dimension'].append(d)
if 'dimension' in vars[n]:
if isintent_c(vars[n]):
shape_macro = 'shape'
else:
shape_macro = 'shape'#'fshape'
if isstringarray(vars[n]):
if 'charselector' in vars[n]:
d = vars[n]['charselector']
if '*' in d:
d = d['*']
errmess('analyzevars: character array "character*%s %s(%s)" is considered as "character %s(%s)"; "intent(c)" is forced.\n'\
%(d,n,
','.join(vars[n]['dimension']),
n,','.join(vars[n]['dimension']+[d])))
vars[n]['dimension'].append(d)
del vars[n]['charselector']
if 'intent' not in vars[n]:
vars[n]['intent'] = []
if 'c' not in vars[n]['intent']:
vars[n]['intent'].append('c')
else:
errmess("analyzevars: charselector=%r unhandled." % (d))
if 'check' not in vars[n] and 'args' in block and n in block['args']:
flag = 'depend' not in vars[n]
if flag:
vars[n]['depend']=[]
vars[n]['check']=[]
if 'dimension' in vars[n]:
#/----< no check
#vars[n]['check'].append('rank(%s)==%s'%(n,len(vars[n]['dimension'])))
i=-1; ni=len(vars[n]['dimension'])
for d in vars[n]['dimension']:
ddeps=[] # dependencies of 'd'
ad=''
pd=''
#origd = d
if d not in vars:
if d in savelindims:
pd,ad='(',savelindims[d][1]
d = savelindims[d][0]
else:
for r in block['args']:
#for r in block['vars'].keys():
if r not in vars:
continue
if re.match(r'.*?\b'+r+r'\b',d,re.I):
ddeps.append(r)
if d in vars:
if 'attrspec' in vars[d]:
for aa in vars[d]['attrspec']:
if aa[:6]=='depend':
ddeps += aa[6:].strip()[1:-1].split(',')
if 'depend' in vars[d]:
ddeps=ddeps+vars[d]['depend']
i=i+1
if d in vars and ('depend' not in vars[d]) \
and ('=' not in vars[d]) and (d not in vars[n]['depend']) \
and l_or(isintent_in, isintent_inout, isintent_inplace)(vars[n]):
vars[d]['depend']=[n]
if ni>1:
vars[d]['=']='%s%s(%s,%s)%s'% (pd,shape_macro,n,i,ad)
else:
vars[d]['=']='%slen(%s)%s'% (pd,n,ad)
# /---< no check
if 1 and 'check' not in vars[d]:
if ni>1:
vars[d]['check']=['%s%s(%s,%i)%s==%s'\
%(pd,shape_macro,n,i,ad,d)]
else:
vars[d]['check']=['%slen(%s)%s>=%s'%(pd,n,ad,d)]
if 'attrspec' not in vars[d]:
vars[d]['attrspec']=['optional']
if ('optional' not in vars[d]['attrspec']) and\
('required' not in vars[d]['attrspec']):
vars[d]['attrspec'].append('optional')
elif d not in ['*',':']:
#/----< no check
#if ni>1: vars[n]['check'].append('shape(%s,%i)==%s'%(n,i,d))
#else: vars[n]['check'].append('len(%s)>=%s'%(n,d))
if flag:
if d in vars:
if n not in ddeps:
vars[n]['depend'].append(d)
else:
vars[n]['depend'] = vars[n]['depend'] + ddeps
elif isstring(vars[n]):
length='1'
if 'charselector' in vars[n]:
if '*' in vars[n]['charselector']:
length = _eval_length(vars[n]['charselector']['*'],
params)
vars[n]['charselector']['*']=length
elif 'len' in vars[n]['charselector']:
length = _eval_length(vars[n]['charselector']['len'],
params)
del vars[n]['charselector']['len']
vars[n]['charselector']['*']=length
if not vars[n]['check']:
del vars[n]['check']
if flag and not vars[n]['depend']:
del vars[n]['depend']
if '=' in vars[n]:
if 'attrspec' not in vars[n]:
vars[n]['attrspec']=[]
if ('optional' not in vars[n]['attrspec']) and \
('required' not in vars[n]['attrspec']):
vars[n]['attrspec'].append('optional')
if 'depend' not in vars[n]:
vars[n]['depend']=[]
for v,m in dep_matches.items():
if m(vars[n]['=']): vars[n]['depend'].append(v)
if not vars[n]['depend']: del vars[n]['depend']
if isscalar(vars[n]):
vars[n]['='] = _eval_scalar(vars[n]['='],params)
for n in vars.keys():
if n==block['name']: # n is block name
if 'note' in vars[n]:
block['note']=vars[n]['note']
if block['block']=='function':
if 'result' in block and block['result'] in vars:
vars[n]=appenddecl(vars[n],vars[block['result']])
if 'prefix' in block:
pr=block['prefix']; ispure=0; isrec=1
pr1=pr.replace('pure','')
ispure=(not pr==pr1)
pr=pr1.replace('recursive','')
isrec=(not pr==pr1)
m=typespattern[0].match(pr)
if m:
typespec,selector,attr,edecl=cracktypespec0(m.group('this'),m.group('after'))
kindselect,charselect,typename=cracktypespec(typespec,selector)
vars[n]['typespec']=typespec
if kindselect:
if 'kind' in kindselect:
try:
kindselect['kind'] = eval(kindselect['kind'],{},params)
except:
pass
vars[n]['kindselector']=kindselect
if charselect: vars[n]['charselector']=charselect
if typename: vars[n]['typename']=typename
if ispure: vars[n]=setattrspec(vars[n],'pure')
if isrec: vars[n]=setattrspec(vars[n],'recursive')
else:
outmess('analyzevars: prefix (%s) was not used\n'%`block['prefix']`)
if not block['block'] in ['module','pythonmodule','python module','block data']:
if 'commonvars' in block:
neededvars=copy.copy(block['args']+block['commonvars'])
else:
neededvars=copy.copy(block['args'])
for n in vars.keys():
if l_or(isintent_callback,isintent_aux)(vars[n]):
neededvars.append(n)
if 'entry' in block:
neededvars.extend(block['entry'].keys())
for k in block['entry'].keys():
for n in block['entry'][k]:
if n not in neededvars:
neededvars.append(n)
if block['block']=='function':
if 'result' in block:
neededvars.append(block['result'])
else:
neededvars.append(block['name'])
if block['block'] in ['subroutine','function']:
name = block['name']
if name in vars and 'intent' in vars[name]:
block['intent'] = vars[name]['intent']
if block['block'] == 'type':
neededvars.extend(vars.keys())
for n in vars.keys():
if n not in neededvars:
del vars[n]
return vars
analyzeargs_re_1 = re.compile(r'\A[a-z]+[\w$]*\Z',re.I)
def analyzeargs(block):
setmesstext(block)
implicitrules,attrrules=buildimplicitrules(block)
if 'args' not in block:
block['args']=[]
args=[]
re_1 = analyzeargs_re_1
for a in block['args']:
if not re_1.match(a): # `a` is an expression
at=determineexprtype(a,block['vars'],implicitrules)
na='e_'
for c in a:
if c not in string.lowercase+string.digits: c='_'
na=na+c
if na[-1]=='_': na=na+'e'
else: na=na+'_e'
a=na
while a in block['vars'] or a in block['args']:
a=a+'r'
block['vars'][a]=at
args.append(a)
if a not in block['vars']:
block['vars'][a]={}
if 'externals' in block and a in block['externals']+block['interfaced']:
block['vars'][a]=setattrspec(block['vars'][a],'external')
block['args']=args
if 'entry' in block:
for k,args1 in block['entry'].items():
for a in args1:
if a not in block['vars']:
block['vars'][a]={}
for b in block['body']:
if b['name'] in args:
if 'externals' not in block:
block['externals']=[]
if b['name'] not in block['externals']:
block['externals'].append(b['name'])
if 'result' in block and block['result'] not in block['vars']:
block['vars'][block['result']]={}
return block
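# Hedged example of the argument renaming above: an expression argument such
# as 'n+1' becomes the generated variable name 'e_n_1_e' (lowercase letters
# and digits kept, other characters mapped to '_', wrapped in 'e_'...'_e').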
determineexprtype_re_1 = re.compile(r'\A\(.+?[,].+?\)\Z',re.I)
determineexprtype_re_2 = re.compile(r'\A[+-]?\d+(_(?P<name>[\w]+)|)\Z',re.I)
determineexprtype_re_3 = re.compile(r'\A[+-]?[\d.]+[\d+\-de.]*(_(?P<name>[\w]+)|)\Z',re.I)
determineexprtype_re_4 = re.compile(r'\A\(.*\)\Z',re.I)
determineexprtype_re_5 = re.compile(r'\A(?P<name>\w+)\s*\(.*?\)\s*\Z',re.I)
def _ensure_exprdict(r):
if type(r) is type(0):
return {'typespec':'integer'}
if type(r) is type(0.0):
return {'typespec':'real'}
if type(r) is type(0j):
return {'typespec':'complex'}
assert type(r) is type({}),`r`
return r
def determineexprtype(expr,vars,rules={}):
if expr in vars:
return _ensure_exprdict(vars[expr])
expr=expr.strip()
if determineexprtype_re_1.match(expr):
return {'typespec':'complex'}
m=determineexprtype_re_2.match(expr)
if m:
if 'name' in m.groupdict() and m.group('name'):
outmess('determineexprtype: selected kind types not supported (%s)\n'%`expr`)
return {'typespec':'integer'}
m = determineexprtype_re_3.match(expr)
if m:
if 'name' in m.groupdict() and m.group('name'):
outmess('determineexprtype: selected kind types not supported (%s)\n'%`expr`)
return {'typespec':'real'}
for op in ['+','-','*','/']:
for e in [x.strip() for x in markoutercomma(expr,comma=op).split('@'+op+'@')]:
if e in vars:
return _ensure_exprdict(vars[e])
t={}
if determineexprtype_re_4.match(expr): # in parenthesis
t=determineexprtype(expr[1:-1],vars,rules)
else:
m = determineexprtype_re_5.match(expr)
if m:
rn=m.group('name')
t=determineexprtype(m.group('name'),vars,rules)
if t and 'attrspec' in t:
del t['attrspec']
if not t:
if rn[0] in rules:
return _ensure_exprdict(rules[rn[0]])
if expr[0] in '\'"':
return {'typespec':'character','charselector':{'*':'*'}}
if not t:
outmess('determineexprtype: could not determine the type of expression %s.\n'%(`expr`))
return t
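# Hedged usage examples:
# determineexprtype('(1.,2.)', {}) -> {'typespec': 'complex'}
# determineexprtype('123', {}) -> {'typespec': 'integer'}
# determineexprtype("'abc'", {}) -> {'typespec': 'character', 'charselector': {'*': '*'}}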
######
def crack2fortrangen(block,tab='\n'):
global skipfuncs, onlyfuncs
setmesstext(block)
ret=''
if isinstance(block, list):
for g in block:
if g and g['block'] in ['function','subroutine']:
if g['name'] in skipfuncs:
continue
if onlyfuncs and g['name'] not in onlyfuncs:
continue
ret=ret+crack2fortrangen(g,tab)
return ret
prefix=''
name=''
args=''
blocktype=block['block']
if blocktype=='program': return ''
al=[]
if 'name' in block:
name=block['name']
if 'args' in block:
vars = block['vars']
al = [a for a in block['args'] if not isintent_callback(vars[a])]
if block['block']=='function' or al:
args='(%s)'%','.join(al)
f2pyenhancements = ''
if 'f2pyenhancements' in block:
for k in block['f2pyenhancements'].keys():
f2pyenhancements = '%s%s%s %s'%(f2pyenhancements,tab+tabchar,k,block['f2pyenhancements'][k])
intent_lst = block.get('intent',[])[:]
if blocktype=='function' and 'callback' in intent_lst:
intent_lst.remove('callback')
if intent_lst:
f2pyenhancements = '%s%sintent(%s) %s'%\
(f2pyenhancements,tab+tabchar,
','.join(intent_lst),name)
use=''
if 'use' in block:
use=use2fortran(block['use'],tab+tabchar)
common=''
if 'common' in block:
common=common2fortran(block['common'],tab+tabchar)
if name=='unknown_interface': name=''
result=''
if 'result' in block:
result=' result (%s)'%block['result']
if block['result'] not in al:
al.append(block['result'])
#if 'prefix' in block:
# prefix=block['prefix']+' '
body=crack2fortrangen(block['body'],tab+tabchar)
vars=vars2fortran(block,block['vars'],al,tab+tabchar)
mess=''
if 'from' in block:
mess='! in %s'%block['from']
if 'entry' in block:
entry_stmts = ''
for k,i in block['entry'].items():
entry_stmts = '%s%sentry %s(%s)' \
% (entry_stmts,tab+tabchar,k,','.join(i))
body = body + entry_stmts
if blocktype=='block data' and name=='_BLOCK_DATA_':
name = ''
ret='%s%s%s %s%s%s %s%s%s%s%s%s%send %s %s'%(tab,prefix,blocktype,name,args,result,mess,f2pyenhancements,use,vars,common,body,tab,blocktype,name)
return ret
def common2fortran(common,tab=''):
ret=''
for k in common.keys():
if k=='_BLNK_':
ret='%s%scommon %s'%(ret,tab,','.join(common[k]))
else:
ret='%s%scommon /%s/ %s'%(ret,tab,k,','.join(common[k]))
return ret
def use2fortran(use,tab=''):
ret=''
for m in use.keys():
ret='%s%suse %s,'%(ret,tab,m)
if use[m]=={}:
if ret and ret[-1]==',': ret=ret[:-1]
continue
if 'only' in use[m] and use[m]['only']:
ret='%s,only:'%(ret)
if 'map' in use[m] and use[m]['map']:
c=' '
for k in use[m]['map'].keys():
if k==use[m]['map'][k]:
ret='%s%s%s'%(ret,c,k); c=','
else:
ret='%s%s%s=>%s'%(ret,c,k,use[m]['map'][k]); c=','
if ret and ret[-1]==',': ret=ret[:-1]
return ret
def true_intent_list(var):
lst = var['intent']
ret = []
for intent in lst:
try:
exec('c = isintent_%s(var)' % intent)
except NameError:
c = 0
if c:
ret.append(intent)
return ret
def vars2fortran(block,vars,args,tab=''):
"""
TODO:
public sub
...
"""
setmesstext(block)
ret=''
nout=[]
for a in args:
if a in block['vars']:
nout.append(a)
if 'commonvars' in block:
for a in block['commonvars']:
if a in vars:
if a not in nout:
nout.append(a)
else:
errmess('vars2fortran: Confused?!: "%s" is not defined in vars.\n'%a)
if 'varnames' in block:
nout.extend(block['varnames'])
for a in vars.keys():
if a not in nout:
nout.append(a)
for a in nout:
if 'depend' in vars[a]:
for d in vars[a]['depend']:
if d in vars and 'depend' in vars[d] and a in vars[d]['depend']:
errmess('vars2fortran: Warning: cross-dependence between variables "%s" and "%s"\n'%(a,d))
if 'externals' in block and a in block['externals']:
if isintent_callback(vars[a]):
ret='%s%sintent(callback) %s'%(ret,tab,a)
ret='%s%sexternal %s'%(ret,tab,a)
if isoptional(vars[a]):
ret='%s%soptional %s'%(ret,tab,a)
if a in vars and 'typespec' not in vars[a]:
continue
cont=1
for b in block['body']:
if a==b['name'] and b['block']=='function':
cont=0;break
if cont:
continue
if a not in vars:
show(vars)
outmess('vars2fortran: No definition for argument "%s".\n'%a)
continue
if a==block['name'] and not block['block']=='function':
continue
if 'typespec' not in vars[a]:
if 'attrspec' in vars[a] and 'external' in vars[a]['attrspec']:
if a in args:
ret='%s%sexternal %s'%(ret,tab,a)
continue
show(vars[a])
outmess('vars2fortran: No typespec for argument "%s".\n'%a)
continue
vardef=vars[a]['typespec']
if vardef=='type' and 'typename' in vars[a]:
vardef='%s(%s)'%(vardef,vars[a]['typename'])
selector={}
if 'kindselector' in vars[a]:
selector=vars[a]['kindselector']
elif 'charselector' in vars[a]:
selector=vars[a]['charselector']
if '*' in selector:
if selector['*'] in ['*',':']:
vardef='%s*(%s)'%(vardef,selector['*'])
else:
vardef='%s*%s'%(vardef,selector['*'])
else:
if 'len' in selector:
vardef='%s(len=%s'%(vardef,selector['len'])
if 'kind' in selector:
vardef='%s,kind=%s)'%(vardef,selector['kind'])
else:
vardef='%s)'%(vardef)
elif 'kind' in selector:
vardef='%s(kind=%s)'%(vardef,selector['kind'])
c=' '
if 'attrspec' in vars[a]:
attr=[]
for l in vars[a]['attrspec']:
if l not in ['external']:
attr.append(l)
if attr:
vardef='%s %s'%(vardef,','.join(attr))
c=','
if 'dimension' in vars[a]:
# if not isintent_c(vars[a]):
# vars[a]['dimension'].reverse()
vardef='%s%sdimension(%s)'%(vardef,c,','.join(vars[a]['dimension']))
c=','
if 'intent' in vars[a]:
lst = true_intent_list(vars[a])
if lst:
vardef='%s%sintent(%s)'%(vardef,c,','.join(lst))
c=','
if 'check' in vars[a]:
vardef='%s%scheck(%s)'%(vardef,c,','.join(vars[a]['check']))
c=','
if 'depend' in vars[a]:
vardef='%s%sdepend(%s)'%(vardef,c,','.join(vars[a]['depend']))
c=','
if '=' in vars[a]:
v = vars[a]['=']
if vars[a]['typespec'] in ['complex','double complex']:
try:
v = eval(v)
v = '(%s,%s)' % (v.real,v.imag)
except:
pass
vardef='%s :: %s=%s'%(vardef,a,v)
else:
vardef='%s :: %s'%(vardef,a)
ret='%s%s%s'%(ret,tab,vardef)
return ret
######
def crackfortran(files):
global usermodules
outmess('Reading fortran codes...\n',0)
readfortrancode(files,crackline)
outmess('Post-processing...\n',0)
usermodules=[]
postlist=postcrack(grouplist[0])
outmess('Post-processing (stage 2)...\n',0)
postlist=postcrack2(postlist)
return usermodules+postlist
def crack2fortran(block):
global f2py_version
pyf=crack2fortrangen(block)+'\n'
header="""! -*- f90 -*-
! Note: the context of this file is case sensitive.
"""
footer="""
! This file was auto-generated with f2py (version:%s).
! See http://cens.ioc.ee/projects/f2py2e/
"""%(f2py_version)
return header+pyf+footer
if __name__ == "__main__":
files=[]
funcs=[]
f=1;f2=0;f3=0
showblocklist=0
for l in sys.argv[1:]:
if l=='': pass
elif l[0]==':':
f=0
elif l=='-quiet':
quiet=1
verbose=0
elif l=='-verbose':
verbose=2
quiet=0
elif l=='-fix':
if strictf77:
outmess('Use option -f90 before -fix if Fortran 90 code is in fix form.\n',0)
skipemptyends=1
sourcecodeform='fix'
elif l=='-skipemptyends':
skipemptyends=1
elif l=='--ignore-contains':
ignorecontains=1
elif l=='-f77':
strictf77=1
sourcecodeform='fix'
elif l=='-f90':
strictf77=0
sourcecodeform='free'
skipemptyends=1
elif l=='-h':
f2=1
elif l=='-show':
showblocklist=1
elif l=='-m':
f3=1
elif l[0]=='-':
errmess('Unknown option %s\n'%`l`)
elif f2:
f2=0
pyffilename=l
elif f3:
f3=0
f77modulename=l
elif f:
try:
open(l).close()
files.append(l)
except IOError,detail:
errmess('IOError: %s\n'%str(detail))
else:
funcs.append(l)
if not strictf77 and f77modulename and not skipemptyends:
outmess("""\
Warning: You have specifyied module name for non Fortran 77 code
that should not need one (expect if you are scanning F90 code
for non module blocks but then you should use flag -skipemptyends
and also be sure that the files do not contain programs without program statement).
""",0)
postlist=crackfortran(files) # crackfortran() takes only the file list; the collected 'funcs' are not used by it
if pyffilename:
outmess('Writing fortran code to file %s\n'%`pyffilename`,0)
pyf=crack2fortran(postlist)
f=open(pyffilename,'w')
f.write(pyf)
f.close()
if showblocklist:
show(postlist)
| bsd-3-clause |
zmarcantel/Marlin | buildroot/share/atom/auto_build.py | 3 | 46162 | #######################################
#
# Marlin 3D Printer Firmware
# Copyright (C) 2019 MarlinFirmware [https://github.com/MarlinFirmware/Marlin]
#
# Based on Sprinter and grbl.
# Copyright (C) 2011 Camiel Gubbels / Erik van der Zalm
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
#######################################
#######################################
#
# Revision: 2.0.1
#
# Description: script to automate PlatformIO builds
# CLI: python auto_build.py build_option
# build_option (required)
# build executes -> platformio run -e target_env
# clean executes -> platformio run --target clean -e target_env
# upload executes -> platformio run --target upload -e target_env
# traceback executes -> platformio run --target upload -e target_env
# program executes -> platformio run --target program -e target_env
# test executes -> platformio test upload -e target_env
# remote executes -> platformio remote run --target upload -e target_env
# debug executes -> platformio debug -e target_env
#
# 'traceback' just uses the debug variant of the target environment if one exists
#
#######################################
#######################################
#
# General program flow
#
# 1. Scans Configuration.h for the motherboard name and Marlin version.
# 2. Scans pins.h for the motherboard.
# returns the CPU(s) and platformio environment(s) used by the motherboard
# 3. If further info is needed then a popup gets it from the user.
# 4. The OUTPUT_WINDOW class creates a window to display the output of the PlatformIO program.
# 5. A thread is created by the OUTPUT_WINDOW class in order to execute the RUN_PIO function.
# 6. The RUN_PIO function uses a subprocess to run the CLI version of PlatformIO.
# 7. The "iter(pio_subprocess.stdout.readline, '')" function is used to stream the output of
# PlatformIO back to the RUN_PIO function.
# 8. Each line returned from PlatformIO is formatted to match the color coding seen in the
# PlatformIO GUI.
# 9. If there is a color change within a line then the line is broken at each color change
# and sent separately.
# 10. Each formatted segment (could be a full line or a split line) is put into the queue
# IO_queue as it arrives from the platformio subprocess.
# 11. The OUTPUT_WINDOW class periodically samples IO_queue. If data is available then it
# is written to the window.
# 12. The window stays open until the user closes it.
# 13. The OUTPUT_WINDOW class continues to execute as long as the window is open. This allows
# copying, saving, scrolling of the window. A right click popup is available.
#
#######################################
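# A minimal sketch of the streaming scheme in steps 6-11 above (illustrative
# only; stream_to_queue is not part of this script):
def stream_to_queue(cmd, out_queue):
    # Run `cmd` and push each output line onto `out_queue` for a GUI thread
    # to drain, colorize and display.
    import subprocess
    proc = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
    for line in iter(proc.stdout.readline, ''):  # '' marks EOF on Python 2
        out_queue.put(line)
    proc.stdout.close()
    return proc.wait()
# e.g. stream_to_queue(['platformio', 'run', '-e', 'megaatmega2560'], IO_queue)
# where the environment name is illustrative.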
import sys
import os
pwd = os.getcwd() # make sure we're executing from the correct directory level
pwd = pwd.replace('\\', '/')
if 0 <= pwd.find('buildroot/share/atom'):
pwd = pwd[ : pwd.find('buildroot/share/atom')]
os.chdir(pwd)
print 'pwd: ', pwd
num_args = len(sys.argv)
if num_args > 1:
build_type = str(sys.argv[1])
else:
print 'Please specify build type'
exit()
print 'build_type: ', build_type
print '\nWorking\n'
python_ver = sys.version_info[0] # major version - 2 or 3
if python_ver == 2:
print "python version " + str(sys.version_info[0]) + "." + str(sys.version_info[1]) + "." + str(sys.version_info[2])
else:
print "python version " + str(sys.version_info[0])
print "This script only runs under python 2"
exit()
import platform
current_OS = platform.system()
#globals
target_env = ''
board_name = ''
from datetime import datetime, date, time
#########
# Python 2 error messages:
# Can't find a usable init.tcl in the following directories ...
# error "invalid command name "tcl_findLibrary""
#
# Fix for the above errors on my Win10 system:
# search all init.tcl files for the line "package require -exact Tcl" that has the highest 8.5.x number
# copy it into the first directory listed in the error messages
# set the environmental variables TCLLIBPATH and TCL_LIBRARY to the directory where you found the init.tcl file
# reboot
#########
##########################################################################################
#
# popup to get input from user
#
##########################################################################################
def get_answer(board_name, cpu_label_txt, cpu_a_txt, cpu_b_txt):
if python_ver == 2:
import Tkinter as tk
else:
import tkinter as tk
def CPU_exit_3(): # forward declare functions
CPU_exit_3_()
def CPU_exit_4():
CPU_exit_4_()
def kill_session():
kill_session_()
root_get_answer = tk.Tk()
root_get_answer.chk_state_1 = 1 # declare variables used by TK and enable
chk_state_1 = 0 # set initial state of check boxes
global get_answer_val
get_answer_val = 2 # return get_answer_val, set default to match chk_state_1 default
l1 = tk.Label(text=board_name,
fg = "light green",
bg = "dark green",
font = "Helvetica 12 bold").grid(row=1)
l2 = tk.Label(text=cpu_label_txt,
fg = "light green",
bg = "dark green",
font = "Helvetica 16 bold italic").grid(row=2)
b4 = tk.Checkbutton(text=cpu_a_txt,
fg = "black",
font = "Times 20 bold ",
variable=chk_state_1, onvalue=1, offvalue=0,
command = CPU_exit_3).grid(row=3)
b5 = tk.Checkbutton(text=cpu_b_txt,
fg = "black",
font = "Times 20 bold ",
variable=chk_state_1, onvalue=0, offvalue=1,
command = CPU_exit_4).grid(row=4) # use same variable but inverted so they will track
b6 = tk.Button(text="CONFIRM",
fg = "blue",
font = "Times 20 bold ",
command = root_get_answer.destroy).grid(row=5, pady=4)
b7 = tk.Button(text="CANCEL",
fg = "red",
font = "Times 12 bold ",
command = kill_session).grid(row=6, pady=4)
def CPU_exit_3_():
global get_answer_val
get_answer_val = 1
def CPU_exit_4_():
global get_answer_val
get_answer_val = 2
def kill_session_():
raise SystemExit(0) # kill everything
root_get_answer.mainloop()
# end - get answer
#
# move custom board definitions from project folder to PlatformIO
#
def resolve_path(path):
import os
# turn the selection into a partial path
if 0 <= path.find('"'):
path = path[ path.find('"') : ]
if 0 <= path.find(', line '):
path = path.replace(', line ', ':')
path = path.replace('"', '')
#get line and column numbers
line_num = 1
column_num = 1
line_start = path.find(':', 2) # start at 2 so we don't eat a Windows drive prefix like 'C:'
column_start = path.find(':', line_start + 1)
if column_start == -1:
column_start = len(path)
column_end = path.find(':', column_start + 1)
if column_end == -1:
column_end = len(path)
if 0 <= line_start:
line_num = path[ line_start + 1 : column_start]
if line_num == '':
line_num = 1
if not(column_start == column_end):
column_num = path[ column_start + 1 : column_end]
if column_num == '':
column_num = 0
index_end = path.find(',')
if 0 <= index_end:
path = path[ : index_end] # delete comma and anything after
index_end = path.find(':', 2)
if 0 <= index_end:
path = path[ : path.find(':', 2)] # delete the line number and anything after
path = path.replace('\\','/')
if 1 == path.find(':') and current_OS == 'Windows':
return path, line_num, column_num # found a full path - no need for further processing
elif 0 == path.find('/') and (current_OS == 'Linux' or current_OS == 'Darwin'):
return path, line_num, column_num # found a full path - no need for further processing
else:
# resolve as many '../' as we can
while 0 <= path.find('../'):
end = path.find('../') - 1
start = path.find('/')
while 0 <= path.find('/',start) and end > path.find('/',start):
start = path.find('/',start) + 1
path = path[0:start] + path[end + 4: ]
# this is an alternative to the above - it just deletes the '../' section
# start_temp = path.find('../')
# while 0 <= path.find('../',start_temp):
# start = path.find('../',start_temp)
# start_temp = start + 1
# if 0 <= start:
# path = path[start + 2 : ]
start = path.find('/')
if not(0 == start): # make sure path starts with '/'
while 0 == path.find(' '): # eat any spaces at the beginning
path = path[ 1 : ]
path = '/' + path
if current_OS == 'Windows':
search_path = path.replace('/', '\\') # os.walk uses '\' in Windows
else:
search_path = path
start_path = os.path.abspath('')
# search project directory for the selection
found = False
full_path = ''
for root, directories, filenames in os.walk(start_path):
for filename in filenames:
if 0 <= root.find('.git'): # don't bother looking in this directory
break
full_path = os.path.join(root,filename)
if 0 <= full_path.find(search_path):
found = True
break
if found:
break
return full_path, line_num, column_num
# end - resolve_path
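# Hedged example: a compiler message fragment like
# '"Marlin/Configuration.h", line 123' resolves (roughly) to
# ('<abs path>/Marlin/Configuration.h', '123', 1) after the project walk.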
#
# Opens the file in the preferred editor at the line & column number
# If the preferred editor isn't already running then it tries the next.
# If none are open then the system default is used.
#
# Editor order:
# 1. Notepad++ (Windows only)
# 2. Sublime Text
# 3. Atom
# 4. System default (opens at line 1, column 1 only)
#
def open_file(path):
import subprocess
file_path, line_num, column_num = resolve_path(path)
if file_path == '' :
return
if current_OS == 'Windows':
editor_note = subprocess.check_output('wmic process where "name=' + "'notepad++.exe'" + '" get ExecutablePath')
editor_sublime = subprocess.check_output('wmic process where "name=' + "'sublime_text.exe'" + '" get ExecutablePath')
editor_atom = subprocess.check_output('wmic process where "name=' + "'atom.exe'" + '" get ExecutablePath')
if 0 <= editor_note.find('notepad++.exe'):
start = editor_note.find('\n') + 1
end = editor_note.find('\n',start + 5) -4
editor_note = editor_note[ start : end]
command = file_path , ' -n' + str(line_num) , ' -c' + str(column_num)
subprocess.Popen([editor_note, command])
elif 0 <= editor_sublime.find('sublime_text.exe'):
start = editor_sublime.find('\n') + 1
end = editor_sublime.find('\n',start + 5) -4
editor_sublime = editor_sublime[ start : end]
command = file_path + ':' + line_num + ':' + column_num
subprocess.Popen([editor_sublime, command])
elif 0 <= editor_atom.find('atom.exe'):
start = editor_atom.find('\n') + 1
end = editor_atom.find('\n',start + 5) -4
editor_atom = editor_atom[ start : end]
command = file_path + ':' + str(line_num) + ':' + str(column_num)
subprocess.Popen([editor_atom, command])
else:
os.startfile(file_path) # open file with default app (resolve_path() returns a tuple; the path was already unpacked above)
elif current_OS == 'Linux':
command = file_path + ':' + str(line_num) + ':' + str(column_num)
index_end = command.find(',')
if 0 <= index_end:
command = command[ : index_end] # sometimes a comma magically appears, don't want it
running_apps = subprocess.Popen('ps ax -o cmd', stdout=subprocess.PIPE, shell=True)
(output, err) = running_apps.communicate()
temp = output.split('\n')
def find_editor_linux(name, search_obj):
for line in search_obj:
if 0 <= line.find(name):
path = line
return True, path
return False , ''
(success_sublime, editor_path_sublime) = find_editor_linux('sublime_text',temp)
(success_atom, editor_path_atom) = find_editor_linux('atom',temp)
if success_sublime:
subprocess.Popen([editor_path_sublime, command])
elif success_atom:
subprocess.Popen([editor_path_atom, command])
else:
os.system('xdg-open ' + file_path )
elif current_OS == 'Darwin': # MAC
command = file_path + ':' + str(line_num) + ':' + str(column_num)
index_end = command.find(',')
if 0 <= index_end:
command = command[ : index_end] # sometimes a comma magically appears, don't want it
running_apps = subprocess.Popen('ps axwww -o command', stdout=subprocess.PIPE, shell=True)
(output, err) = running_apps.communicate()
temp = output.split('\n')
def find_editor_mac(name, search_obj):
for line in search_obj:
if 0 <= line.find(name):
path = line
if 0 <= path.find('-psn'):
path = path[ : path.find('-psn') - 1 ]
return True, path
return False , ''
(success_sublime, editor_path_sublime) = find_editor_mac('Sublime',temp)
(success_atom, editor_path_atom) = find_editor_mac('Atom',temp)
if success_sublime:
subprocess.Popen([editor_path_sublime, command])
elif success_atom:
subprocess.Popen([editor_path_atom, command])
else:
os.system('open ' + file_path )
# end - open_file
# gets the last build environment
def get_build_last():
env_last = ''
DIR_PWD = os.listdir('.')
if '.pioenvs' in DIR_PWD:
date_last = 0.0
DIR__pioenvs = os.listdir('.pioenvs')
for name in DIR__pioenvs:
if 0 <= name.find('.') or 0 <= name.find('-'): # skip files in listing
continue
DIR_temp = os.listdir('.pioenvs/' + name)
for names_temp in DIR_temp:
if 0 == names_temp.find('firmware.'):
date_temp = os.path.getmtime('.pioenvs/' + name + '/' + names_temp)
if date_temp > date_last:
date_last = date_temp
env_last = name
return env_last
# gets the board being built from the Configuration.h file
# returns: board name, major version of Marlin being used (1 or 2)
def get_board_name():
board_name = ''
# get board name
with open('Marlin/Configuration.h', 'r') as myfile:
Configuration_h = myfile.read()
Configuration_h = Configuration_h.split('\n')
Marlin_ver = 0 # set version to invalid number
for lines in Configuration_h:
if 0 == lines.find('#define CONFIGURATION_H_VERSION 01'):
Marlin_ver = 1
if 0 == lines.find('#define CONFIGURATION_H_VERSION 02'):
Marlin_ver = 2
board = lines.find(' BOARD_') + 1
motherboard = lines.find(' MOTHERBOARD ') + 1
define = lines.find('#define ')
comment = lines.find('//')
if (comment == -1 or comment > board) and \
board > motherboard and \
motherboard > define and \
define >= 0 :
spaces = lines.find(' ', board) # find the end of the board substring
if spaces == -1:
board_name = lines[board : ]
else:
board_name = lines[board : spaces]
break
return board_name, Marlin_ver
# extract first environment name it finds after the start position
# returns: environment name and position to start the next search from
def get_env_from_line(line, start_position):
env = ''
next_position = -1
env_position = line.find('env:', start_position)
if 0 < env_position:
next_position = line.find(' ', env_position + 4)
if 0 < next_position:
env = line[env_position + 4 : next_position]
else:
env = line[env_position + 4 : ] # at the end of the line
return env, next_position
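# Illustrative sketch only (never called): how get_env_from_line() walks the
# 'env:' entries in one pins.h comment. The sample string is hypothetical,
# not copied from a real pins.h.
def _demo_get_env_from_line():
    sample = '// 1280 env:megaatmega1280 env:megaatmega2560'
    env_1, pos = get_env_from_line(sample, 0)    # -> 'megaatmega1280'
    env_2, pos = get_env_from_line(sample, pos)  # -> 'megaatmega2560'
    env_3, pos = get_env_from_line(sample, pos)  # -> '' (no more env: entries)
    return env_1, env_2, env_3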
#scans pins.h for board name and returns the environment(s) it finds
def get_starting_env(board_name_full, version):
# get environment starting point
if version == 1:
path = 'Marlin/pins.h'
if version == 2:
path = 'Marlin/src/pins/pins.h'
with open(path, 'r') as myfile:
pins_h = myfile.read()
env_A = ''
env_B = ''
env_C = ''
board_name = board_name_full[ 6 : ] # only use the part after "BOARD_" since we're searching the pins.h file
pins_h = pins_h.split('\n')
environment = ''
board_line = ''
cpu_A = ''
cpu_B = ''
i = 0
list_start_found = False
for lines in pins_h:
i = i + 1 # i is always one ahead of the index into pins_h
if 0 < lines.find("Unknown MOTHERBOARD value set in Configuration.h"):
break # no more
if 0 < lines.find('1280'):
list_start_found = True
if list_start_found == False: # skip lines until find start of CPU list
continue
board = lines.find(board_name)
comment_start = lines.find('// ')
cpu_A_loc = comment_start
cpu_B_loc = 0
if board > 0: # need to look at the next line for environment info
cpu_line = pins_h[i]
comment_start = cpu_line.find('// ')
env_A, next_position = get_env_from_line(cpu_line, comment_start) # get name of environment & start of search for next
env_B, next_position = get_env_from_line(cpu_line, next_position) # get next environment, if it exists
env_C, next_position = get_env_from_line(cpu_line, next_position) # get next environment, if it exists
break
return env_A, env_B, env_C
# scans input string for CPUs that the users may need to select from
# returns: CPU name
def get_CPU_name(environment):
CPU_list = ('1280', '2560','644', '1284', 'LPC1768', 'DUE')
CPU_name = ''
    for CPU in CPU_list:
        if 0 <= environment.find(CPU): # 0 <= so a CPU name at the very start of the string still matches
            return CPU
    return '' # no known CPU in this environment name; return '' instead of None so callers can compare safely
# get environment to be used for the build
# returns: environment
def get_env(board_name, ver_Marlin):
def no_environment():
print 'ERROR - no environment for this board'
print board_name
raise SystemExit(0) # no environment so quit
def invalid_board():
print 'ERROR - invalid board'
print board_name
raise SystemExit(0) # quit if unable to find board
CPU_question = ( ('1280', '2560', " 1280 or 2560 CPU? "), ('644', '1284', " 644 or 1284 CPU? ") )
if 0 < board_name.find('MELZI') :
get_answer(' ' + board_name + ' ', " Which flavor of Melzi? ", "Melzi (Optiboot bootloader)", "Melzi ")
if 1 == get_answer_val:
target_env = 'melzi_optiboot'
else:
target_env = 'melzi'
else:
env_A, env_B, env_C = get_starting_env(board_name, ver_Marlin)
if env_A == '':
no_environment()
if env_B == '':
return env_A # only one environment so finished
CPU_A = get_CPU_name(env_A)
CPU_B = get_CPU_name(env_B)
for item in CPU_question:
if CPU_A == item[0]:
get_answer(' ' + board_name + ' ', item[2], item[0], item[1])
if 2 == get_answer_val:
target_env = env_B
else:
target_env = env_A
return target_env
if env_A == 'LPC1768':
if build_type == 'traceback' or (build_type == 'clean' and get_build_last() == 'LPC1768_debug_and_upload'):
target_env = 'LPC1768_debug_and_upload'
else:
target_env = 'LPC1768'
elif env_A == 'DUE':
target_env = 'DUE'
if build_type == 'traceback' or (build_type == 'clean' and get_build_last() == 'DUE_debug'):
target_env = 'DUE_debug'
elif env_B == 'DUE_USB':
get_answer(' ' + board_name + ' ', " DUE: need download port ", "USB (native USB) port", "Programming port ")
if 1 == get_answer_val:
target_env = 'DUE_USB'
else:
target_env = 'DUE'
else:
invalid_board()
    if build_type == 'traceback' and not(target_env == 'LPC1768_debug_and_upload' or target_env == 'DUE_debug') and ver_Marlin == 2: # use the parameter; 'Marlin_ver' is not defined in this scope
print "ERROR - this board isn't setup for traceback"
print 'board_name: ', board_name
print 'target_env: ', target_env
raise SystemExit(0)
return target_env
# end - get_env
# puts screen text into queue so that the parent thread can fetch the data from this thread
import Queue
IO_queue = Queue.Queue()
PIO_queue = Queue.Queue()
def write_to_screen_queue(text, format_tag = 'normal'):
double_in = [text, format_tag]
IO_queue.put(double_in, block = False)
#
# send one line to the terminal screen with syntax highlighting
#
# input: unformatted text, flags from previous run
# returns: formatted text ready to go to the terminal, flags from this run
#
# This routine remembers the status from call to call because previous
# lines can affect how the current line is highlighted
#
# 'static' variables - init here and then keep updating them from within print_line
warning = False
warning_FROM = False
error = False
standard = True
prev_line_COM = False
next_line_warning = False
warning_continue = False
line_counter = 0
def line_print(line_input):
global warning
global warning_FROM
global error
global standard
global prev_line_COM
global next_line_warning
global warning_continue
global line_counter
# all '0' elements must precede all '1' elements or they'll be skipped
platformio_highlights = [
['Environment', 0, 'highlight_blue'],
['[SKIP]', 1, 'warning'],
['[ERROR]', 1, 'error'],
['[SUCCESS]', 1, 'highlight_green']
]
def write_to_screen_with_replace(text, highlights): # search for highlights & split line accordingly
did_something = False
for highlight in highlights:
found = text.find(highlight[0])
if did_something == True:
break
if found >= 0 :
did_something = True
if 0 == highlight[1]:
found_1 = text.find(' ')
found_tab = text.find('\t')
                    if found_1 < 0 or (0 <= found_tab < found_1): # use the tab only if it exists and comes first
                        found_1 = found_tab
write_to_screen_queue(text[ : found_1 + 1 ])
for highlight_2 in highlights:
if highlight[0] == highlight_2[0] :
continue
found = text.find(highlight_2[0])
if found >= 0 :
found_space = text.find(' ', found_1 + 1)
found_tab = text.find('\t', found_1 + 1)
                            if found_space < 0 or (0 <= found_tab < found_space): # use the tab only if it exists and comes first
                                found_space = found_tab
found_right = text.find(']', found + 1)
write_to_screen_queue(text[found_1 + 1 : found_space + 1 ], highlight[2])
write_to_screen_queue(text[found_space + 1 : found + 1 ])
write_to_screen_queue(text[found + 1 : found_right], highlight_2[2])
write_to_screen_queue(text[found_right : ] + '\n')
break
break
if 1 == highlight[1]:
found_right = text.find(']', found + 1)
write_to_screen_queue(text[ : found + 1 ])
write_to_screen_queue(text[found + 1 : found_right ], highlight[2])
write_to_screen_queue(text[found_right : ] + '\n')
break
if did_something == False:
r_loc = text.find('\r') + 1
if r_loc > 0 and r_loc < len(text): # need to split this line
text = text.split('\r')
for line in text:
write_to_screen_queue(line + '\n')
else:
write_to_screen_queue(text + '\n')
# end - write_to_screen_with_replace
# scan the line
line_counter = line_counter + 1
max_search = len(line_input)
if max_search > 3 :
max_search = 3
beginning = line_input[:max_search]
# set flags
if 0 < line_input.find(': warning: '): # start of warning block
warning = True
warning_FROM = False
error = False
standard = False
        prev_line_COM = False
warning_continue = True
if 0 < line_input.find('Thank you') or 0 < line_input.find('SUMMARY') :
warning = False #standard line found
warning_FROM = False
error = False
standard = True
prev_line_COM = False
warning_continue = False
elif beginning == 'War' or \
beginning == '#er' or \
beginning == 'In ' or \
(beginning != 'Com' and prev_line_COM == True and not(beginning == 'Arc' or beginning == 'Lin' or beginning == 'Ind') or \
next_line_warning == True):
warning = True #warning found
warning_FROM = False
error = False
standard = False
prev_line_COM = False
elif beginning == 'Com' or \
beginning == 'Ver' or \
beginning == ' [E' or \
beginning == 'Rem' or \
beginning == 'Bui' or \
beginning == 'Ind' or \
beginning == 'PLA':
warning = False #standard line found
warning_FROM = False
error = False
standard = True
prev_line_COM = False
warning_continue = False
elif beginning == '***':
warning = False # error found
warning_FROM = False
error = True
standard = False
prev_line_COM = False
elif 0 < line_input.find(': error:') or \
0 < line_input.find(': fatal error:'): # start of warning /error block
warning = False # error found
warning_FROM = False
error = True
standard = False
prev_line_COM = False
warning_continue = True
elif beginning == 'fro' and warning == True or \
beginning == '.pi' : # start of warning /error block
warning_FROM = True
prev_line_COM = False
warning_continue = True
elif warning_continue == True:
warning = True
warning_FROM = False # keep the warning status going until find a standard line or an error
error = False
standard = False
prev_line_COM = False
warning_continue = True
else:
warning = False # unknown so assume standard line
warning_FROM = False
error = False
standard = True
prev_line_COM = False
warning_continue = False
if beginning == 'Com':
prev_line_COM = True
# print based on flags
if standard == True:
write_to_screen_with_replace(line_input, platformio_highlights) #print white on black with substitutions
if warning == True:
write_to_screen_queue(line_input + '\n', 'warning')
if error == True:
write_to_screen_queue(line_input + '\n', 'error')
# end - line_print
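# Illustrative sketch only (never called during a normal run): feeding a few
# assumed compiler-style lines through line_print(). The 'Compiling' line is
# classified as standard output, the ': warning: ' line is queued with the
# 'warning' tag, and the ': error:' line with the 'error' tag.
def _demo_line_print():
    line_print('Compiling .pioenvs/megaatmega2560/src/Marlin.cpp.o')
    line_print('Marlin.cpp:100:1: warning: something looks suspicious')
    line_print('Marlin.cpp:200:1: error: something is definitely wrong')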
def run_PIO(dummy):
##########################################################################
# #
# run Platformio #
# #
##########################################################################
# build platformio run -e target_env
# clean platformio run --target clean -e target_env
# upload platformio run --target upload -e target_env
# traceback platformio run --target upload -e target_env
# program platformio run --target program -e target_env
# test platformio test upload -e target_env
# remote platformio remote run --target upload -e target_env
# debug platformio debug -e target_env
global build_type
global target_env
global board_name
print 'build_type: ', build_type
import subprocess
import sys
print 'starting platformio'
if build_type == 'build':
# platformio run -e target_env
# combine stdout & stderr so all compile messages are included
pio_subprocess = subprocess.Popen(['platformio', 'run', '-e', target_env], stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
elif build_type == 'clean':
# platformio run --target clean -e target_env
# combine stdout & stderr so all compile messages are included
pio_subprocess = subprocess.Popen(['platformio', 'run', '--target', 'clean', '-e', target_env], stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
elif build_type == 'upload':
# platformio run --target upload -e target_env
# combine stdout & stderr so all compile messages are included
pio_subprocess = subprocess.Popen(['platformio', 'run', '--target', 'upload', '-e', target_env], stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
elif build_type == 'traceback':
# platformio run --target upload -e target_env - select the debug environment if there is one
# combine stdout & stderr so all compile messages are included
pio_subprocess = subprocess.Popen(['platformio', 'run', '--target', 'upload', '-e', target_env], stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
elif build_type == 'program':
# platformio run --target program -e target_env
# combine stdout & stderr so all compile messages are included
pio_subprocess = subprocess.Popen(['platformio', 'run', '--target', 'program', '-e', target_env], stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
elif build_type == 'test':
#platformio test upload -e target_env
# combine stdout & stderr so all compile messages are included
pio_subprocess = subprocess.Popen(['platformio', 'test', 'upload', '-e', target_env], stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
elif build_type == 'remote':
# platformio remote run --target upload -e target_env
# combine stdout & stderr so all compile messages are included
        pio_subprocess = subprocess.Popen(['platformio', 'remote', 'run', '--target', 'upload', '-e', target_env], stdout=subprocess.PIPE, stderr=subprocess.STDOUT) # 'upload', to match the remote build described above
elif build_type == 'debug':
# platformio debug -e target_env
# combine stdout & stderr so all compile messages are included
pio_subprocess = subprocess.Popen(['platformio', 'debug', '-e', target_env], stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
else:
print 'ERROR - unknown build type: ', build_type
raise SystemExit(0) # kill everything
# stream output from subprocess and split it into lines
for line in iter(pio_subprocess.stdout.readline, ''):
line_print(line.replace('\n', ''))
# append info used to run PlatformIO
write_to_screen_queue('\nBoard name: ' + board_name + '\n') # put build info at the bottom of the screen
write_to_screen_queue('Build type: ' + build_type + '\n')
write_to_screen_queue('Environment used: ' + target_env + '\n')
write_to_screen_queue(str(datetime.now()) + '\n')
# end - run_PIO
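# Hedged sketch (not used by run_PIO above, and the names below are ours):
# the same build-type dispatch expressed as a lookup table, mirroring the
# comment block at the top of run_PIO. Shown only to make the mapping easier
# to scan.
_PIO_ARGS_BY_BUILD_TYPE = {
    'build': ['platformio', 'run'],
    'clean': ['platformio', 'run', '--target', 'clean'],
    'upload': ['platformio', 'run', '--target', 'upload'],
    'traceback': ['platformio', 'run', '--target', 'upload'],
    'program': ['platformio', 'run', '--target', 'program'],
    'test': ['platformio', 'test', 'upload'],
    'remote': ['platformio', 'remote', 'run', '--target', 'upload'],
    'debug': ['platformio', 'debug'],
}
def _demo_pio_args(a_build_type, an_env):
    # e.g. _demo_pio_args('build', 'megaatmega2560')
    # -> ['platformio', 'run', '-e', 'megaatmega2560']
    return _PIO_ARGS_BY_BUILD_TYPE[a_build_type] + ['-e', an_env]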
########################################################################
import time
import threading
import Tkinter as tk
import ttk
import Queue
import subprocess
import sys
que = Queue.Queue()
#IO_queue = Queue.Queue()
from Tkinter import Tk, Frame, Text, Scrollbar, Menu
from tkMessageBox import askokcancel
import tkFileDialog
class output_window(Text):
# based on Super Text
global continue_updates
continue_updates = True
global search_position
search_position = '' # start with invalid search position
global error_found
error_found = False # are there any errors?
def __init__(self):
self.root = tk.Tk()
self.frame = tk.Frame(self.root)
self.frame.pack(fill='both', expand=True)
# text widget
#self.text = tk.Text(self.frame, borderwidth=3, relief="sunken")
Text.__init__(self, self.frame, borderwidth=3, relief="sunken")
self.config(tabs=(400,)) # configure Text widget tab stops
self.config(background = 'black', foreground = 'white', font= ("consolas", 12), wrap = 'word', undo = 'True')
# self.config(background = 'black', foreground = 'white', font= ("consolas", 12), wrap = 'none', undo = 'True')
self.config(height = 24, width = 100)
self.config(insertbackground = 'pale green') # keyboard insertion point
self.pack(side='left', fill='both', expand=True)
self.tag_config('normal', foreground = 'white')
self.tag_config('warning', foreground = 'yellow' )
self.tag_config('error', foreground = 'red')
self.tag_config('highlight_green', foreground = 'green')
self.tag_config('highlight_blue', foreground = 'cyan')
self.tag_config('error_highlight_inactive', background = 'dim gray')
self.tag_config('error_highlight_active', background = 'light grey')
self.bind_class("Text","<Control-a>", self.select_all) # required in windows, works in others
self.bind_all("<Control-Shift-E>", self.scroll_errors)
        self.bind_all("<Control-Shift-R>", self.rebuild) # bind_all, like the Ctrl-Shift-E binding above; bind_class expects a class name as its first argument
# scrollbar
scrb = tk.Scrollbar(self.frame, orient='vertical', command=self.yview)
self.config(yscrollcommand=scrb.set)
scrb.pack(side='right', fill='y')
# self.scrb_Y = tk.Scrollbar(self.frame, orient='vertical', command=self.yview)
# self.scrb_Y.config(yscrollcommand=self.scrb_Y.set)
# self.scrb_Y.pack(side='right', fill='y')
#
# self.scrb_X = tk.Scrollbar(self.frame, orient='horizontal', command=self.xview)
# self.scrb_X.config(xscrollcommand=self.scrb_X.set)
# self.scrb_X.pack(side='bottom', fill='x')
        # scrb_X = tk.Scrollbar(self, orient=tk.HORIZONTAL, command=self.xview) # tk.HORIZONTAL - now we have a horizontal scroll bar BUT... it shrinks to a postage stamp and hides far right behind the vertical scroll bar
# self.config(xscrollcommand=scrb_X.set)
# scrb_X.pack(side='bottom', fill='x')
#
# scrb= tk.Scrollbar(self, orient='vertical', command=self.yview)
# self.config(yscrollcommand=scrb.set)
# scrb.pack(side='right', fill='y')
        # self.config(height = 240, width = 1000) # didn't get the size back to normal
        # self.pack(side='left', fill='both', expand=True) # didn't get the size back to normal
# pop-up menu
self.popup = tk.Menu(self, tearoff=0)
self.popup.add_command(label='Copy', command=self._copy)
self.popup.add_command(label='Paste', command=self._paste)
self.popup.add_separator()
self.popup.add_command(label='Cut', command=self._cut)
self.popup.add_separator()
self.popup.add_command(label='Select All', command=self._select_all)
self.popup.add_command(label='Clear All', command=self._clear_all)
self.popup.add_separator()
self.popup.add_command(label='Save As', command=self._file_save_as)
self.popup.add_separator()
# self.popup.add_command(label='Repeat Build(CTL-shift-r)', command=self._rebuild)
self.popup.add_command(label='Repeat Build', command=self._rebuild)
self.popup.add_separator()
self.popup.add_command(label='Scroll Errors (CTL-shift-e)', command=self._scroll_errors)
self.popup.add_separator()
self.popup.add_command(label='Open File at Cursor', command=self._open_selected_file)
if current_OS == 'Darwin': # MAC
self.bind('<Button-2>', self._show_popup) # macOS only
else:
self.bind('<Button-3>', self._show_popup) # Windows & Linux
# threading & subprocess section
def start_thread(self, ):
global continue_updates
# create then start a secondary thread to run an arbitrary function
# must have at least one argument
self.secondary_thread = threading.Thread(target = lambda q, arg1: q.put(run_PIO(arg1)), args=(que, ''))
self.secondary_thread.start()
continue_updates = True
# check the Queue in 50ms
self.root.after(50, self.check_thread)
self.root.after(50, self.update)
def check_thread(self): # wait for user to kill the window
global continue_updates
if continue_updates == True:
self.root.after(10, self.check_thread)
def update(self):
global continue_updates
if continue_updates == True:
            self.root.after(10, self.update) # re-queue this method every 10ms
temp_text = ['0','0']
if IO_queue.empty():
if not(self.secondary_thread.is_alive()):
continue_updates = False # queue is exhausted and thread is dead so no need for further updates
else:
try:
temp_text = IO_queue.get(block = False)
except Queue.Empty:
continue_updates = False # queue is exhausted so no need for further updates
else:
self.insert('end', temp_text[0], temp_text[1])
self.see("end") # make the last line visible (scroll text off the top)
# text editing section
def _scroll_errors(self):
global search_position
global error_found
if search_position == '': # first time so highlight all errors
countVar = tk.IntVar()
search_position = '1.0'
search_count = 0
while not(search_position == '') and search_count < 100:
search_position = self.search("error", search_position, stopindex="end", count=countVar, nocase=1)
search_count = search_count + 1
if not(search_position == ''):
error_found = True
end_pos = '{}+{}c'.format(search_position, 5)
self.tag_add("error_highlight_inactive", search_position, end_pos)
search_position = '{}+{}c'.format(search_position, 1) # point to the next character for new search
else:
break
if error_found:
if search_position == '':
search_position = self.search("error", '1.0', stopindex="end", nocase=1) # new search
else: # remove active highlight
end_pos = '{}+{}c'.format(search_position, 5)
start_pos = '{}+{}c'.format(search_position, -1)
self.tag_remove("error_highlight_active", start_pos, end_pos)
search_position = self.search("error", search_position, stopindex="end", nocase=1) # finds first occurrence AGAIN on the first time through
if search_position == "": # wrap around
search_position = self.search("error", '1.0', stopindex="end", nocase=1)
end_pos = '{}+{}c'.format(search_position, 5)
self.tag_add("error_highlight_active", search_position, end_pos) # add active highlight
self.see(search_position)
search_position = '{}+{}c'.format(search_position, 1) # point to the next character for new search
def scroll_errors(self, event):
self._scroll_errors()
def _rebuild(self):
#global board_name
#global Marlin_ver
#global target_env
#board_name, Marlin_ver = get_board_name()
#target_env = get_env(board_name, Marlin_ver)
self.start_thread()
    def rebuild(self, event):
        print "Ctrl-Shift-R pressed - repeating the last build"
        self._rebuild()
def _open_selected_file(self):
current_line = self.index('insert')
line_start = current_line[ : current_line.find('.')] + '.0'
line_end = current_line[ : current_line.find('.')] + '.200'
self.mark_set("path_start", line_start)
self.mark_set("path_end", line_end)
path = self.get("path_start", "path_end")
from_loc = path.find('from ')
colon_loc = path.find(': ')
if 0 <= from_loc and ((colon_loc == -1) or (from_loc < colon_loc)) :
path = path [ from_loc + 5 : ]
if 0 <= colon_loc:
path = path [ : colon_loc ]
if 0 <= path.find('\\') or 0 <= path.find('/'): # make sure it really contains a path
open_file(path)
    def _file_save_as(self):
        self.filename = tkFileDialog.asksaveasfilename(defaultextension = '.txt')
        if not self.filename: # user cancelled the save dialog
            return
        with open(self.filename, 'w') as f:
            f.write(self.get('1.0', 'end'))
def _show_popup(self, event):
'''right-click popup menu'''
if self.root.focus_get() != self:
self.root.focus_set()
try:
self.popup.tk_popup(event.x_root, event.y_root, 0)
finally:
self.popup.grab_release()
def _cut(self):
try:
selection = self.get(*self.tag_ranges('sel'))
self.clipboard_clear()
self.clipboard_append(selection)
self.delete(*self.tag_ranges('sel'))
except TypeError:
pass
def cut(self, event):
self._cut()
def _copy(self):
try:
selection = self.get(*self.tag_ranges('sel'))
self.clipboard_clear()
self.clipboard_append(selection)
except TypeError:
pass
def copy(self, event):
self._copy()
def _paste(self):
self.insert('insert', self.selection_get(selection='CLIPBOARD'))
def _select_all(self):
self.tag_add('sel', '1.0', 'end')
def select_all(self, event):
self.tag_add('sel', '1.0', 'end')
def _clear_all(self):
'''erases all text'''
        isok = askokcancel('Clear All', 'Erase all text?', parent=self,
                           default='ok') # tkMessageBox takes 'parent', not 'frame'
if isok:
self.delete('1.0', 'end')
# end - output_window
def main():
##########################################################################
# #
# main program #
# #
##########################################################################
global build_type
global target_env
global board_name
board_name, Marlin_ver = get_board_name()
target_env = get_env(board_name, Marlin_ver)
os.environ["BUILD_TYPE"] = build_type # let sub processes know what is happening
os.environ["TARGET_ENV"] = target_env
os.environ["BOARD_NAME"] = board_name
auto_build = output_window()
auto_build.start_thread() # executes the "run_PIO" function
auto_build.root.mainloop()
if __name__ == '__main__':
main()
| gpl-2.0 |
luoshao23/ML_algorithm | Clustering/Pred_KNN.py | 1 | 4903 | from random import random, randint
import math
import numpy as np
import plotly.plotly as py
import plotly.graph_objs as go
weightdomain = [(0, 20)] * 4
def wineprice(rating, age):
peak_age = rating - 50
price = float(rating) / 2
if age > peak_age:
price = price * (5 - (age - peak_age))
else:
price = price * (5 * float(age + 1) / peak_age)
if price < 0:
price = 0.0
return price
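# Worked example (illustrative only): for rating 80.0 the peak age is 30, so a
# 20-year-old bottle is still improving and
#   price = (80 / 2) * (5 * 21 / 30) = 40 * 3.5 = 140.0
def _demo_wineprice():
    return wineprice(80.0, 20.0) # == 140.0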
def wineset1():
rows = []
for i in xrange(300):
rating = random() * 50 + 50
age = random() * 50
price = wineprice(rating, age)
price *= (random() * 0.2 + 0.9)
rows.append((rating, age, price))
rows = np.array(rows)
return rows
def wineset2():
rows = []
for i in xrange(300):
rating = random() * 50 + 50
age = random() * 50
aisle = float(randint(1, 20))
bottlesize = [375.0, 750.0, 1500.0, 3000.0][randint(0, 3)]
price = wineprice(rating, age)
price *= (bottlesize / 750)
price *= (random() * 0.2 + 0.9)
rows.append((rating, age, aisle, bottlesize, price))
rows = np.array(rows)
return rows
def wineset3():
rows = wineset1()
for row in rows:
if random() < 0.5:
row[-1] *= 0.5
return rows
def euclidean(v1, v2):
d = 0.0
for i in xrange(len(v1)):
d += (v1[i] - v2[i])**2
return math.sqrt(d)
def getdistances(data, vec1):
distancelist = []
for i in xrange(len(data)):
vec2 = data[i][:-1]
distancelist.append((euclidean(vec1, vec2), i))
distancelist.sort()
return distancelist
def knnestimate(data, vec1, k=5):
dlist = getdistances(data, vec1)
avg = 0.0
for i in xrange(k):
idx = dlist[i][1]
avg += data[idx][-1]
avg = avg / k
return avg
def inverseweight(dist, num=1.0, const=0.1):
return num / (dist + const)
def subtractweight(dist, const=1.0):
if dist > const:
return 0
else:
return const - dist
def gaussian(dist, sigma=5.0):
return math.exp(-dist**2 / (2 * sigma**2))
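# Quick check (illustrative only): gaussian() computes exp(-d**2 / (2*sigma**2)),
# giving weight 1.0 at distance 0 and exp(-0.5) ~ 0.607 at d == sigma.
def _demo_gaussian():
    return gaussian(0.0), gaussian(5.0, sigma=5.0)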
def weightedknn(data, vec1, k=5, weightf=gaussian):
dlist = getdistances(data, vec1)
avg = 0.0
totalweight = 0.0
for i in xrange(k):
dist = dlist[i][0]
idx = dlist[i][1]
weight = weightf(dist)
avg += weight * data[idx][-1]
totalweight += weight
if totalweight == 0:
return 0
avg = avg / totalweight
return avg
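# Usage sketch (illustrative only): price estimates for a hypothetical
# 95-rated, 3-year-old wine, first unweighted and then distance-weighted.
def _demo_knn_estimates():
    data = wineset1()
    plain = knnestimate(data, (95.0, 3.0))
    weighted = weightedknn(data, (95.0, 3.0), weightf=inverseweight)
    return plain, weighted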
def dividedata(data, test=0.05):
trainset = []
testset = []
for row in data:
if random() < test:
testset.append(row)
else:
trainset.append(row)
return trainset, testset
def testalgorithm(algf, trainset, testset):
error = 0.0
for row in testset:
guess = algf(trainset, row[:-1])
error += (row[-1] - guess)**2
return error / len(testset)
def crossvalidate(algf, data, trials=100, test=0.05):
error = 0.0
for i in xrange(trials):
trainset, testset = dividedata(data, test)
error += testalgorithm(algf, trainset, testset)
return error / trials
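# Usage sketch (illustrative only): compare the mean squared error of the
# plain and the gaussian-weighted estimator over random train/test splits.
def _demo_crossvalidate():
    data = wineset1()
    return crossvalidate(knnestimate, data, trials=10), \
           crossvalidate(weightedknn, data, trials=10)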
def rescale(data, scale=None):
if scale is not None and len(scale) == data.shape[1] - 1:
scaleddata = data * (scale + [1])
else:
scaleddata = data / (np.mean(data, 0) + 0.0001)
scaleddata[:, -1] = data[:, -1]
return scaleddata
def createcostfunction(algf, data):
def costf(scale):
sdata = rescale(data, scale)
return crossvalidate(algf, sdata, trials=20)
return costf
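# Usage sketch (illustrative only): the cost function built here is meant to
# be handed to an optimizer searching over 'weightdomain' (defined at the top
# of this file), but it can also be called directly with a candidate scale.
def _demo_costfunction():
    data = wineset2()
    costf = createcostfunction(knnestimate, data)
    return costf([10, 10, 0, 10]) # cross-validated error under this scaling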
def probguess(data, vec1, low, high, k=5, weightf=gaussian):
dlist = getdistances(data, vec1)
nweight = 0.0
tweight = 0.0
for i in xrange(k):
dist = dlist[i][0]
idx = dlist[i][1]
weight = weightf(dist)
v = data[idx][-1]
        if low <= v <= high:
nweight += weight
tweight += weight
if tweight == 0:
return 0
return nweight/tweight
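# Usage sketch (illustrative only): probguess() returns the weighted fraction
# of the k nearest neighbours whose price lands inside [low, high]; bands that
# together cover every neighbour's price sum to 1.
def _demo_probguess():
    data = wineset3()
    return probguess(data, [99.0, 20.0], 40, 80), \
           probguess(data, [99.0, 20.0], 80, 120)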
def cumulativegraph(data, vec1, high, k=5, weightf=gaussian):
    t1 = np.arange(0.0, high, 0.1)
    cprob = np.array([probguess(data, vec1, 0, v, k, weightf) for v in t1])
    trace = go.Scatter(x=t1, y=cprob) # don't shadow the 'data' parameter
    fig = go.Figure(data=[trace])
    py.plot(fig, filename='wineguess')
def probabilitygraph(data, vec1, high, k=5, weightf=gaussian, ss=5.0):
t1 = np.arange(0.0, high, 0.1)
probs = np.array([probguess(data, vec1, v, v+0.1, k, weightf) for v in t1])
smoothed = []
for i in xrange(len(probs)):
sv = 0.0
for j in xrange(len(probs)):
dist = abs(i-j)*0.1
weight = gaussian(dist, sigma=ss)
sv += weight*probs[j]
smoothed.append(sv)
smoothed = np.array(smoothed)
    trace = go.Scatter(x=t1, y=smoothed) # don't shadow the 'data' parameter
    fig = go.Figure(data=[trace])
    py.plot(fig, filename='wineguess_smoothed')
data = wineset1()
| mit |
snnn/tensorflow | tensorflow/python/ops/bitwise_ops_test.py | 25 | 7698 | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for bitwise operations."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import six
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import test_util
from tensorflow.python.ops import bitwise_ops
from tensorflow.python.ops import gen_bitwise_ops
from tensorflow.python.platform import googletest
class BitwiseOpTest(test_util.TensorFlowTestCase):
def __init__(self, method_name="runTest"):
super(BitwiseOpTest, self).__init__(method_name)
def testBinaryOps(self):
dtype_list = [dtypes.int8, dtypes.int16, dtypes.int32, dtypes.int64,
dtypes.uint8, dtypes.uint16, dtypes.uint32, dtypes.uint64]
with self.test_session(use_gpu=True) as sess:
for dtype in dtype_list:
lhs = constant_op.constant([0, 5, 3, 14], dtype=dtype)
rhs = constant_op.constant([5, 0, 7, 11], dtype=dtype)
and_result, or_result, xor_result = sess.run(
[bitwise_ops.bitwise_and(lhs, rhs),
bitwise_ops.bitwise_or(lhs, rhs),
bitwise_ops.bitwise_xor(lhs, rhs)])
self.assertAllEqual(and_result, [0, 0, 3, 10])
self.assertAllEqual(or_result, [5, 5, 7, 15])
self.assertAllEqual(xor_result, [5, 5, 4, 5])
def testPopulationCountOp(self):
dtype_list = [dtypes.int8, dtypes.int16,
dtypes.int32, dtypes.int64,
dtypes.uint8, dtypes.uint16]
raw_inputs = [0, 1, -1, 3, -3, 5, -5, 14, -14,
127, 128, 255, 256, 65535, 65536,
2**31 - 1, 2**31, 2**32 - 1, 2**32, -2**32 + 1, -2**32,
-2**63 + 1, 2**63 - 1]
def count_bits(x):
return sum([bin(z).count("1") for z in six.iterbytes(x.tobytes())])
for dtype in dtype_list:
with self.test_session(use_gpu=True) as sess:
print("PopulationCount test: ", dtype)
inputs = np.array(raw_inputs, dtype=dtype.as_numpy_dtype)
truth = [count_bits(x) for x in inputs]
input_tensor = constant_op.constant(inputs, dtype=dtype)
popcnt_result = sess.run(gen_bitwise_ops.population_count(input_tensor))
self.assertAllEqual(truth, popcnt_result)
def testInvertOp(self):
dtype_list = [dtypes.int8, dtypes.int16, dtypes.int32, dtypes.int64,
dtypes.uint8, dtypes.uint16, dtypes.uint32, dtypes.uint64]
inputs = [0, 5, 3, 14]
with self.test_session(use_gpu=True) as sess:
for dtype in dtype_list:
# Because of issues with negative numbers, let's test this indirectly.
# 1. invert(a) and a = 0
# 2. invert(a) or a = invert(0)
input_tensor = constant_op.constant(inputs, dtype=dtype)
not_a_and_a, not_a_or_a, not_0 = sess.run(
[bitwise_ops.bitwise_and(
input_tensor, bitwise_ops.invert(input_tensor)),
bitwise_ops.bitwise_or(
input_tensor, bitwise_ops.invert(input_tensor)),
bitwise_ops.invert(constant_op.constant(0, dtype=dtype))])
self.assertAllEqual(not_a_and_a, [0, 0, 0, 0])
self.assertAllEqual(not_a_or_a, [not_0] * 4)
# For unsigned dtypes let's also check the result directly.
if dtype.is_unsigned:
inverted = sess.run(bitwise_ops.invert(input_tensor))
expected = [dtype.max - x for x in inputs]
self.assertAllEqual(inverted, expected)
def testShiftsWithPositiveLHS(self):
dtype_list = [np.int8, np.int16, np.int32, np.int64,
np.uint8, np.uint16, np.uint32, np.uint64]
with self.test_session(use_gpu=True) as sess:
for dtype in dtype_list:
lhs = np.array([0, 5, 3, 14], dtype=dtype)
rhs = np.array([5, 0, 7, 3], dtype=dtype)
left_shift_result, right_shift_result = sess.run(
[bitwise_ops.left_shift(lhs, rhs),
bitwise_ops.right_shift(lhs, rhs)])
self.assertAllEqual(left_shift_result, np.left_shift(lhs, rhs))
self.assertAllEqual(right_shift_result, np.right_shift(lhs, rhs))
def testShiftsWithNegativeLHS(self):
dtype_list = [np.int8, np.int16, np.int32, np.int64]
with self.test_session(use_gpu=True) as sess:
for dtype in dtype_list:
lhs = np.array([-1, -5, -3, -14], dtype=dtype)
rhs = np.array([5, 0, 7, 11], dtype=dtype)
left_shift_result, right_shift_result = sess.run(
[bitwise_ops.left_shift(lhs, rhs),
bitwise_ops.right_shift(lhs, rhs)])
self.assertAllEqual(left_shift_result, np.left_shift(lhs, rhs))
self.assertAllEqual(right_shift_result, np.right_shift(lhs, rhs))
def testImplementationDefinedShiftsDoNotCrash(self):
dtype_list = [np.int8, np.int16, np.int32, np.int64]
with self.test_session(use_gpu=True) as sess:
for dtype in dtype_list:
lhs = np.array([-1, -5, -3, -14], dtype=dtype)
rhs = np.array([-2, 64, 101, 32], dtype=dtype)
# We intentionally do not test for specific values here since the exact
# outputs are implementation-defined. However, we should not crash or
# trigger an undefined-behavior error from tools such as
# AddressSanitizer.
sess.run([bitwise_ops.left_shift(lhs, rhs),
bitwise_ops.right_shift(lhs, rhs)])
def testShapeInference(self):
dtype_list = [dtypes.int8, dtypes.int16, dtypes.int32, dtypes.int64,
dtypes.uint8, dtypes.uint16]
with self.test_session(use_gpu=True) as sess:
for dtype in dtype_list:
lhs = constant_op.constant([[0], [3], [5]], dtype=dtype)
rhs = constant_op.constant([[1, 2, 4]], dtype=dtype)
and_tensor = bitwise_ops.bitwise_and(lhs, rhs)
or_tensor = bitwise_ops.bitwise_or(lhs, rhs)
xor_tensor = bitwise_ops.bitwise_xor(lhs, rhs)
ls_tensor = bitwise_ops.left_shift(lhs, rhs)
rs_tensor = bitwise_ops.right_shift(lhs, rhs)
and_result, or_result, xor_result, ls_result, rs_result = sess.run(
[and_tensor, or_tensor, xor_tensor, ls_tensor, rs_tensor])
# Compare shape inference with result
self.assertAllEqual(and_tensor.get_shape().as_list(), and_result.shape)
self.assertAllEqual(and_tensor.get_shape().as_list(), [3, 3])
self.assertAllEqual(or_tensor.get_shape().as_list(), or_result.shape)
self.assertAllEqual(or_tensor.get_shape().as_list(), [3, 3])
self.assertAllEqual(xor_tensor.get_shape().as_list(), xor_result.shape)
self.assertAllEqual(xor_tensor.get_shape().as_list(), [3, 3])
self.assertAllEqual(ls_tensor.get_shape().as_list(), ls_result.shape)
self.assertAllEqual(ls_tensor.get_shape().as_list(), [3, 3])
self.assertAllEqual(rs_tensor.get_shape().as_list(), rs_result.shape)
self.assertAllEqual(rs_tensor.get_shape().as_list(), [3, 3])
if __name__ == "__main__":
googletest.main()
| apache-2.0 |
gaohr/SEIMS | preprocess/reclass_landuse.py | 2 | 2171 | #! /usr/bin/env python
# coding=utf-8
# @Redistribute landuse parameters
# @Author: Junzhi Liu
# @Revised: Liang-Jun Zhu
#
import sqlite3
from config import *
from util import *
def ReclassLanduse(landuseFile, dbname, dstdir):
# code:{property_name:value}
# for example:
# 1:{"clay":0.12, "sand":0.1}
property_map = {}
str_sql = 'select landuse_id, ' + \
','.join(LANDUSE_ATTR_LIST) + ' from LanduseLookup'
property_namelist = LANDUSE_ATTR_LIST
num_propeties = len(property_namelist)
for i in xrange(num_propeties):
property_map[property_namelist[i]] = {}
conn = sqlite3.connect(dbname)
cursor = conn.cursor()
cursor.execute(str_sql)
for row in cursor:
# print row
id = int(row[0])
for i in xrange(num_propeties):
pName = property_namelist[i]
dic = property_map[pName]
if pName != "USLE_P":
dic[id] = row[i + 1]
else:
dic[id] = 1
ds = gdal.Open(landuseFile)
band = ds.GetRasterBand(1)
data = band.ReadAsArray()
xsize = band.XSize
ysize = band.YSize
noDataValue = band.GetNoDataValue()
# print noDataValue
geotransform = ds.GetGeoTransform()
srs = osr.SpatialReference()
srs.ImportFromWkt(ds.GetProjection())
n = xsize * ysize
data.shape = n
attrList = []
for iprop in xrange(num_propeties):
pname = property_namelist[iprop]
filename = dstdir + os.sep + pname + ".tif"
attrList.append(filename)
data_prop = numpy.zeros(n)
dic = property_map[pname]
for i in xrange(n):
id = int(data[i])
data_prop[i] = dic[id] if id > 0 else noDataValue
data_prop.shape = (ysize, xsize)
WriteGTiffFile(filename, ysize, xsize, data_prop,
geotransform, srs, noDataValue, gdal.GDT_Float32)
print 'The landuse parameters are generated!'
return attrList
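# Illustrative sketch (hypothetical ids and values, not taken from a real
# LanduseLookup table): property_map built inside ReclassLanduse() maps
# attribute name -> {landuse_id: value}, and each raster cell is painted with
# the value looked up for its landuse id.
def _demo_property_map_shape():
    property_map = {'clay': {1: 0.12, 2: 0.30},
                    'sand': {1: 0.10, 2: 0.45}}
    return property_map['clay'][1] # value of 'clay' for landuse id 1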
if __name__ == '__main__':
LoadConfiguration(GetINIfile())
ReclassLanduse(WORKING_DIR + os.sep + landuseMFile,
TXT_DB_DIR + os.sep + sqliteFile, WORKING_DIR)
| gpl-2.0 |
ryanahall/django | django/db/models/options.py | 6 | 35477 | from __future__ import unicode_literals
import warnings
from bisect import bisect
from collections import OrderedDict, defaultdict
from itertools import chain
from django.apps import apps
from django.conf import settings
from django.core.exceptions import FieldDoesNotExist
from django.db.models.fields import AutoField
from django.db.models.fields.proxy import OrderWrt
from django.db.models.fields.related import ManyToManyField
from django.utils import six
from django.utils.datastructures import ImmutableList, OrderedSet
from django.utils.deprecation import RemovedInDjango20Warning
from django.utils.encoding import (
force_text, python_2_unicode_compatible, smart_text,
)
from django.utils.functional import cached_property
from django.utils.lru_cache import lru_cache
from django.utils.text import camel_case_to_spaces
from django.utils.translation import override, string_concat
PROXY_PARENTS = object()
EMPTY_RELATION_TREE = tuple()
IMMUTABLE_WARNING = (
"The return type of '%s' should never be mutated. If you want to manipulate this list "
"for your own use, make a copy first."
)
DEFAULT_NAMES = ('verbose_name', 'verbose_name_plural', 'db_table', 'ordering',
'unique_together', 'permissions', 'get_latest_by',
'order_with_respect_to', 'app_label', 'db_tablespace',
'abstract', 'managed', 'proxy', 'swappable', 'auto_created',
'index_together', 'apps', 'default_permissions',
'select_on_save', 'default_related_name')
class raise_deprecation(object):
def __init__(self, suggested_alternative):
self.suggested_alternative = suggested_alternative
def __call__(self, fn):
def wrapper(*args, **kwargs):
warnings.warn(
"'%s is an unofficial API that has been deprecated. "
"You may be able to replace it with '%s'" % (
fn.__name__,
self.suggested_alternative,
),
RemovedInDjango20Warning, stacklevel=2
)
return fn(*args, **kwargs)
return wrapper
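# Illustrative sketch only (the helper below is hypothetical, not a Django
# API): applying the decorator above makes every call emit a
# RemovedInDjango20Warning that names the suggested replacement.
def _demo_raise_deprecation():
    @raise_deprecation(suggested_alternative="get_fields()")
    def legacy_field_list(opts):
        return list(opts.get_fields())
    return legacy_field_list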
def normalize_together(option_together):
"""
option_together can be either a tuple of tuples, or a single
tuple of two strings. Normalize it to a tuple of tuples, so that
calling code can uniformly expect that.
"""
try:
if not option_together:
return ()
if not isinstance(option_together, (tuple, list)):
raise TypeError
first_element = next(iter(option_together))
if not isinstance(first_element, (tuple, list)):
option_together = (option_together,)
# Normalize everything to tuples
return tuple(tuple(ot) for ot in option_together)
except TypeError:
# If the value of option_together isn't valid, return it
# verbatim; this will be picked up by the check framework later.
return option_together
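# Quick check (illustrative only): both accepted spellings normalize to the
# same tuple of tuples, exactly as the docstring above promises.
def _demo_normalize_together():
    single = normalize_together(('driver', 'restaurant'))
    nested = normalize_together([['driver', 'restaurant']])
    return single == nested == (('driver', 'restaurant'),)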
def make_immutable_fields_list(name, data):
return ImmutableList(data, warning=IMMUTABLE_WARNING % name)
@python_2_unicode_compatible
class Options(object):
FORWARD_PROPERTIES = ('fields', 'many_to_many', 'concrete_fields',
'local_concrete_fields', '_forward_fields_map')
REVERSE_PROPERTIES = ('related_objects', 'fields_map', '_relation_tree')
def __init__(self, meta, app_label=None):
self._get_fields_cache = {}
self.proxied_children = []
self.local_fields = []
self.local_many_to_many = []
self.virtual_fields = []
self.model_name = None
self.verbose_name = None
self.verbose_name_plural = None
self.db_table = ''
self.ordering = []
self._ordering_clash = False
self.unique_together = []
self.index_together = []
self.select_on_save = False
self.default_permissions = ('add', 'change', 'delete')
self.permissions = []
self.object_name = None
self.app_label = app_label
self.get_latest_by = None
self.order_with_respect_to = None
self.db_tablespace = settings.DEFAULT_TABLESPACE
self.meta = meta
self.pk = None
self.has_auto_field = False
self.auto_field = None
self.abstract = False
self.managed = True
self.proxy = False
# For any class that is a proxy (including automatically created
# classes for deferred object loading), proxy_for_model tells us
# which class this model is proxying. Note that proxy_for_model
# can create a chain of proxy models. For non-proxy models, the
# variable is always None.
self.proxy_for_model = None
# For any non-abstract class, the concrete class is the model
# in the end of the proxy_for_model chain. In particular, for
# concrete models, the concrete_model is always the class itself.
self.concrete_model = None
self.swappable = None
self.parents = OrderedDict()
self.auto_created = False
# To handle various inheritance situations, we need to track where
# managers came from (concrete or abstract base classes). `managers`
# keeps a list of 3-tuples of the form:
# (creation_counter, instance, abstract(=True))
self.managers = []
# List of all lookups defined in ForeignKey 'limit_choices_to' options
# from *other* models. Needed for some admin checks. Internal use only.
self.related_fkey_lookups = []
# A custom app registry to use, if you're making a separate model set.
self.apps = apps
self.default_related_name = None
@lru_cache(maxsize=None)
def _map_model(self, link):
# This helper function is used to allow backwards compatibility with
# the previous API. No future methods should use this function.
# It maps a field to (field, model or related_model,) depending on the
# field type.
model = link.model._meta.concrete_model
if model is self.model:
model = None
return link, model
@lru_cache(maxsize=None)
def _map_model_details(self, link):
# This helper function is used to allow backwards compatibility with
# the previous API. No future methods should use this function.
# This function maps a field to a tuple of:
# (field, model or related_model, direct, is_m2m) depending on the
# field type.
direct = not link.auto_created or link.concrete
model = link.model._meta.concrete_model
if model is self.model:
model = None
m2m = link.is_relation and link.many_to_many
return link, model, direct, m2m
@property
def app_config(self):
# Don't go through get_app_config to avoid triggering imports.
return self.apps.app_configs.get(self.app_label)
@property
def installed(self):
return self.app_config is not None
@property
def abstract_managers(self):
return [
(counter, instance.name, instance) for counter, instance, abstract
in self.managers if abstract
]
@property
def concrete_managers(self):
return [
(counter, instance.name, instance) for counter, instance, abstract
in self.managers if not abstract
]
def contribute_to_class(self, cls, name):
from django.db import connection
from django.db.backends.utils import truncate_name
cls._meta = self
self.model = cls
# First, construct the default values for these options.
self.object_name = cls.__name__
self.model_name = self.object_name.lower()
self.verbose_name = camel_case_to_spaces(self.object_name)
# Store the original user-defined values for each option,
# for use when serializing the model definition
self.original_attrs = {}
# Next, apply any overridden values from 'class Meta'.
if self.meta:
meta_attrs = self.meta.__dict__.copy()
for name in self.meta.__dict__:
# Ignore any private attributes that Django doesn't care about.
# NOTE: We can't modify a dictionary's contents while looping
# over it, so we loop over the *original* dictionary instead.
if name.startswith('_'):
del meta_attrs[name]
for attr_name in DEFAULT_NAMES:
if attr_name in meta_attrs:
setattr(self, attr_name, meta_attrs.pop(attr_name))
self.original_attrs[attr_name] = getattr(self, attr_name)
elif hasattr(self.meta, attr_name):
setattr(self, attr_name, getattr(self.meta, attr_name))
self.original_attrs[attr_name] = getattr(self, attr_name)
self.unique_together = normalize_together(self.unique_together)
self.index_together = normalize_together(self.index_together)
# verbose_name_plural is a special case because it uses a 's'
# by default.
if self.verbose_name_plural is None:
self.verbose_name_plural = string_concat(self.verbose_name, 's')
            # order_with_respect_to and ordering are mutually exclusive.
self._ordering_clash = bool(self.ordering and self.order_with_respect_to)
# Any leftover attributes must be invalid.
if meta_attrs != {}:
raise TypeError("'class Meta' got invalid attribute(s): %s" % ','.join(meta_attrs.keys()))
else:
self.verbose_name_plural = string_concat(self.verbose_name, 's')
del self.meta
# If the db_table wasn't provided, use the app_label + model_name.
if not self.db_table:
self.db_table = "%s_%s" % (self.app_label, self.model_name)
self.db_table = truncate_name(self.db_table, connection.ops.max_name_length())
def _prepare(self, model):
if self.order_with_respect_to:
# The app registry will not be ready at this point, so we cannot
# use get_field().
query = self.order_with_respect_to
try:
self.order_with_respect_to = next(
f for f in self._get_fields(reverse=False)
if f.name == query or f.attname == query
)
except StopIteration:
raise FieldDoesNotExist('%s has no field named %r' % (self.object_name, query))
self.ordering = ('_order',)
if not any(isinstance(field, OrderWrt) for field in model._meta.local_fields):
model.add_to_class('_order', OrderWrt())
else:
self.order_with_respect_to = None
if self.pk is None:
if self.parents:
# Promote the first parent link in lieu of adding yet another
# field.
field = next(six.itervalues(self.parents))
# Look for a local field with the same name as the
# first parent link. If a local field has already been
# created, use it instead of promoting the parent
already_created = [fld for fld in self.local_fields if fld.name == field.name]
if already_created:
field = already_created[0]
field.primary_key = True
self.setup_pk(field)
else:
auto = AutoField(verbose_name='ID', primary_key=True,
auto_created=True)
model.add_to_class('id', auto)
def add_field(self, field, virtual=False):
# Insert the given field in the order in which it was created, using
# the "creation_counter" attribute of the field.
# Move many-to-many related fields from self.fields into
# self.many_to_many.
if virtual:
self.virtual_fields.append(field)
elif field.is_relation and field.many_to_many:
self.local_many_to_many.insert(bisect(self.local_many_to_many, field), field)
else:
self.local_fields.insert(bisect(self.local_fields, field), field)
self.setup_pk(field)
# If the field being added is a relation to another known field,
# expire the cache on this field and the forward cache on the field
# being referenced, because there will be new relationships in the
# cache. Otherwise, expire the cache of references *to* this field.
# The mechanism for getting at the related model is slightly odd -
# ideally, we'd just ask for field.related_model. However, related_model
# is a cached property, and all the models haven't been loaded yet, so
# we need to make sure we don't cache a string reference.
if field.is_relation and hasattr(field.remote_field, 'model') and field.remote_field.model:
try:
field.remote_field.model._meta._expire_cache(forward=False)
except AttributeError:
pass
self._expire_cache()
else:
self._expire_cache(reverse=False)
def setup_pk(self, field):
if not self.pk and field.primary_key:
self.pk = field
field.serialize = False
def setup_proxy(self, target):
"""
Does the internal setup so that the current model is a proxy for
"target".
"""
self.pk = target._meta.pk
self.proxy_for_model = target
self.db_table = target._meta.db_table
def __repr__(self):
return '<Options for %s>' % self.object_name
def __str__(self):
return "%s.%s" % (smart_text(self.app_label), smart_text(self.model_name))
@property
def verbose_name_raw(self):
"""
There are a few places where the untranslated verbose name is needed
(so that we get the same value regardless of currently active
locale).
"""
with override(None):
return force_text(self.verbose_name)
@property
def swapped(self):
"""
Has this model been swapped out for another? If so, return the model
name of the replacement; otherwise, return None.
For historical reasons, model name lookups using get_model() are
case insensitive, so we make sure we are case insensitive here.
"""
if self.swappable:
model_label = '%s.%s' % (self.app_label, self.model_name)
swapped_for = getattr(settings, self.swappable, None)
if swapped_for:
try:
swapped_label, swapped_object = swapped_for.split('.')
except ValueError:
# setting not in the format app_label.model_name
# raising ImproperlyConfigured here causes problems with
# test cleanup code - instead it is raised in get_user_model
# or as part of validation.
return swapped_for
if '%s.%s' % (swapped_label, swapped_object.lower()) not in (None, model_label):
return swapped_for
return None
@cached_property
def fields(self):
"""
Returns a list of all forward fields on the model and its parents,
excluding ManyToManyFields.
Private API intended only to be used by Django itself; get_fields()
combined with filtering of field properties is the public API for
obtaining this field list.
"""
# For legacy reasons, the fields property should only contain forward
# fields that are not virtual or with a m2m cardinality. Therefore we
# pass these three filters as filters to the generator.
# The third lambda is a longwinded way of checking f.related_model - we don't
# use that property directly because related_model is a cached property,
# and all the models may not have been loaded yet; we don't want to cache
# the string reference to the related_model.
is_not_an_m2m_field = lambda f: not (f.is_relation and f.many_to_many)
is_not_a_generic_relation = lambda f: not (f.is_relation and f.one_to_many)
is_not_a_generic_foreign_key = lambda f: not (
f.is_relation and f.many_to_one and not (hasattr(f.remote_field, 'model') and f.remote_field.model)
)
return make_immutable_fields_list(
"fields",
(f for f in self._get_fields(reverse=False) if
is_not_an_m2m_field(f) and is_not_a_generic_relation(f)
and is_not_a_generic_foreign_key(f))
)
@cached_property
def concrete_fields(self):
"""
Returns a list of all concrete fields on the model and its parents.
Private API intended only to be used by Django itself; get_fields()
combined with filtering of field properties is the public API for
obtaining this field list.
"""
return make_immutable_fields_list(
"concrete_fields", (f for f in self.fields if f.concrete)
)
@cached_property
def local_concrete_fields(self):
"""
Returns a list of all concrete fields on the model.
Private API intended only to be used by Django itself; get_fields()
combined with filtering of field properties is the public API for
obtaining this field list.
"""
return make_immutable_fields_list(
"local_concrete_fields", (f for f in self.local_fields if f.concrete)
)
@raise_deprecation(suggested_alternative="get_fields()")
def get_fields_with_model(self):
return [self._map_model(f) for f in self.get_fields()]
@raise_deprecation(suggested_alternative="get_fields()")
def get_concrete_fields_with_model(self):
return [self._map_model(f) for f in self.concrete_fields]
@cached_property
def many_to_many(self):
"""
Returns a list of all many to many fields on the model and its parents.
Private API intended only to be used by Django itself; get_fields()
combined with filtering of field properties is the public API for
obtaining this list.
"""
return make_immutable_fields_list(
"many_to_many",
(f for f in self._get_fields(reverse=False)
if f.is_relation and f.many_to_many)
)
@cached_property
def related_objects(self):
"""
Returns all related objects pointing to the current model. The related
objects can come from a one-to-one, one-to-many, or many-to-many field
relation type.
Private API intended only to be used by Django itself; get_fields()
combined with filtering of field properties is the public API for
obtaining this field list.
"""
all_related_fields = self._get_fields(forward=False, reverse=True, include_hidden=True)
return make_immutable_fields_list(
"related_objects",
(obj for obj in all_related_fields
if not obj.hidden or obj.field.many_to_many)
)
@raise_deprecation(suggested_alternative="get_fields()")
def get_m2m_with_model(self):
return [self._map_model(f) for f in self.many_to_many]
@cached_property
def _forward_fields_map(self):
res = {}
fields = self._get_fields(reverse=False)
for field in fields:
res[field.name] = field
# Due to the way Django's internals work, get_field() should also
# be able to fetch a field by attname. In the case of a concrete
# field with relation, includes the *_id name too
try:
res[field.attname] = field
except AttributeError:
pass
return res
@cached_property
def fields_map(self):
res = {}
fields = self._get_fields(forward=False, include_hidden=True)
for field in fields:
res[field.name] = field
# Due to the way Django's internals work, get_field() should also
# be able to fetch a field by attname. In the case of a concrete
# field with relation, includes the *_id name too
try:
res[field.attname] = field
except AttributeError:
pass
return res
def get_field(self, field_name, many_to_many=None):
"""
Returns a field instance given a field name. The field can be either a
forward or reverse field, unless many_to_many is specified; if it is,
only forward fields will be returned.
The many_to_many argument exists for backwards compatibility reasons;
it has been deprecated and will be removed in Django 2.0.
"""
m2m_in_kwargs = many_to_many is not None
if m2m_in_kwargs:
# Always throw a warning if many_to_many is used regardless of
# whether it alters the return type or not.
warnings.warn(
"The 'many_to_many' argument on get_field() is deprecated; "
"use a filter on field.many_to_many instead.",
RemovedInDjango20Warning
)
try:
# In order to avoid premature loading of the relation tree
# (expensive) we prefer checking if the field is a forward field.
field = self._forward_fields_map[field_name]
if many_to_many is False and field.many_to_many:
raise FieldDoesNotExist(
'%s has no field named %r' % (self.object_name, field_name)
)
return field
except KeyError:
# If the app registry is not ready, reverse fields are
# unavailable, therefore we throw a FieldDoesNotExist exception.
if not self.apps.models_ready:
raise FieldDoesNotExist(
"%s has no field named %r. The app cache isn't ready yet, "
"so if this is an auto-created related field, it won't "
"be available yet." % (self.object_name, field_name)
)
try:
if m2m_in_kwargs:
# Previous API does not allow searching reverse fields.
raise FieldDoesNotExist('%s has no field named %r' % (self.object_name, field_name))
# Retrieve field instance by name from cached or just-computed
# field map.
return self.fields_map[field_name]
except KeyError:
raise FieldDoesNotExist('%s has no field named %r' % (self.object_name, field_name))
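# Usage sketch (``Book`` is a hypothetical model): get_field() resolves
# both forward and reverse names, raising FieldDoesNotExist otherwise:
#
#   try:
#       field = Book._meta.get_field('title')
#   except FieldDoesNotExist:
#       field = None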
@raise_deprecation(suggested_alternative="get_field()")
def get_field_by_name(self, name):
return self._map_model_details(self.get_field(name))
@raise_deprecation(suggested_alternative="get_fields()")
def get_all_field_names(self):
names = set()
fields = self.get_fields()
for field in fields:
# For backwards compatibility GenericForeignKey should not be
# included in the results.
if field.is_relation and field.many_to_one and field.related_model is None:
continue
# Relations to child proxy models should not be included.
if (field.model != self.model and
field.model._meta.concrete_model == self.concrete_model):
continue
names.add(field.name)
if hasattr(field, 'attname'):
names.add(field.attname)
return list(names)
@raise_deprecation(suggested_alternative="get_fields()")
def get_all_related_objects(self, local_only=False, include_hidden=False,
include_proxy_eq=False):
include_parents = True if local_only is False else PROXY_PARENTS
fields = self._get_fields(
forward=False, reverse=True,
include_parents=include_parents,
include_hidden=include_hidden,
)
fields = (obj for obj in fields if not isinstance(obj.field, ManyToManyField))
if include_proxy_eq:
children = chain.from_iterable(c._relation_tree
for c in self.concrete_model._meta.proxied_children
if c is not self)
relations = (f.remote_field for f in children
if include_hidden or not f.remote_field.field.remote_field.is_hidden())
fields = chain(fields, relations)
return list(fields)
@raise_deprecation(suggested_alternative="get_fields()")
def get_all_related_objects_with_model(self, local_only=False, include_hidden=False,
include_proxy_eq=False):
return [
self._map_model(f) for f in self.get_all_related_objects(
local_only=local_only,
include_hidden=include_hidden,
include_proxy_eq=include_proxy_eq,
)
]
@raise_deprecation(suggested_alternative="get_fields()")
def get_all_related_many_to_many_objects(self, local_only=False):
include_parents = True if local_only is not True else PROXY_PARENTS
fields = self._get_fields(
forward=False, reverse=True,
include_parents=include_parents, include_hidden=True
)
return [obj for obj in fields if isinstance(obj.field, ManyToManyField)]
@raise_deprecation(suggested_alternative="get_fields()")
def get_all_related_m2m_objects_with_model(self):
fields = self._get_fields(forward=False, reverse=True, include_hidden=True)
return [self._map_model(obj) for obj in fields if isinstance(obj.field, ManyToManyField)]
def get_base_chain(self, model):
"""
Returns a list of parent classes leading to 'model' (ordered from
closest to most distant ancestor). This has to handle the case where
'model' is a grandparent or even more distant ancestor.
"""
if not self.parents:
return None
if model in self.parents:
return [model]
for parent in self.parents:
res = parent._meta.get_base_chain(model)
if res:
res.insert(0, parent)
return res
return None
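# Illustrative sketch, assuming hypothetical multi-table inheritance
# Place <- Restaurant <- ItalianRestaurant:
#
#   ItalianRestaurant._meta.get_base_chain(Place)
#   # -> [Restaurant, Place]; None if Place were not an ancestor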
def get_parent_list(self):
"""
Returns all the ancestors of this model as a list ordered by MRO.
Useful for determining if something is an ancestor, regardless of lineage.
"""
result = OrderedSet(self.parents)
for parent in self.parents:
for ancestor in parent._meta.get_parent_list():
result.add(ancestor)
return list(result)
def get_ancestor_link(self, ancestor):
"""
Returns the field on the current model which points to the given
"ancestor". This is possible an indirect link (a pointer to a parent
model, which points, eventually, to the ancestor). Used when
constructing table joins for model inheritance.
Returns None if the model isn't an ancestor of this one.
"""
if ancestor in self.parents:
return self.parents[ancestor]
for parent in self.parents:
# Tries to get a link field from the immediate parent
parent_link = parent._meta.get_ancestor_link(ancestor)
if parent_link:
# In the case of a proxied model, the first link
# in the chain to the ancestor is that parent's
# own link.
return self.parents[parent] or parent_link
def _populate_directed_relation_graph(self):
"""
This method is used by each model to find its reverse objects. As this
method is very expensive and is accessed frequently (it looks up every
field in a model, in every app), it is computed on first access and then
is set as a property on every model.
"""
related_objects_graph = defaultdict(list)
all_models = self.apps.get_models(include_auto_created=True)
for model in all_models:
# Abstract models' fields are copied to child models, so we will
# see those fields via the child models.
if model._meta.abstract:
continue
fields_with_relations = (
f for f in model._meta._get_fields(reverse=False, include_parents=False)
if f.is_relation and f.related_model is not None
)
for f in fields_with_relations:
if not isinstance(f.remote_field.model, six.string_types):
related_objects_graph[f.remote_field.model._meta].append(f)
for model in all_models:
# Set the relation_tree using the internal __dict__. In this way
# we avoid calling the cached property. In attribute lookup,
# instance __dict__ takes precedence over a non-data descriptor
# such as @cached_property. This means the _relation_tree property
# is only computed if '_relation_tree' is not already in __dict__.
related_objects = related_objects_graph[model._meta]
model._meta.__dict__['_relation_tree'] = related_objects
# It seems it is possible that self is not in all_models, so guard
# against that with default for get().
return self.__dict__.get('_relation_tree', EMPTY_RELATION_TREE)
@cached_property
def _relation_tree(self):
return self._populate_directed_relation_graph()
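# Why writing to __dict__ works: cached_property defines only __get__,
# making it a non-data descriptor, so instance __dict__ wins the lookup.
# A minimal sketch of the same trick (expensive() is hypothetical):
#
#   class Demo(object):
#       @cached_property
#       def value(self):
#           return expensive()
#   d = Demo()
#   d.__dict__['value'] = 42
#   d.value  # -> 42; expensive() is never called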
def _expire_cache(self, forward=True, reverse=True):
# This method is usually called by apps.cache_clear(), when the
# registry is finalized, or when a new field is added.
properties_to_expire = []
if forward:
properties_to_expire.extend(self.FORWARD_PROPERTIES)
if reverse and not self.abstract:
properties_to_expire.extend(self.REVERSE_PROPERTIES)
for cache_key in properties_to_expire:
try:
delattr(self, cache_key)
except AttributeError:
pass
self._get_fields_cache = {}
def get_fields(self, include_parents=True, include_hidden=False):
"""
Returns a list of fields associated with the model. By default, both
forward and reverse fields from the model and its parents are
included. The returned set can be tuned using the parameters:
- include_parents: include fields derived from inheritance
- include_hidden: include fields that have a related_name that
starts with a "+"
"""
if include_parents is False:
include_parents = PROXY_PARENTS
return self._get_fields(include_parents=include_parents, include_hidden=include_hidden)
def _get_fields(self, forward=True, reverse=True, include_parents=True, include_hidden=False,
seen_models=None):
"""
Internal helper function to return fields of the model.
* If forward=True, then fields defined on this model are returned.
* If reverse=True, then relations pointing to this model are returned.
* If include_hidden=True, then fields with is_hidden=True are returned.
* The include_parents argument toggles if fields from parent models
should be included. It has three values: True, False, and
PROXY_PARENTS. When set to PROXY_PARENTS, the call will return all
fields defined for the current model or any of its parents in the
parent chain to the model's concrete model.
"""
if include_parents not in (True, False, PROXY_PARENTS):
raise TypeError("Invalid argument for include_parents: %s" % (include_parents,))
# This helper function is used to allow recursion in ``get_fields()``
# implementation and to provide a fast way for Django's internals to
# access specific subsets of fields.
# We must keep track of which models we have already seen. Otherwise we
# could include the same field multiple times from different models.
topmost_call = False
if seen_models is None:
seen_models = set()
topmost_call = True
seen_models.add(self.model)
# Creates a cache key composed of all arguments
cache_key = (forward, reverse, include_parents, include_hidden, topmost_call)
try:
# In order to avoid list manipulation, always return a shallow copy
# of the results.
return self._get_fields_cache[cache_key]
except KeyError:
pass
fields = []
# Recursively call _get_fields() on each parent, with the same
# options provided in this call.
if include_parents is not False:
for parent in self.parents:
# In diamond inheritance it is possible that we see the same
# model from two different routes. In that case, avoid adding
# fields from the same parent again.
if parent in seen_models:
continue
if (parent._meta.concrete_model != self.concrete_model and
include_parents == PROXY_PARENTS):
continue
for obj in parent._meta._get_fields(
forward=forward, reverse=reverse, include_parents=include_parents,
include_hidden=include_hidden, seen_models=seen_models):
if hasattr(obj, 'parent_link') and obj.parent_link:
continue
fields.append(obj)
if reverse:
# Tree is computed once and cached until the app cache is expired.
# It is composed of a list of fields pointing to the current model
# from other models.
all_fields = self._relation_tree
for field in all_fields:
# If hidden fields should be included or the relation is not
# intentionally hidden, add it to the fields list.
if include_hidden or not field.remote_field.hidden:
fields.append(field.remote_field)
if forward:
fields.extend(
field for field in chain(self.local_fields, self.local_many_to_many)
)
# Virtual fields are recopied to each child model, and they get a
# different model as field.model in each child. Hence we have to
# add the virtual fields separately from the topmost call. If we
# did this recursively similar to local_fields, we would get field
# instances with field.model != self.model.
if topmost_call:
fields.extend(
f for f in self.virtual_fields
)
# In order to avoid list manipulation, always
# return a shallow copy of the results.
fields = make_immutable_fields_list("get_fields()", fields)
# Store result into cache for later access
self._get_fields_cache[cache_key] = fields
return fields
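# Usage sketch (``Book`` is a hypothetical model): each argument
# combination is cached separately and the same immutable list is
# returned on repeated calls:
#
#   local = Book._meta.get_fields(include_parents=False)
#   everything = Book._meta.get_fields(include_hidden=True)
#   local is Book._meta.get_fields(include_parents=False)  # -> True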
| bsd-3-clause |
simonjbeaumont/planex | planex/init.py | 1 | 1365 | #!/usr/bin/env python
"""
Creates or regenerates a Makefile with special planex-init comments
"""
import os
import logging
MAKEFILE_PATH = "/usr/share/planex"
def create_makefile():
""" Checks if a Makefile exists with special planex-init comments in it.
If not, it creates or regenerates the Makefile while preserving its
existing contents.
"""
name = "Makefile"
firstline = "# Start generated by planex-init\n"
autogen = "include %s/Makefile.rules\n" % (MAKEFILE_PATH)
endline = "# End generated by planex-init\n"
if not os.path.exists(name):
logging.debug("Creating Makefile")
with open(name, 'w') as makefile:
makefile.write(firstline)
makefile.write(autogen)
makefile.write(endline)
return
with open(name, 'r') as makefile:
lines = makefile.readlines()
try:
start = lines.index(firstline)
end = lines.index(endline)
lines = lines[:start + 1] + [autogen] + lines[end:]
except ValueError:
logging.error("Couldn't find planex-init stanza in Makefile")
with open(name, 'w') as makefile:
makefile.writelines(lines)
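# The managed stanza produced and maintained above looks like this
# (shown with the default MAKEFILE_PATH):
#
#   # Start generated by planex-init
#   include /usr/share/planex/Makefile.rules
#   # End generated by planex-init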
def main():
"""
Main entry point
"""
logging.basicConfig(format='%(message)s', level=logging.ERROR)
create_makefile()
if __name__ == "__main__":
main()
| lgpl-2.1 |
jsdosa/TizenRT | external/protobuf/python/compatibility_tests/v2.5.0/tests/google/protobuf/internal/text_format_test.py | 27 | 24605 | #! /usr/bin/python
#
# Protocol Buffers - Google's data interchange format
# Copyright 2008 Google Inc. All rights reserved.
# http://code.google.com/p/protobuf/
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Test for google.protobuf.text_format."""
__author__ = 'kenton@google.com (Kenton Varda)'
import difflib
import re
import unittest
from google.protobuf import text_format
from google.protobuf.internal import test_util
from google.protobuf import unittest_pb2
from google.protobuf import unittest_mset_pb2
class TextFormatTest(unittest.TestCase):
def ReadGolden(self, golden_filename):
f = test_util.GoldenFile(golden_filename)
golden_lines = f.readlines()
f.close()
return golden_lines
def CompareToGoldenFile(self, text, golden_filename):
golden_lines = self.ReadGolden(golden_filename)
self.CompareToGoldenLines(text, golden_lines)
def CompareToGoldenText(self, text, golden_text):
self.CompareToGoldenLines(text, golden_text.splitlines(1))
def CompareToGoldenLines(self, text, golden_lines):
actual_lines = text.splitlines(1)
self.assertEqual(golden_lines, actual_lines,
"Text doesn't match golden. Diff:\n" +
''.join(difflib.ndiff(golden_lines, actual_lines)))
def testPrintAllFields(self):
message = unittest_pb2.TestAllTypes()
test_util.SetAllFields(message)
self.CompareToGoldenFile(
self.RemoveRedundantZeros(text_format.MessageToString(message)),
'text_format_unittest_data.txt')
def testPrintAllExtensions(self):
message = unittest_pb2.TestAllExtensions()
test_util.SetAllExtensions(message)
self.CompareToGoldenFile(
self.RemoveRedundantZeros(text_format.MessageToString(message)),
'text_format_unittest_extensions_data.txt')
def testPrintMessageSet(self):
message = unittest_mset_pb2.TestMessageSetContainer()
ext1 = unittest_mset_pb2.TestMessageSetExtension1.message_set_extension
ext2 = unittest_mset_pb2.TestMessageSetExtension2.message_set_extension
message.message_set.Extensions[ext1].i = 23
message.message_set.Extensions[ext2].str = 'foo'
self.CompareToGoldenText(text_format.MessageToString(message),
'message_set {\n'
' [protobuf_unittest.TestMessageSetExtension1] {\n'
' i: 23\n'
' }\n'
' [protobuf_unittest.TestMessageSetExtension2] {\n'
' str: \"foo\"\n'
' }\n'
'}\n')
"""
def testPrintBadEnumValue(self):
message = unittest_pb2.TestAllTypes()
message.optional_nested_enum = 100
message.optional_foreign_enum = 101
message.optional_import_enum = 102
self.CompareToGoldenText(
text_format.MessageToString(message),
'optional_nested_enum: 100\n'
'optional_foreign_enum: 101\n'
'optional_import_enum: 102\n')
def testPrintBadEnumValueExtensions(self):
message = unittest_pb2.TestAllExtensions()
message.Extensions[unittest_pb2.optional_nested_enum_extension] = 100
message.Extensions[unittest_pb2.optional_foreign_enum_extension] = 101
message.Extensions[unittest_pb2.optional_import_enum_extension] = 102
self.CompareToGoldenText(
text_format.MessageToString(message),
'[protobuf_unittest.optional_nested_enum_extension]: 100\n'
'[protobuf_unittest.optional_foreign_enum_extension]: 101\n'
'[protobuf_unittest.optional_import_enum_extension]: 102\n')
"""
def testPrintExotic(self):
message = unittest_pb2.TestAllTypes()
message.repeated_int64.append(-9223372036854775808)
message.repeated_uint64.append(18446744073709551615)
message.repeated_double.append(123.456)
message.repeated_double.append(1.23e22)
message.repeated_double.append(1.23e-18)
message.repeated_string.append('\000\001\a\b\f\n\r\t\v\\\'"')
message.repeated_string.append(u'\u00fc\ua71f')
self.CompareToGoldenText(
self.RemoveRedundantZeros(text_format.MessageToString(message)),
'repeated_int64: -9223372036854775808\n'
'repeated_uint64: 18446744073709551615\n'
'repeated_double: 123.456\n'
'repeated_double: 1.23e+22\n'
'repeated_double: 1.23e-18\n'
'repeated_string: '
'"\\000\\001\\007\\010\\014\\n\\r\\t\\013\\\\\\\'\\""\n'
'repeated_string: "\\303\\274\\352\\234\\237"\n')
def testPrintNestedMessageAsOneLine(self):
message = unittest_pb2.TestAllTypes()
msg = message.repeated_nested_message.add()
msg.bb = 42
self.CompareToGoldenText(
text_format.MessageToString(message, as_one_line=True),
'repeated_nested_message { bb: 42 }')
def testPrintRepeatedFieldsAsOneLine(self):
message = unittest_pb2.TestAllTypes()
message.repeated_int32.append(1)
message.repeated_int32.append(1)
message.repeated_int32.append(3)
message.repeated_string.append("Google")
message.repeated_string.append("Zurich")
self.CompareToGoldenText(
text_format.MessageToString(message, as_one_line=True),
'repeated_int32: 1 repeated_int32: 1 repeated_int32: 3 '
'repeated_string: "Google" repeated_string: "Zurich"')
def testPrintNestedNewLineInStringAsOneLine(self):
message = unittest_pb2.TestAllTypes()
message.optional_string = "a\nnew\nline"
self.CompareToGoldenText(
text_format.MessageToString(message, as_one_line=True),
'optional_string: "a\\nnew\\nline"')
def testPrintMessageSetAsOneLine(self):
message = unittest_mset_pb2.TestMessageSetContainer()
ext1 = unittest_mset_pb2.TestMessageSetExtension1.message_set_extension
ext2 = unittest_mset_pb2.TestMessageSetExtension2.message_set_extension
message.message_set.Extensions[ext1].i = 23
message.message_set.Extensions[ext2].str = 'foo'
self.CompareToGoldenText(
text_format.MessageToString(message, as_one_line=True),
'message_set {'
' [protobuf_unittest.TestMessageSetExtension1] {'
' i: 23'
' }'
' [protobuf_unittest.TestMessageSetExtension2] {'
' str: \"foo\"'
' }'
' }')
def testPrintExoticAsOneLine(self):
message = unittest_pb2.TestAllTypes()
message.repeated_int64.append(-9223372036854775808)
message.repeated_uint64.append(18446744073709551615)
message.repeated_double.append(123.456)
message.repeated_double.append(1.23e22)
message.repeated_double.append(1.23e-18)
message.repeated_string.append('\000\001\a\b\f\n\r\t\v\\\'"')
message.repeated_string.append(u'\u00fc\ua71f')
self.CompareToGoldenText(
self.RemoveRedundantZeros(
text_format.MessageToString(message, as_one_line=True)),
'repeated_int64: -9223372036854775808'
' repeated_uint64: 18446744073709551615'
' repeated_double: 123.456'
' repeated_double: 1.23e+22'
' repeated_double: 1.23e-18'
' repeated_string: '
'"\\000\\001\\007\\010\\014\\n\\r\\t\\013\\\\\\\'\\""'
' repeated_string: "\\303\\274\\352\\234\\237"')
def testRoundTripExoticAsOneLine(self):
message = unittest_pb2.TestAllTypes()
message.repeated_int64.append(-9223372036854775808)
message.repeated_uint64.append(18446744073709551615)
message.repeated_double.append(123.456)
message.repeated_double.append(1.23e22)
message.repeated_double.append(1.23e-18)
message.repeated_string.append('\000\001\a\b\f\n\r\t\v\\\'"')
message.repeated_string.append(u'\u00fc\ua71f')
# Test as_utf8 = False.
wire_text = text_format.MessageToString(
message, as_one_line=True, as_utf8=False)
parsed_message = unittest_pb2.TestAllTypes()
text_format.Merge(wire_text, parsed_message)
self.assertEquals(message, parsed_message)
# Test as_utf8 = True.
wire_text = text_format.MessageToString(
message, as_one_line=True, as_utf8=True)
parsed_message = unittest_pb2.TestAllTypes()
text_format.Merge(wire_text, parsed_message)
self.assertEquals(message, parsed_message)
def testPrintRawUtf8String(self):
message = unittest_pb2.TestAllTypes()
message.repeated_string.append(u'\u00fc\ua71f')
text = text_format.MessageToString(message, as_utf8=True)
self.CompareToGoldenText(text, 'repeated_string: "\303\274\352\234\237"\n')
parsed_message = unittest_pb2.TestAllTypes()
text_format.Merge(text, parsed_message)
self.assertEquals(message, parsed_message)
def testMessageToString(self):
message = unittest_pb2.ForeignMessage()
message.c = 123
self.assertEqual('c: 123\n', str(message))
def RemoveRedundantZeros(self, text):
# Some platforms print 1e+5 as 1e+005. This is fine, but we need to remove
# these zeros in order to match the golden file.
text = text.replace('e+0','e+').replace('e+0','e+') \
.replace('e-0','e-').replace('e-0','e-')
# Floating point fields are printed with a .0 suffix even if they are
# actually integer numbers.
text = re.compile(r'\.0$', re.MULTILINE).sub('', text)
return text
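# Illustrative before/after for RemoveRedundantZeros (each replace()
# pass strips one padding zero, so two passes handle up to two):
#
#   '1.23e+005' -> '1.23e+5'
#   '124.0' -> '124'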
def testMergeGolden(self):
golden_text = '\n'.join(self.ReadGolden('text_format_unittest_data.txt'))
parsed_message = unittest_pb2.TestAllTypes()
text_format.Merge(golden_text, parsed_message)
message = unittest_pb2.TestAllTypes()
test_util.SetAllFields(message)
self.assertEquals(message, parsed_message)
def testMergeGoldenExtensions(self):
golden_text = '\n'.join(self.ReadGolden(
'text_format_unittest_extensions_data.txt'))
parsed_message = unittest_pb2.TestAllExtensions()
text_format.Merge(golden_text, parsed_message)
message = unittest_pb2.TestAllExtensions()
test_util.SetAllExtensions(message)
self.assertEquals(message, parsed_message)
def testMergeAllFields(self):
message = unittest_pb2.TestAllTypes()
test_util.SetAllFields(message)
ascii_text = text_format.MessageToString(message)
parsed_message = unittest_pb2.TestAllTypes()
text_format.Merge(ascii_text, parsed_message)
self.assertEqual(message, parsed_message)
test_util.ExpectAllFieldsSet(self, message)
def testMergeAllExtensions(self):
message = unittest_pb2.TestAllExtensions()
test_util.SetAllExtensions(message)
ascii_text = text_format.MessageToString(message)
parsed_message = unittest_pb2.TestAllExtensions()
text_format.Merge(ascii_text, parsed_message)
self.assertEqual(message, parsed_message)
def testMergeMessageSet(self):
message = unittest_pb2.TestAllTypes()
text = ('repeated_uint64: 1\n'
'repeated_uint64: 2\n')
text_format.Merge(text, message)
self.assertEqual(1, message.repeated_uint64[0])
self.assertEqual(2, message.repeated_uint64[1])
message = unittest_mset_pb2.TestMessageSetContainer()
text = ('message_set {\n'
' [protobuf_unittest.TestMessageSetExtension1] {\n'
' i: 23\n'
' }\n'
' [protobuf_unittest.TestMessageSetExtension2] {\n'
' str: \"foo\"\n'
' }\n'
'}\n')
text_format.Merge(text, message)
ext1 = unittest_mset_pb2.TestMessageSetExtension1.message_set_extension
ext2 = unittest_mset_pb2.TestMessageSetExtension2.message_set_extension
self.assertEquals(23, message.message_set.Extensions[ext1].i)
self.assertEquals('foo', message.message_set.Extensions[ext2].str)
def testMergeExotic(self):
message = unittest_pb2.TestAllTypes()
text = ('repeated_int64: -9223372036854775808\n'
'repeated_uint64: 18446744073709551615\n'
'repeated_double: 123.456\n'
'repeated_double: 1.23e+22\n'
'repeated_double: 1.23e-18\n'
'repeated_string: \n'
'"\\000\\001\\007\\010\\014\\n\\r\\t\\013\\\\\\\'\\""\n'
'repeated_string: "foo" \'corge\' "grault"\n'
'repeated_string: "\\303\\274\\352\\234\\237"\n'
'repeated_string: "\\xc3\\xbc"\n'
'repeated_string: "\xc3\xbc"\n')
text_format.Merge(text, message)
self.assertEqual(-9223372036854775808, message.repeated_int64[0])
self.assertEqual(18446744073709551615, message.repeated_uint64[0])
self.assertEqual(123.456, message.repeated_double[0])
self.assertEqual(1.23e22, message.repeated_double[1])
self.assertEqual(1.23e-18, message.repeated_double[2])
self.assertEqual(
'\000\001\a\b\f\n\r\t\v\\\'"', message.repeated_string[0])
self.assertEqual('foocorgegrault', message.repeated_string[1])
self.assertEqual(u'\u00fc\ua71f', message.repeated_string[2])
self.assertEqual(u'\u00fc', message.repeated_string[3])
def testMergeEmptyText(self):
message = unittest_pb2.TestAllTypes()
text = ''
text_format.Merge(text, message)
self.assertEquals(unittest_pb2.TestAllTypes(), message)
def testMergeInvalidUtf8(self):
message = unittest_pb2.TestAllTypes()
text = 'repeated_string: "\\xc3\\xc3"'
self.assertRaises(text_format.ParseError, text_format.Merge, text, message)
def testMergeSingleWord(self):
message = unittest_pb2.TestAllTypes()
text = 'foo'
self.assertRaisesWithMessage(
text_format.ParseError,
('1:1 : Message type "protobuf_unittest.TestAllTypes" has no field named '
'"foo".'),
text_format.Merge, text, message)
def testMergeUnknownField(self):
message = unittest_pb2.TestAllTypes()
text = 'unknown_field: 8\n'
self.assertRaisesWithMessage(
text_format.ParseError,
('1:1 : Message type "protobuf_unittest.TestAllTypes" has no field named '
'"unknown_field".'),
text_format.Merge, text, message)
def testMergeBadExtension(self):
message = unittest_pb2.TestAllExtensions()
text = '[unknown_extension]: 8\n'
self.assertRaises(
text_format.ParseError,
text_format.Merge, text, message)
message = unittest_pb2.TestAllTypes()
self.assertRaisesWithMessage(
text_format.ParseError,
('1:2 : Message type "protobuf_unittest.TestAllTypes" does not have '
'extensions.'),
text_format.Merge, text, message)
def testMergeGroupNotClosed(self):
message = unittest_pb2.TestAllTypes()
text = 'RepeatedGroup: <'
self.assertRaisesWithMessage(
text_format.ParseError, '1:16 : Expected ">".',
text_format.Merge, text, message)
text = 'RepeatedGroup: {'
self.assertRaisesWithMessage(
text_format.ParseError, '1:16 : Expected "}".',
text_format.Merge, text, message)
def testMergeEmptyGroup(self):
message = unittest_pb2.TestAllTypes()
text = 'OptionalGroup: {}'
text_format.Merge(text, message)
self.assertTrue(message.HasField('optionalgroup'))
message.Clear()
message = unittest_pb2.TestAllTypes()
text = 'OptionalGroup: <>'
text_format.Merge(text, message)
self.assertTrue(message.HasField('optionalgroup'))
def testMergeBadEnumValue(self):
message = unittest_pb2.TestAllTypes()
text = 'optional_nested_enum: BARR'
self.assertRaisesWithMessage(
text_format.ParseError,
('1:23 : Enum type "protobuf_unittest.TestAllTypes.NestedEnum" '
'has no value named BARR.'),
text_format.Merge, text, message)
message = unittest_pb2.TestAllTypes()
text = 'optional_nested_enum: 100'
self.assertRaisesWithMessage(
text_format.ParseError,
('1:23 : Enum type "protobuf_unittest.TestAllTypes.NestedEnum" '
'has no value with number 100.'),
text_format.Merge, text, message)
def testMergeBadIntValue(self):
message = unittest_pb2.TestAllTypes()
text = 'optional_int32: bork'
self.assertRaisesWithMessage(
text_format.ParseError,
('1:17 : Couldn\'t parse integer: bork'),
text_format.Merge, text, message)
def testMergeStringFieldUnescape(self):
message = unittest_pb2.TestAllTypes()
text = r'''repeated_string: "\xf\x62"
repeated_string: "\\xf\\x62"
repeated_string: "\\\xf\\\x62"
repeated_string: "\\\\xf\\\\x62"
repeated_string: "\\\\\xf\\\\\x62"
repeated_string: "\x5cx20"'''
text_format.Merge(text, message)
SLASH = '\\'
self.assertEqual('\x0fb', message.repeated_string[0])
self.assertEqual(SLASH + 'xf' + SLASH + 'x62', message.repeated_string[1])
self.assertEqual(SLASH + '\x0f' + SLASH + 'b', message.repeated_string[2])
self.assertEqual(SLASH + SLASH + 'xf' + SLASH + SLASH + 'x62',
message.repeated_string[3])
self.assertEqual(SLASH + SLASH + '\x0f' + SLASH + SLASH + 'b',
message.repeated_string[4])
self.assertEqual(SLASH + 'x20', message.repeated_string[5])
def assertRaisesWithMessage(self, e_class, e, func, *args, **kwargs):
"""Same as assertRaises, but also compares the exception message."""
if hasattr(e_class, '__name__'):
exc_name = e_class.__name__
else:
exc_name = str(e_class)
try:
func(*args, **kwargs)
except e_class as expr:
if str(expr) != e:
msg = '%s raised, but with wrong message: "%s" instead of "%s"'
raise self.failureException(msg % (exc_name,
str(expr).encode('string_escape'),
e.encode('string_escape')))
return
else:
raise self.failureException('%s not raised' % exc_name)
class TokenizerTest(unittest.TestCase):
"""
def testSimpleTokenCases(self):
text = ('identifier1:"string1"\n \n\n'
'identifier2 : \n \n123 \n identifier3 :\'string\'\n'
'identifiER_4 : 1.1e+2 ID5:-0.23 ID6:\'aaaa\\\'bbbb\'\n'
'ID7 : "aa\\"bb"\n\n\n\n ID8: {A:inf B:-inf C:true D:false}\n'
'ID9: 22 ID10: -111111111111111111 ID11: -22\n'
'ID12: 2222222222222222222 ID13: 1.23456f ID14: 1.2e+2f '
'false_bool: 0 true_BOOL:t \n true_bool1: 1 false_BOOL1:f ' )
tokenizer = text_format._Tokenizer(text)
methods = [(tokenizer.ConsumeIdentifier, 'identifier1'),
':',
(tokenizer.ConsumeString, 'string1'),
(tokenizer.ConsumeIdentifier, 'identifier2'),
':',
(tokenizer.ConsumeInt32, 123),
(tokenizer.ConsumeIdentifier, 'identifier3'),
':',
(tokenizer.ConsumeString, 'string'),
(tokenizer.ConsumeIdentifier, 'identifiER_4'),
':',
(tokenizer.ConsumeFloat, 1.1e+2),
(tokenizer.ConsumeIdentifier, 'ID5'),
':',
(tokenizer.ConsumeFloat, -0.23),
(tokenizer.ConsumeIdentifier, 'ID6'),
':',
(tokenizer.ConsumeString, 'aaaa\'bbbb'),
(tokenizer.ConsumeIdentifier, 'ID7'),
':',
(tokenizer.ConsumeString, 'aa\"bb'),
(tokenizer.ConsumeIdentifier, 'ID8'),
':',
'{',
(tokenizer.ConsumeIdentifier, 'A'),
':',
(tokenizer.ConsumeFloat, float('inf')),
(tokenizer.ConsumeIdentifier, 'B'),
':',
(tokenizer.ConsumeFloat, -float('inf')),
(tokenizer.ConsumeIdentifier, 'C'),
':',
(tokenizer.ConsumeBool, True),
(tokenizer.ConsumeIdentifier, 'D'),
':',
(tokenizer.ConsumeBool, False),
'}',
(tokenizer.ConsumeIdentifier, 'ID9'),
':',
(tokenizer.ConsumeUint32, 22),
(tokenizer.ConsumeIdentifier, 'ID10'),
':',
(tokenizer.ConsumeInt64, -111111111111111111),
(tokenizer.ConsumeIdentifier, 'ID11'),
':',
(tokenizer.ConsumeInt32, -22),
(tokenizer.ConsumeIdentifier, 'ID12'),
':',
(tokenizer.ConsumeUint64, 2222222222222222222),
(tokenizer.ConsumeIdentifier, 'ID13'),
':',
(tokenizer.ConsumeFloat, 1.23456),
(tokenizer.ConsumeIdentifier, 'ID14'),
':',
(tokenizer.ConsumeFloat, 1.2e+2),
(tokenizer.ConsumeIdentifier, 'false_bool'),
':',
(tokenizer.ConsumeBool, False),
(tokenizer.ConsumeIdentifier, 'true_BOOL'),
':',
(tokenizer.ConsumeBool, True),
(tokenizer.ConsumeIdentifier, 'true_bool1'),
':',
(tokenizer.ConsumeBool, True),
(tokenizer.ConsumeIdentifier, 'false_BOOL1'),
':',
(tokenizer.ConsumeBool, False)]
i = 0
while not tokenizer.AtEnd():
m = methods[i]
if type(m) == str:
token = tokenizer.token
self.assertEqual(token, m)
tokenizer.NextToken()
else:
self.assertEqual(m[1], m[0]())
i += 1
def testConsumeIntegers(self):
# This test only tests the failures in the integer parsing methods as well
# as the '0' special cases.
int64_max = (1 << 63) - 1
uint32_max = (1 << 32) - 1
text = '-1 %d %d' % (uint32_max + 1, int64_max + 1)
tokenizer = text_format._Tokenizer(text)
self.assertRaises(text_format.ParseError, tokenizer.ConsumeUint32)
self.assertRaises(text_format.ParseError, tokenizer.ConsumeUint64)
self.assertEqual(-1, tokenizer.ConsumeInt32())
self.assertRaises(text_format.ParseError, tokenizer.ConsumeUint32)
self.assertRaises(text_format.ParseError, tokenizer.ConsumeInt32)
self.assertEqual(uint32_max + 1, tokenizer.ConsumeInt64())
self.assertRaises(text_format.ParseError, tokenizer.ConsumeInt64)
self.assertEqual(int64_max + 1, tokenizer.ConsumeUint64())
self.assertTrue(tokenizer.AtEnd())
text = '-0 -0 0 0'
tokenizer = text_format._Tokenizer(text)
self.assertEqual(0, tokenizer.ConsumeUint32())
self.assertEqual(0, tokenizer.ConsumeUint64())
self.assertEqual(0, tokenizer.ConsumeUint32())
self.assertEqual(0, tokenizer.ConsumeUint64())
self.assertTrue(tokenizer.AtEnd())
"""
def testConsumeByteString(self):
text = '"string1\''
tokenizer = text_format._Tokenizer(text)
self.assertRaises(text_format.ParseError, tokenizer.ConsumeByteString)
text = 'string1"'
tokenizer = text_format._Tokenizer(text)
self.assertRaises(text_format.ParseError, tokenizer.ConsumeByteString)
text = '\n"\\xt"'
tokenizer = text_format._Tokenizer(text)
self.assertRaises(text_format.ParseError, tokenizer.ConsumeByteString)
text = '\n"\\"'
tokenizer = text_format._Tokenizer(text)
self.assertRaises(text_format.ParseError, tokenizer.ConsumeByteString)
text = '\n"\\x"'
tokenizer = text_format._Tokenizer(text)
self.assertRaises(text_format.ParseError, tokenizer.ConsumeByteString)
def testConsumeBool(self):
text = 'not-a-bool'
tokenizer = text_format._Tokenizer(text)
self.assertRaises(text_format.ParseError, tokenizer.ConsumeBool)
if __name__ == '__main__':
unittest.main()
| apache-2.0 |
ogenstad/ansible | lib/ansible/modules/network/f5/bigip_virtual_address.py | 23 | 19782 | #!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright (c) 2017 F5 Networks Inc.
# GNU General Public License v3.0 (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = r'''
---
module: bigip_virtual_address
short_description: Manage LTM virtual addresses on a BIG-IP
description:
- Manage LTM virtual addresses on a BIG-IP.
version_added: "2.4"
options:
address:
description:
- Virtual address. This value cannot be modified after it is set.
required: True
aliases:
- name
netmask:
description:
- Netmask of the provided virtual address. This value cannot be
modified after it is set.
default: 255.255.255.255
connection_limit:
description:
- Specifies the number of concurrent connections that the system
allows on this virtual address.
arp_state:
description:
- Specifies whether the system accepts ARP requests for this virtual
address. When C(disabled), the system does not accept ARP requests.
Note that both ARP and ICMP Echo must be disabled in order for
forwarding virtual servers using that virtual address to forward
ICMP packets; if either is C(enabled), those packets are dropped.
choices:
- enabled
- disabled
auto_delete:
description:
- Specifies whether the system automatically deletes the virtual
address with the deletion of the last associated virtual server.
When C(disabled), specifies that the system leaves the virtual
address even when all associated virtual servers have been deleted.
When creating the virtual address, the default value is C(enabled).
choices:
- enabled
- disabled
icmp_echo:
description:
- Specifies how the systems sends responses to (ICMP) echo requests
on a per-virtual address basis for enabling route advertisement.
When C(enabled), the BIG-IP system intercepts ICMP echo request
packets and responds to them directly. When C(disabled), the BIG-IP
system passes ICMP echo requests through to the backend servers.
When C(selective), the BIG-IP system internally enables or disables
responses based on virtual server state; C(when_any_available),
C(when_all_available), or C(always), regardless of the state of any
virtual servers.
choices:
- enabled
- disabled
- selective
state:
description:
- The virtual address state. If C(absent), an attempt to delete the
virtual address will be made. This will only succeed if this
virtual address is not in use by a virtual server. C(present) creates
the virtual address and enables it. If C(enabled), enable the virtual
address if it exists. If C(disabled), create the virtual address if
needed, and set state to C(disabled).
default: present
choices:
- present
- absent
- enabled
- disabled
advertise_route:
description:
- Specifies what routes of the virtual address the system advertises.
When C(when_any_available), advertises the route when any virtual
server is available. When C(when_all_available), advertises the
route when all virtual servers are available. When C(always), always
advertises the route regardless of the virtual servers available.
choices:
- always
- when_all_available
- when_any_available
use_route_advertisement:
description:
- Specifies whether the system uses route advertisement for this
virtual address. When disabled, the system does not advertise
routes for this virtual address.
type: bool
partition:
description:
- Device partition to manage resources on.
default: Common
version_added: 2.5
traffic_group:
description:
- The traffic group for the virtual address. When creating a new address,
if this value is not specified, the default of C(/Common/traffic-group-1)
will be used.
version_added: 2.5
notes:
- Requires the netaddr Python package on the host. This is as easy as pip
install netaddr.
extends_documentation_fragment: f5
requirements:
- netaddr
author:
- Tim Rupp (@caphrim007)
'''
EXAMPLES = r'''
- name: Add virtual address
bigip_virtual_address:
server: lb.mydomain.net
user: admin
password: secret
state: present
partition: Common
address: 10.10.10.10
delegate_to: localhost
- name: Enable route advertisement on the virtual address
bigip_virtual_address:
server: lb.mydomain.net
user: admin
password: secret
state: present
address: 10.10.10.10
use_route_advertisement: yes
delegate_to: localhost
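# A hypothetical removal task, using the documented C(absent) state:
- name: Remove virtual address
  bigip_virtual_address:
    server: lb.mydomain.net
    user: admin
    password: secret
    state: absent
    address: 10.10.10.10
  delegate_to: localhost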
'''
RETURN = r'''
use_route_advertisement:
description: The new setting for whether to use route advertising or not.
returned: changed
type: bool
sample: true
auto_delete:
description: New setting for auto deleting virtual address.
returned: changed
type: string
sample: enabled
icmp_echo:
description: New ICMP echo setting applied to virtual address.
returned: changed
type: string
sample: disabled
connection_limit:
description: The new connection limit of the virtual address.
returned: changed
type: int
sample: 1000
netmask:
description: The netmask of the virtual address.
returned: created
type: string
sample: 255.255.255.255
arp_state:
description: The new way the virtual address handles ARP requests.
returned: changed
type: string
sample: disabled
address:
description: The address of the virtual address.
returned: created
type: string
sample: 10.10.10.10
state:
description: The new state of the virtual address.
returned: changed
type: string
sample: disabled
'''
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.basic import env_fallback
from ansible.module_utils.parsing.convert_bool import BOOLEANS_TRUE
from ansible.module_utils.parsing.convert_bool import BOOLEANS_FALSE
HAS_DEVEL_IMPORTS = False
try:
# Sideband repository used for dev
from library.module_utils.network.f5.bigip import HAS_F5SDK
from library.module_utils.network.f5.bigip import F5Client
from library.module_utils.network.f5.common import F5ModuleError
from library.module_utils.network.f5.common import AnsibleF5Parameters
from library.module_utils.network.f5.common import cleanup_tokens
from library.module_utils.network.f5.common import fqdn_name
from library.module_utils.network.f5.common import f5_argument_spec
try:
from library.module_utils.network.f5.common import iControlUnexpectedHTTPError
except ImportError:
HAS_F5SDK = False
HAS_DEVEL_IMPORTS = True
except ImportError:
# Upstream Ansible
from ansible.module_utils.network.f5.bigip import HAS_F5SDK
from ansible.module_utils.network.f5.bigip import F5Client
from ansible.module_utils.network.f5.common import F5ModuleError
from ansible.module_utils.network.f5.common import AnsibleF5Parameters
from ansible.module_utils.network.f5.common import cleanup_tokens
from ansible.module_utils.network.f5.common import fqdn_name
from ansible.module_utils.network.f5.common import f5_argument_spec
try:
from ansible.module_utils.network.f5.common import iControlUnexpectedHTTPError
except ImportError:
HAS_F5SDK = False
try:
import netaddr
HAS_NETADDR = True
except ImportError:
HAS_NETADDR = False
class Parameters(AnsibleF5Parameters):
api_map = {
'routeAdvertisement': 'use_route_advertisement',
'autoDelete': 'auto_delete',
'icmpEcho': 'icmp_echo',
'connectionLimit': 'connection_limit',
'serverScope': 'advertise_route',
'mask': 'netmask',
'arp': 'arp_state',
'trafficGroup': 'traffic_group',
}
updatables = [
'use_route_advertisement', 'auto_delete', 'icmp_echo', 'connection_limit',
'arp_state', 'enabled', 'advertise_route', 'traffic_group'
]
returnables = [
'use_route_advertisement', 'auto_delete', 'icmp_echo', 'connection_limit',
'netmask', 'arp_state', 'address', 'state'
]
api_attributes = [
'routeAdvertisement', 'autoDelete', 'icmpEcho', 'connectionLimit',
'advertiseRoute', 'arp', 'mask', 'enabled', 'serverScope', 'trafficGroup'
]
def _fqdn_name(self, value):
if value is not None and not value.startswith('/'):
return '/{0}/{1}'.format(self.partition, value)
return value
@property
def advertise_route(self):
if self._values['advertise_route'] is None:
return None
elif self._values['advertise_route'] in ['any', 'when_any_available']:
return 'any'
elif self._values['advertise_route'] in ['all', 'when_all_available']:
return 'all'
elif self._values['advertise_route'] in ['none', 'always']:
return 'none'
@property
def connection_limit(self):
if self._values['connection_limit'] is None:
return None
return int(self._values['connection_limit'])
@property
def use_route_advertisement(self):
if self._values['use_route_advertisement'] is None:
return None
elif self._values['use_route_advertisement'] in BOOLEANS_TRUE:
return 'enabled'
elif self._values['use_route_advertisement'] == 'enabled':
return 'enabled'
else:
return 'disabled'
@property
def enabled(self):
if self._values['state'] in ['enabled', 'present']:
return 'yes'
elif self._values['enabled'] in BOOLEANS_TRUE:
return 'yes'
elif self._values['state'] == 'disabled':
return 'no'
elif self._values['enabled'] in BOOLEANS_FALSE:
return 'no'
else:
return None
@property
def address(self):
if self._values['address'] is None:
return None
try:
ip = netaddr.IPAddress(self._values['address'])
return str(ip)
except netaddr.core.AddrFormatError:
raise F5ModuleError(
"The provided 'address' is not a valid IP address"
)
@property
def netmask(self):
if self._values['netmask'] is None:
return None
try:
ip = netaddr.IPAddress(self._values['netmask'])
return str(ip)
except netaddr.core.AddrFormatError:
raise F5ModuleError(
"The provided 'netmask' is not a valid IP address"
)
@property
def auto_delete(self):
if self._values['auto_delete'] is None:
return None
elif self._values['auto_delete'] in BOOLEANS_TRUE:
return True
elif self._values['auto_delete'] == 'enabled':
return True
else:
return False
@property
def state(self):
if self.enabled == 'yes' and self._values['state'] != 'present':
return 'enabled'
elif self.enabled == 'no':
return 'disabled'
else:
return self._values['state']
@property
def traffic_group(self):
if self._values['traffic_group'] is None:
return None
else:
result = self._fqdn_name(self._values['traffic_group'])
if result.startswith('/Common/'):
return result
else:
raise F5ModuleError(
"Traffic groups can only exist in /Common"
)
def to_return(self):
result = {}
for returnable in self.returnables:
result[returnable] = getattr(self, returnable)
result = self._filter_params(result)
return result
class Changes(Parameters):
pass
class Difference(object):
def __init__(self, want, have=None):
self.want = want
self.have = have
def compare(self, param):
try:
result = getattr(self, param)
return result
except AttributeError:
return self.__default(param)
def __default(self, param):
attr1 = getattr(self.want, param)
try:
attr2 = getattr(self.have, param)
if attr1 != attr2:
return attr1
except AttributeError:
return attr1
@property
def traffic_group(self):
if self.want.traffic_group != self.have.traffic_group:
return self.want.traffic_group
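# Sketch of the comparison flow: compare() prefers a property named after
# the parameter (such as traffic_group above) and falls back to plain
# inequality via __default():
#
#   diff = Difference(want, have)  # both are Parameters instances
#   diff.compare('traffic_group')  # -> the desired value only if changed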
class ModuleManager(object):
def __init__(self, *args, **kwargs):
self.module = kwargs.get('module', None)
self.client = kwargs.get('client', None)
self.have = None
self.want = Parameters(client=self.client, params=self.module.params)
self.changes = Changes()
def _set_changed_options(self):
changed = {}
for key in Parameters.returnables:
if getattr(self.want, key) is not None:
changed[key] = getattr(self.want, key)
if changed:
self.changes = Changes(params=changed)
def _update_changed_options(self):
diff = Difference(self.want, self.have)
updatables = Parameters.updatables
changed = dict()
for k in updatables:
change = diff.compare(k)
if change is None:
continue
else:
if isinstance(change, dict):
changed.update(change)
else:
changed[k] = change
if changed:
self.changes = Changes(params=changed)
return True
return False
def exec_module(self):
changed = False
result = dict()
state = self.want.state
try:
if state in ['present', 'enabled', 'disabled']:
changed = self.present()
elif state == "absent":
changed = self.absent()
except iControlUnexpectedHTTPError as e:
raise F5ModuleError(str(e))
changes = self.changes.to_return()
result.update(**changes)
result.update(dict(changed=changed))
return result
def should_update(self):
result = self._update_changed_options()
if result:
return True
return False
def present(self):
if self.exists():
return self.update()
else:
return self.create()
def absent(self):
changed = False
if self.exists():
changed = self.remove()
return changed
def read_current_from_device(self):
resource = self.client.api.tm.ltm.virtual_address_s.virtual_address.load(
name=self.want.address,
partition=self.want.partition
)
result = resource.attrs
return Parameters(params=result)
def exists(self):
result = self.client.api.tm.ltm.virtual_address_s.virtual_address.exists(
name=self.want.address,
partition=self.want.partition
)
return result
def update(self):
self.have = self.read_current_from_device()
if self.want.netmask is not None:
if self.have.netmask != self.want.netmask:
raise F5ModuleError(
"The netmask cannot be changed. Delete and recreate "
"the virtual address if you need to do this."
)
if self.want.address is not None:
if self.have.address != self.want.address:
raise F5ModuleError(
"The address cannot be changed. Delete and recreate "
"the virtual address if you need to do this."
)
if not self.should_update():
return False
if self.module.check_mode:
return True
self.update_on_device()
return True
def update_on_device(self):
params = self.want.api_params()
resource = self.client.api.tm.ltm.virtual_address_s.virtual_address.load(
name=self.want.address,
partition=self.want.partition
)
resource.modify(**params)
def create(self):
self._set_changed_options()
if self.want.traffic_group is None:
self.want.update({'traffic_group': '/Common/traffic-group-1'})
if self.module.check_mode:
return True
self.create_on_device()
if self.exists():
return True
else:
raise F5ModuleError("Failed to create the virtual address")
def create_on_device(self):
params = self.want.api_params()
self.client.api.tm.ltm.virtual_address_s.virtual_address.create(
name=self.want.address,
partition=self.want.partition,
address=self.want.address,
**params
)
def remove(self):
if self.module.check_mode:
return True
self.remove_from_device()
if self.exists():
raise F5ModuleError("Failed to delete the virtual address")
return True
def remove_from_device(self):
resource = self.client.api.tm.ltm.virtual_address_s.virtual_address.load(
name=self.want.address,
partition=self.want.partition
)
resource.delete()
class ArgumentSpec(object):
def __init__(self):
self.supports_check_mode = True
argument_spec = dict(
state=dict(
default='present',
choices=['present', 'absent', 'disabled', 'enabled']
),
address=dict(
type='str',
required=True,
aliases=['name']
),
netmask=dict(
type='str',
default='255.255.255.255',
),
connection_limit=dict(
type='int'
),
arp_state=dict(
choices=['enabled', 'disabled'],
),
auto_delete=dict(
choices=['enabled', 'disabled'],
),
icmp_echo=dict(
choices=['enabled', 'disabled', 'selective'],
),
advertise_route=dict(
choices=['always', 'when_all_available', 'when_any_available'],
),
use_route_advertisement=dict(
type='bool'
),
traffic_group=dict(),
partition=dict(
default='Common',
fallback=(env_fallback, ['F5_PARTITION'])
)
)
self.argument_spec = {}
self.argument_spec.update(f5_argument_spec)
self.argument_spec.update(argument_spec)
def main():
spec = ArgumentSpec()
module = AnsibleModule(
argument_spec=spec.argument_spec,
supports_check_mode=spec.supports_check_mode
)
if not HAS_F5SDK:
module.fail_json(msg="The python f5-sdk module is required")
if not HAS_NETADDR:
module.fail_json(msg="The python netaddr module is required")
try:
client = F5Client(**module.params)
mm = ModuleManager(module=module, client=client)
results = mm.exec_module()
cleanup_tokens(client)
module.exit_json(**results)
except F5ModuleError as ex:
cleanup_tokens(client)
module.fail_json(msg=str(ex))
if __name__ == '__main__':
main()
| gpl-3.0 |
FNST-OpenStack/horizon | openstack_dashboard/dashboards/project/stacks/forms.py | 13 | 18349 | # Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import json
import logging
from django.utils import html
from django.utils.translation import ugettext_lazy as _
from django.views.decorators.debug import sensitive_variables # noqa
from oslo_utils import strutils
import six
from horizon import exceptions
from horizon import forms
from horizon import messages
from openstack_dashboard import api
from openstack_dashboard.dashboards.project.images \
import utils as image_utils
from openstack_dashboard.dashboards.project.instances \
import utils as instance_utils
LOG = logging.getLogger(__name__)
def create_upload_form_attributes(prefix, input_type, name):
"""Creates attribute dicts for the switchable upload form
:type prefix: str
:param prefix: prefix (environment, template) of field
:type input_type: str
:param input_type: field type (file, raw, url)
:type name: str
:param name: translated text label to display to user
:rtype: dict
:return: an attribute set to pass to form build
"""
attributes = {'class': 'switched', 'data-switch-on': prefix + 'source'}
attributes['data-' + prefix + 'source-' + input_type] = name
return attributes
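# For example, create_upload_form_attributes('template', 'file',
# _('Template File')) yields the widget attributes:
#
#   {'class': 'switched',
#    'data-switch-on': 'templatesource',
#    'data-templatesource-file': 'Template File'}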
class TemplateForm(forms.SelfHandlingForm):
class Meta(object):
name = _('Select Template')
help_text = _('Select a template to launch a stack.')
# TODO(jomara) - update URL choice for template & environment files
# w/ client side download when applicable
base_choices = [('file', _('File')),
('raw', _('Direct Input'))]
url_choice = [('url', _('URL'))]
attributes = {'class': 'switchable', 'data-slug': 'templatesource'}
template_source = forms.ChoiceField(label=_('Template Source'),
choices=base_choices + url_choice,
widget=forms.Select(attrs=attributes))
attributes = create_upload_form_attributes(
'template',
'file',
_('Template File'))
template_upload = forms.FileField(
label=_('Template File'),
help_text=_('A local template to upload.'),
widget=forms.FileInput(attrs=attributes),
required=False)
attributes = create_upload_form_attributes(
'template',
'url',
_('Template URL'))
template_url = forms.URLField(
label=_('Template URL'),
help_text=_('An external (HTTP) URL to load the template from.'),
widget=forms.TextInput(attrs=attributes),
required=False)
attributes = create_upload_form_attributes(
'template',
'raw',
_('Template Data'))
template_data = forms.CharField(
label=_('Template Data'),
help_text=_('The raw contents of the template.'),
widget=forms.widgets.Textarea(attrs=attributes),
required=False)
attributes = {'data-slug': 'envsource', 'class': 'switchable'}
environment_source = forms.ChoiceField(
label=_('Environment Source'),
choices=base_choices,
widget=forms.Select(attrs=attributes),
required=False)
attributes = create_upload_form_attributes(
'env',
'file',
_('Environment File'))
environment_upload = forms.FileField(
label=_('Environment File'),
help_text=_('A local environment to upload.'),
widget=forms.FileInput(attrs=attributes),
required=False)
attributes = create_upload_form_attributes(
'env',
'raw',
_('Environment Data'))
environment_data = forms.CharField(
label=_('Environment Data'),
help_text=_('The raw contents of the environment file.'),
widget=forms.widgets.Textarea(attrs=attributes),
required=False)
def __init__(self, *args, **kwargs):
self.next_view = kwargs.pop('next_view')
super(TemplateForm, self).__init__(*args, **kwargs)
def clean(self):
cleaned = super(TemplateForm, self).clean()
files = self.request.FILES
self.clean_uploaded_files('template', _('template'), cleaned, files)
self.clean_uploaded_files('environment', _('environment'), cleaned,
files)
# Validate the template and get back the params.
kwargs = {}
if cleaned['template_data']:
kwargs['template'] = cleaned['template_data']
else:
kwargs['template_url'] = cleaned['template_url']
if cleaned['environment_data']:
kwargs['environment'] = cleaned['environment_data']
try:
validated = api.heat.template_validate(self.request, **kwargs)
cleaned['template_validate'] = validated
except Exception as e:
raise forms.ValidationError(six.text_type(e))
return cleaned
def clean_uploaded_files(self, prefix, field_label, cleaned, files):
"""Cleans Template & Environment data from form upload.
Does some of the crunchy bits for processing uploads vs raw
data depending on what the user specified. Identical process
for environment data & template data.
:type prefix: str
:param prefix: prefix (environment, template) of field
:type field_label: str
:param field_label: translated prefix str for messages
:type cleaned: dict
:param cleaned: existing cleaned fields from form
:rtype: dict
:return: cleaned dict including environment & template data
"""
upload_str = prefix + "_upload"
data_str = prefix + "_data"
url = cleaned.get(prefix + '_url')
data = cleaned.get(prefix + '_data')
has_upload = upload_str in files
# Uploaded file handler
if has_upload and not url:
log_template_name = files[upload_str].name
LOG.info('got upload %s' % log_template_name)
tpl = files[upload_str].read()
if tpl.startswith('{'):
try:
json.loads(tpl)
except Exception as e:
msg = _('There was a problem parsing the'
' %(prefix)s: %(error)s')
msg = msg % {'prefix': prefix, 'error': e}
raise forms.ValidationError(msg)
cleaned[data_str] = tpl
# URL handler
elif url and (has_upload or data):
msg = _('Please specify a %s using only one source method.')
msg = msg % field_label
raise forms.ValidationError(msg)
elif prefix == 'template':
# Check for raw template input - blank environment allowed
if not url and not data:
msg = _('You must specify a template via one of the '
'available sources.')
raise forms.ValidationError(msg)
def create_kwargs(self, data):
kwargs = {'parameters': data['template_validate'],
'environment_data': data['environment_data'],
'template_data': data['template_data'],
'template_url': data['template_url']}
if data.get('stack_id'):
kwargs['stack_id'] = data['stack_id']
return kwargs
def handle(self, request, data):
kwargs = self.create_kwargs(data)
# NOTE (gabriel): This is a bit of a hack, essentially rewriting this
# request so that we can chain it as an input to the next view...
# but hey, it totally works.
request.method = 'GET'
return self.next_view.as_view()(request, **kwargs)
class ChangeTemplateForm(TemplateForm):
class Meta(object):
name = _('Edit Template')
help_text = _('Select a new template to re-launch a stack.')
stack_id = forms.CharField(label=_('Stack ID'),
widget=forms.widgets.HiddenInput)
stack_name = forms.CharField(label=_('Stack Name'),
widget=forms.TextInput(attrs={'readonly':
'readonly'}))
class PreviewTemplateForm(TemplateForm):
class Meta(object):
name = _('Preview Template')
help_text = _('Select a new template to preview a stack.')
class CreateStackForm(forms.SelfHandlingForm):
param_prefix = '__param_'
class Meta(object):
name = _('Create Stack')
template_data = forms.CharField(
widget=forms.widgets.HiddenInput,
required=False)
template_url = forms.CharField(
widget=forms.widgets.HiddenInput,
required=False)
environment_data = forms.CharField(
widget=forms.widgets.HiddenInput,
required=False)
parameters = forms.CharField(
widget=forms.widgets.HiddenInput)
stack_name = forms.RegexField(
max_length=255,
label=_('Stack Name'),
help_text=_('Name of the stack to create.'),
regex=r"^[a-zA-Z][a-zA-Z0-9_.-]*$",
error_messages={'invalid':
_('Name must start with a letter and may '
'only contain letters, numbers, underscores, '
'periods and hyphens.')})
timeout_mins = forms.IntegerField(
initial=60,
label=_('Creation Timeout (minutes)'),
help_text=_('Stack creation timeout in minutes.'))
enable_rollback = forms.BooleanField(
label=_('Rollback On Failure'),
help_text=_('Enable rollback on create/update failure.'),
required=False)
def __init__(self, *args, **kwargs):
parameters = kwargs.pop('parameters')
# special case: load template data from API, not passed in params
if kwargs.get('validate_me'):
parameters = kwargs.pop('validate_me')
super(CreateStackForm, self).__init__(*args, **kwargs)
self._build_parameter_fields(parameters)
def _build_parameter_fields(self, template_validate):
self.fields['password'] = forms.CharField(
label=_('Password for user "%s"') % self.request.user.username,
help_text=_('This is required for operations to be performed '
'throughout the lifecycle of the stack'),
widget=forms.PasswordInput())
self.help_text = template_validate['Description']
params = template_validate.get('Parameters', {})
if template_validate.get('ParameterGroups'):
params_in_order = []
for group in template_validate['ParameterGroups']:
for param in group.get('parameters', []):
if param in params:
params_in_order.append((param, params[param]))
else:
# no parameter groups, simply sorted to make the order fixed
params_in_order = sorted(params.items())
for param_key, param in params_in_order:
field = None
field_key = self.param_prefix + param_key
field_args = {
'initial': param.get('Default', None),
'label': param.get('Label', param_key),
'help_text': html.escape(param.get('Description', '')),
'required': param.get('Default', None) is None
}
param_type = param.get('Type', None)
hidden = strutils.bool_from_string(param.get('NoEcho', 'false'))
if 'CustomConstraint' in param:
choices = self._populate_custom_choices(
param['CustomConstraint'])
field_args['choices'] = choices
field = forms.ChoiceField(**field_args)
elif 'AllowedValues' in param:
choices = map(lambda x: (x, x), param['AllowedValues'])
field_args['choices'] = choices
field = forms.ChoiceField(**field_args)
elif param_type == 'Json' and 'Default' in param:
field_args['initial'] = json.dumps(param['Default'])
field = forms.CharField(**field_args)
elif param_type in ('CommaDelimitedList', 'String', 'Json'):
if 'MinLength' in param:
field_args['min_length'] = int(param['MinLength'])
field_args['required'] = param.get('MinLength', 0) > 0
if 'MaxLength' in param:
field_args['max_length'] = int(param['MaxLength'])
if hidden:
field_args['widget'] = forms.PasswordInput()
field = forms.CharField(**field_args)
elif param_type == 'Number':
if 'MinValue' in param:
field_args['min_value'] = int(param['MinValue'])
if 'MaxValue' in param:
field_args['max_value'] = int(param['MaxValue'])
field = forms.IntegerField(**field_args)
# heat-api currently returns the boolean type in lowercase
# (see https://bugs.launchpad.net/heat/+bug/1361448)
# so for better compatibility both are checked here
elif param_type in ('Boolean', 'boolean'):
field = forms.BooleanField(**field_args)
if field:
self.fields[field_key] = field
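# Illustrative sketch (an addition, not in the original file): a minimal
# 'template_validate' payload and the fields the loop above would derive
# from it -- 'flavor' becomes a ChoiceField from AllowedValues, 'count'
# an IntegerField bounded by MinValue/MaxValue.
#
# template_validate = {
#     'Description': 'demo stack',
#     'Parameters': {
#         'flavor': {'Type': 'String',
#                    'AllowedValues': ['m1.small', 'm1.large']},
#         'count': {'Type': 'Number', 'MinValue': '1', 'MaxValue': '5'},
#     },
# }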
@sensitive_variables('password')
def handle(self, request, data):
prefix_length = len(self.param_prefix)
params_list = [(k[prefix_length:], v) for (k, v) in six.iteritems(data)
if k.startswith(self.param_prefix)]
fields = {
'stack_name': data.get('stack_name'),
'timeout_mins': data.get('timeout_mins'),
'disable_rollback': not(data.get('enable_rollback')),
'parameters': dict(params_list),
'password': data.get('password')
}
if data.get('template_data'):
fields['template'] = data.get('template_data')
else:
fields['template_url'] = data.get('template_url')
if data.get('environment_data'):
fields['environment'] = data.get('environment_data')
try:
api.heat.stack_create(self.request, **fields)
messages.success(request, _("Stack creation started."))
return True
except Exception:
exceptions.handle(request)
def _populate_custom_choices(self, custom_type):
if custom_type == 'neutron.network':
return instance_utils.network_field_data(self.request, True)
if custom_type == 'nova.keypair':
return instance_utils.keypair_field_data(self.request, True)
if custom_type == 'glance.image':
return image_utils.image_field_data(self.request, True)
if custom_type == 'nova.flavor':
return instance_utils.flavor_field_data(self.request, True)
return []
class EditStackForm(CreateStackForm):
class Meta(object):
name = _('Update Stack Parameters')
stack_id = forms.CharField(
label=_('Stack ID'),
widget=forms.widgets.HiddenInput)
stack_name = forms.CharField(
label=_('Stack Name'),
widget=forms.TextInput(attrs={'readonly': 'readonly'}))
@sensitive_variables('password')
def handle(self, request, data):
prefix_length = len(self.param_prefix)
params_list = [(k[prefix_length:], v) for (k, v) in six.iteritems(data)
if k.startswith(self.param_prefix)]
stack_id = data.get('stack_id')
fields = {
'stack_name': data.get('stack_name'),
'timeout_mins': data.get('timeout_mins'),
'disable_rollback': not(data.get('enable_rollback')),
'parameters': dict(params_list),
'password': data.get('password')
}
# if the user went directly to this form, resubmit the existing
# template data. otherwise, submit what they had from the first form
if data.get('template_data'):
fields['template'] = data.get('template_data')
elif data.get('template_url'):
fields['template_url'] = data.get('template_url')
elif data.get('parameters'):
fields['template'] = data.get('parameters')
try:
api.heat.stack_update(self.request, stack_id=stack_id, **fields)
messages.success(request, _("Stack update started."))
return True
except Exception:
exceptions.handle(request)
class PreviewStackForm(CreateStackForm):
class Meta(object):
name = _('Preview Stack Parameters')
def __init__(self, *args, **kwargs):
self.next_view = kwargs.pop('next_view')
super(CreateStackForm, self).__init__(*args, **kwargs)
def handle(self, request, data):
prefix_length = len(self.param_prefix)
params_list = [(k[prefix_length:], v) for (k, v) in six.iteritems(data)
if k.startswith(self.param_prefix)]
fields = {
'stack_name': data.get('stack_name'),
'timeout_mins': data.get('timeout_mins'),
'disable_rollback': not(data.get('enable_rollback')),
'parameters': dict(params_list),
}
if data.get('template_data'):
fields['template'] = data.get('template_data')
else:
fields['template_url'] = data.get('template_url')
if data.get('environment_data'):
fields['environment'] = data.get('environment_data')
try:
stack_preview = api.heat.stack_preview(self.request, **fields)
request.method = 'GET'
return self.next_view.as_view()(request,
stack_preview=stack_preview)
except Exception:
exceptions.handle(request)
| apache-2.0 |
fullfanta/mxnet | python/mxnet/rnn/io.py | 2 | 7353 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# coding: utf-8
# pylint: disable=too-many-arguments, too-many-locals
"""Definition of various recurrent neural network cells."""
from __future__ import print_function
import bisect
import random
import numpy as np
from ..io import DataIter, DataBatch, DataDesc
from .. import ndarray
def encode_sentences(sentences, vocab=None, invalid_label=-1, invalid_key='\n', start_label=0):
"""Encode sentences and (optionally) build a mapping
from string tokens to integer indices. Unknown keys
will be added to vocabulary.
Parameters
----------
sentences : list of list of str
A list of sentences to encode. Each sentence
should be a list of string tokens.
vocab : None or dict of str -> int
Optional input Vocabulary
invalid_label : int, default -1
Index for invalid token, like <end-of-sentence>
invalid_key : str, default '\\n'
Key for invalid token. Use '\\n' for end
of sentence by default.
start_label : int
lowest index.
Returns
-------
result : list of list of int
encoded sentences
vocab : dict of str -> int
result vocabulary
"""
idx = start_label
if vocab is None:
vocab = {invalid_key: invalid_label}
new_vocab = True
else:
new_vocab = False
res = []
for sent in sentences:
coded = []
for word in sent:
if word not in vocab:
assert new_vocab, "Unknown token %s"%word
if idx == invalid_label:
idx += 1
vocab[word] = idx
idx += 1
coded.append(vocab[word])
res.append(coded)
return res, vocab
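# Illustrative usage sketch (an addition, not in the original file):
# indices are assigned in encounter order from start_label, skipping
# invalid_label, and the invalid_key is pre-seeded into a new vocabulary.
#
# >>> coded, vocab = encode_sentences([['the', 'cat'], ['the', 'dog']])
# >>> coded
# [[0, 1], [0, 2]]
# >>> sorted(vocab.items())
# [('\n', -1), ('cat', 1), ('dog', 2), ('the', 0)]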
class BucketSentenceIter(DataIter):
"""Simple bucketing iterator for language model.
The label at each sequence step is the following token
in the sequence.
Parameters
----------
sentences : list of list of int
Encoded sentences.
batch_size : int
Batch size of the data.
invalid_label : int, optional
Key for invalid label, e.g. <end-of-sentence>. The default is -1.
dtype : str, optional
Data type of the encoding. The default data type is 'float32'.
buckets : list of int, optional
Size of the data buckets. Automatically generated if None.
data_name : str, optional
Name of the data. The default name is 'data'.
label_name : str, optional
Name of the label. The default name is 'softmax_label'.
layout : str, optional
Format of data and label. 'NT' means (batch_size, length)
and 'TN' means (length, batch_size).
"""
def __init__(self, sentences, batch_size, buckets=None, invalid_label=-1,
data_name='data', label_name='softmax_label', dtype='float32',
layout='NT'):
super(BucketSentenceIter, self).__init__()
if not buckets:
buckets = [i for i, j in enumerate(np.bincount([len(s) for s in sentences]))
if j >= batch_size]
buckets.sort()
ndiscard = 0
self.data = [[] for _ in buckets]
for i, sent in enumerate(sentences):
buck = bisect.bisect_left(buckets, len(sent))
if buck == len(buckets):
ndiscard += 1
continue
buff = np.full((buckets[buck],), invalid_label, dtype=dtype)
buff[:len(sent)] = sent
self.data[buck].append(buff)
self.data = [np.asarray(i, dtype=dtype) for i in self.data if i]
print("WARNING: discarded %d sentences longer than the largest bucket."%ndiscard)
self.batch_size = batch_size
self.buckets = buckets
self.data_name = data_name
self.label_name = label_name
self.dtype = dtype
self.invalid_label = invalid_label
self.nddata = []
self.ndlabel = []
self.major_axis = layout.find('N')
self.layout = layout
self.default_bucket_key = max(buckets)
if self.major_axis == 0:
self.provide_data = [DataDesc(
name=self.data_name, shape=(batch_size, self.default_bucket_key),
layout=self.layout)]
self.provide_label = [DataDesc(
name=self.label_name, shape=(batch_size, self.default_bucket_key),
layout=self.layout)]
elif self.major_axis == 1:
self.provide_data = [DataDesc(
name=self.data_name, shape=(self.default_bucket_key, batch_size),
layout=self.layout)]
self.provide_label = [DataDesc(
name=self.label_name, shape=(self.default_bucket_key, batch_size),
layout=self.layout)]
else:
raise ValueError("Invalid layout %s: Must by NT (batch major) or TN (time major)")
self.idx = []
for i, buck in enumerate(self.data):
self.idx.extend([(i, j) for j in range(0, len(buck) - batch_size + 1, batch_size)])
self.curr_idx = 0
self.reset()
def reset(self):
"""Resets the iterator to the beginning of the data."""
self.curr_idx = 0
random.shuffle(self.idx)
for buck in self.data:
np.random.shuffle(buck)
self.nddata = []
self.ndlabel = []
for buck in self.data:
label = np.empty_like(buck)
label[:, :-1] = buck[:, 1:]
label[:, -1] = self.invalid_label
self.nddata.append(ndarray.array(buck, dtype=self.dtype))
self.ndlabel.append(ndarray.array(label, dtype=self.dtype))
def next(self):
"""Returns the next batch of data."""
if self.curr_idx == len(self.idx):
raise StopIteration
i, j = self.idx[self.curr_idx]
self.curr_idx += 1
if self.major_axis == 1:
data = self.nddata[i][j:j+self.batch_size].T
label = self.ndlabel[i][j:j+self.batch_size].T
else:
data = self.nddata[i][j:j+self.batch_size]
label = self.ndlabel[i][j:j+self.batch_size]
return DataBatch([data], [label], pad=0,
bucket_key=self.buckets[i],
provide_data=[DataDesc(
name=self.data_name, shape=data.shape,
layout=self.layout)],
provide_label=[DataDesc(
name=self.label_name, shape=label.shape,
layout=self.layout)])
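# Illustrative usage sketch (an addition, not in the original file):
# with explicit buckets, all three toy sentences fall into the length-3
# bucket (padded with invalid_label) and one full batch is produced.
#
# it = BucketSentenceIter([[1, 2, 3], [4, 5], [6, 7, 8]],
#                         batch_size=2, buckets=[3, 5])
# batch = it.next()  # DataBatch; data shape (2, 3) for the default 'NT'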
| apache-2.0 |
jic-dtool/dtool-create | setup.py | 1 | 1165 | from setuptools import setup
url = "https://github.com/jic-dtool/dtool-create"
version = "0.23.4"
readme = open('README.rst').read()
setup(
name="dtool-create",
packages=["dtool_create"],
package_data={"dtool_create": ["templates/*"]},
version=version,
description="Dtool plugin for creating datasets",
long_description=readme,
include_package_data=True,
author="Tjelvar Olsson",
author_email="tjelvar.olsson@gmail.com",
url=url,
install_requires=[
"click",
"dtoolcore>=3.6",
"dtool_cli>=0.6.0",
"dtool_symlink>=0.2.0",
"dtool_http",
"ruamel.yaml",
],
entry_points={
"dtool.cli": [
"create=dtool_create.dataset:create",
"name=dtool_create.dataset:name",
"readme=dtool_create.dataset:readme",
"add=dtool_create.dataset:add",
"freeze=dtool_create.dataset:freeze",
"copy=dtool_create.dataset:copy",
"cp=dtool_create.dataset:cp",
"publish=dtool_create.publish:publish",
],
},
download_url="{}/tarball/{}".format(url, version),
license="MIT"
)
| mit |
cjellick/agent | tests/tests/cattle/plugins/host_info/main.py | 2 | 1722 | import logging
from tests.cattle.plugins.host_info.memory import MemoryCollector
from tests.cattle.plugins.host_info.os_c import OSCollector
from tests.cattle.plugins.host_info.cpu import CpuCollector
from tests.cattle.plugins.host_info.disk import DiskCollector
from tests.cattle.plugins.host_info.iops import IopsCollector
log = logging.getLogger('host_info')
class HostInfo(object):
def __init__(self, docker_client=None):
self.docker_client = docker_client
self.iops_collector = IopsCollector()
self.collectors = [MemoryCollector(),
OSCollector(self.docker_client),
DiskCollector(self.docker_client),
CpuCollector(),
self.iops_collector]
def collect_data(self):
data = {}
for collector in self.collectors:
try:
data[collector.key_name()] = collector.get_data()
except:
log.exception(
"Error collecting {0} stats".format(collector.key_name()))
data[collector.key_name()] = {}
return data
def host_labels(self, label_pfx="io.rancher.host"):
labels = {}
for collector in self.collectors:
try:
get_labels = getattr(collector, "get_labels", None)
if callable(get_labels):
labels.update(get_labels(label_pfx))
except:
log.exception(
"Error getting {0} labels".format(collector.key_name()))
return labels if len(labels) > 0 else None
def get_default_disk(self):
return self.iops_collector.get_default_disk()
| apache-2.0 |
Flexget/Flexget | setup.py | 1 | 2371 | import sys
from pathlib import Path
from typing import List
from setuptools import find_packages, setup
long_description = Path('README.rst').read_text()
# Populates __version__ without importing the package
__version__ = None
with open('flexget/_version.py', encoding='utf-8') as ver_file:
exec(ver_file.read()) # pylint: disable=W0122
if not __version__:
print('Could not find __version__ from flexget/_version.py')
sys.exit(1)
def load_requirements(filename: str) -> List[str]:
return [
line.strip()
for line in Path(filename).read_text().splitlines()
if not line.startswith('#')
]
setup(
name='FlexGet',
version=__version__,
description='FlexGet is a program aimed to automate downloading or processing content (torrents, podcasts, etc.) '
'from different sources like RSS-feeds, html-pages, various sites and more.',
long_description=long_description,
long_description_content_type='text/x-rst',
author='Marko Koivusalo',
author_email='marko.koivusalo@gmail.com',
license='MIT',
url='https://flexget.com',
project_urls={
'Repository': 'https://github.com/Flexget/Flexget',
'Issue Tracker': 'https://github.com/Flexget/Flexget/issues',
'Forum': 'https://discuss.flexget.com',
},
packages=find_packages(exclude=['flexget.tests']),
include_package_data=True,
zip_safe=False,
install_requires=load_requirements('requirements.txt'),
tests_require=['pytest'],
extras_require={'dev': load_requirements('dev-requirements.txt')},
entry_points={
'console_scripts': ['flexget = flexget:main'],
'gui_scripts': [
'flexget-headless = flexget:main'
], # This is useful on Windows to avoid a cmd popup
},
python_requires='>=3.6',
classifiers=[
"Development Status :: 5 - Production/Stable",
"License :: OSI Approved :: MIT License",
"Operating System :: OS Independent",
"Programming Language :: Python",
"Programming Language :: Python :: 3.6",
"Programming Language :: Python :: 3.7",
"Programming Language :: Python :: 3.8",
"Programming Language :: Python :: 3.9",
"Programming Language :: Python :: Implementation :: CPython",
"Programming Language :: Python :: Implementation :: PyPy",
],
)
| mit |
probablytom/tomwallis.net | venv/lib/python2.7/site-packages/django/core/mail/backends/smtp.py | 43 | 4723 | """SMTP email backend class."""
import smtplib
import ssl
import threading
from django.conf import settings
from django.core.mail.backends.base import BaseEmailBackend
from django.core.mail.utils import DNS_NAME
from django.core.mail.message import sanitize_address
class EmailBackend(BaseEmailBackend):
"""
A wrapper that manages the SMTP network connection.
"""
def __init__(self, host=None, port=None, username=None, password=None,
use_tls=None, fail_silently=False, use_ssl=None, timeout=None,
**kwargs):
super(EmailBackend, self).__init__(fail_silently=fail_silently)
self.host = host or settings.EMAIL_HOST
self.port = port or settings.EMAIL_PORT
self.username = settings.EMAIL_HOST_USER if username is None else username
self.password = settings.EMAIL_HOST_PASSWORD if password is None else password
self.use_tls = settings.EMAIL_USE_TLS if use_tls is None else use_tls
self.use_ssl = settings.EMAIL_USE_SSL if use_ssl is None else use_ssl
self.timeout = timeout
if self.use_ssl and self.use_tls:
raise ValueError(
"EMAIL_USE_TLS/EMAIL_USE_SSL are mutually exclusive, so only set "
"one of those settings to True.")
self.connection = None
self._lock = threading.RLock()
def open(self):
"""
Ensures we have a connection to the email server. Returns whether or
not a new connection was required (True or False).
"""
if self.connection:
# Nothing to do if the connection is already open.
return False
connection_class = smtplib.SMTP_SSL if self.use_ssl else smtplib.SMTP
# If local_hostname is not specified, socket.getfqdn() gets used.
# For performance, we use the cached FQDN for local_hostname.
connection_params = {'local_hostname': DNS_NAME.get_fqdn()}
if self.timeout is not None:
connection_params['timeout'] = self.timeout
try:
self.connection = connection_class(self.host, self.port, **connection_params)
# TLS/SSL are mutually exclusive, so only attempt TLS over
# non-secure connections.
if not self.use_ssl and self.use_tls:
self.connection.ehlo()
self.connection.starttls()
self.connection.ehlo()
if self.username and self.password:
self.connection.login(self.username, self.password)
return True
except smtplib.SMTPException:
if not self.fail_silently:
raise
def close(self):
"""Closes the connection to the email server."""
if self.connection is None:
return
try:
try:
self.connection.quit()
except (ssl.SSLError, smtplib.SMTPServerDisconnected):
# This happens when calling quit() on a TLS connection
# sometimes, or when the connection was already disconnected
# by the server.
self.connection.close()
except smtplib.SMTPException:
if self.fail_silently:
return
raise
finally:
self.connection = None
def send_messages(self, email_messages):
"""
Sends one or more EmailMessage objects and returns the number of email
messages sent.
"""
if not email_messages:
return
with self._lock:
new_conn_created = self.open()
if not self.connection:
# We failed silently on open().
# Trying to send would be pointless.
return
num_sent = 0
for message in email_messages:
sent = self._send(message)
if sent:
num_sent += 1
if new_conn_created:
self.close()
return num_sent
def _send(self, email_message):
"""A helper method that does the actual sending."""
if not email_message.recipients():
return False
from_email = sanitize_address(email_message.from_email, email_message.encoding)
recipients = [sanitize_address(addr, email_message.encoding)
for addr in email_message.recipients()]
message = email_message.message()
try:
self.connection.sendmail(from_email, recipients, message.as_bytes(linesep='\r\n'))
except smtplib.SMTPException:
if not self.fail_silently:
raise
return False
return True
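# Illustrative usage sketch (an addition, not in the original file): the
# point of open()/close() above is that one SMTP connection is reused for
# the whole batch passed to send_messages().
#
# from django.core.mail import EmailMessage
# backend = EmailBackend(host='smtp.example.com', port=587, use_tls=True)
# msgs = [EmailMessage('Hi', 'Body', 'from@example.com', ['to@example.com'])]
# sent = backend.send_messages(msgs)  # opens once, sends all, closes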
| artistic-2.0 |
kbc-developers/android_kernel_htc_msm8960 | Documentation/networking/cxacru-cf.py | 14668 | 1626 | #!/usr/bin/env python
# Copyright 2009 Simon Arlott
#
# This program is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by the Free
# Software Foundation; either version 2 of the License, or (at your option)
# any later version.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
# more details.
#
# You should have received a copy of the GNU General Public License along with
# this program; if not, write to the Free Software Foundation, Inc., 59
# Temple Place - Suite 330, Boston, MA 02111-1307, USA.
#
# Usage: cxacru-cf.py < cxacru-cf.bin
# Output: values string suitable for the sysfs adsl_config attribute
#
# Warning: cxacru-cf.bin with MD5 hash cdbac2689969d5ed5d4850f117702110
# contains mis-aligned values which will stop the modem from being able
# to make a connection. If the first and last two bytes are removed then
# the values become valid, but the modulation will be forced to ANSI
# T1.413 only which may not be appropriate.
#
# The original binary format is a packed list of le32 values.
import sys
import struct
i = 0
while True:
buf = sys.stdin.read(4)
if len(buf) == 0:
break
elif len(buf) != 4:
sys.stdout.write("\n")
sys.stderr.write("Error: read {0} not 4 bytes\n".format(len(buf)))
sys.exit(1)
if i > 0:
sys.stdout.write(" ")
sys.stdout.write("{0:x}={1}".format(i, struct.unpack("<I", buf)[0]))
i += 1
sys.stdout.write("\n")
| gpl-2.0 |
methane/logbook | logbook/notifiers.py | 2 | 9892 | # -*- coding: utf-8 -*-
"""
logbook.notifiers
~~~~~~~~~~~~~~~~~
System notify handlers for OSX and Linux.
:copyright: (c) 2010 by Armin Ronacher, Christopher Grebs.
:license: BSD, see LICENSE for more details.
"""
import os
import sys
import base64
from time import time
from logbook.base import NOTSET, ERROR, WARNING
from logbook.handlers import Handler, LimitingHandlerMixin
from logbook.helpers import get_application_name, PY2, http_client, u
if PY2:
from urllib import urlencode
else:
from urllib.parse import urlencode
def create_notification_handler(application_name=None, level=NOTSET, icon=None):
"""Creates a handler perfectly fit the current platform. On Linux
systems this creates a :class:`LibNotifyHandler`, on OS X systems it
will create a :class:`GrowlHandler`.
"""
if sys.platform == 'darwin':
return GrowlHandler(application_name, level=level, icon=icon)
return LibNotifyHandler(application_name, level=level, icon=icon)
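# Illustrative usage sketch (an addition, not in the original module):
# pick the platform notifier and bind it so ERROR-and-above records pop
# up as desktop notifications.
#
# from logbook import Logger, ERROR
# handler = create_notification_handler('myapp', level=ERROR)
# with handler.applicationbound():
#     Logger('myapp').error('disk almost full')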
class NotificationBaseHandler(Handler, LimitingHandlerMixin):
"""Baseclass for notification handlers."""
def __init__(self, application_name=None, record_limit=None,
record_delta=None, level=NOTSET, filter=None, bubble=False):
Handler.__init__(self, level, filter, bubble)
LimitingHandlerMixin.__init__(self, record_limit, record_delta)
if application_name is None:
application_name = get_application_name()
self.application_name = application_name
def make_title(self, record):
"""Called to get the title from the record."""
return u('%s: %s') % (record.channel, record.level_name.title())
def make_text(self, record):
"""Called to get the text of the record."""
return record.message
class GrowlHandler(NotificationBaseHandler):
"""A handler that dispatches to Growl. Requires that either growl-py or
py-Growl are installed.
"""
def __init__(self, application_name=None, icon=None, host=None,
password=None, record_limit=None, record_delta=None,
level=NOTSET, filter=None, bubble=False):
NotificationBaseHandler.__init__(self, application_name, record_limit,
record_delta, level, filter, bubble)
# growl is using the deprecated md5 module, but we really don't need
# to see that deprecation warning
from warnings import filterwarnings
filterwarnings(module='Growl', category=DeprecationWarning,
action='ignore')
try:
import Growl
self._growl = Growl
except ImportError:
raise RuntimeError('The growl module is not available. You have '
'to install either growl-py or py-Growl to '
'use the GrowlHandler.')
if icon is not None:
if not os.path.isfile(icon):
raise IOError('Filename to an icon expected.')
icon = self._growl.Image.imageFromPath(icon)
else:
try:
icon = self._growl.Image.imageWithIconForCurrentApplication()
except TypeError:
icon = None
self._notifier = self._growl.GrowlNotifier(
applicationName=self.application_name,
applicationIcon=icon,
notifications=['Notset', 'Debug', 'Info', 'Notice', 'Warning',
'Error', 'Critical'],
hostname=host,
password=password
)
self._notifier.register()
def is_sticky(self, record):
"""Returns `True` if the sticky flag should be set for this record.
The default implementation marks errors and criticals sticky.
"""
return record.level >= ERROR
def get_priority(self, record):
"""Returns the priority flag for Growl. Errors and criticals are
get highest priority (2), warnings get higher priority (1) and the
rest gets 0. Growl allows values between -2 and 2.
"""
if record.level >= ERROR:
return 2
elif record.level == WARNING:
return 1
return 0
def emit(self, record):
if not self.check_delivery(record)[1]:
return
self._notifier.notify(record.level_name.title(),
self.make_title(record),
self.make_text(record),
sticky=self.is_sticky(record),
priority=self.get_priority(record))
class LibNotifyHandler(NotificationBaseHandler):
"""A handler that dispatches to libnotify. Requires pynotify installed.
If `no_init` is set to `True` the initialization of libnotify is skipped.
"""
def __init__(self, application_name=None, icon=None, no_init=False,
record_limit=None, record_delta=None, level=NOTSET,
filter=None, bubble=False):
NotificationBaseHandler.__init__(self, application_name, record_limit,
record_delta, level, filter, bubble)
try:
import pynotify
self._pynotify = pynotify
except ImportError:
raise RuntimeError('The pynotify library is required for '
'the LibNotifyHandler.')
self.icon = icon
if not no_init:
pynotify.init(self.application_name)
def set_notifier_icon(self, notifier, icon):
"""Used to attach an icon on a notifier object."""
try:
from gtk import gdk
except ImportError:
#TODO: raise a warning?
raise RuntimeError('The gtk.gdk module is required to set an icon.')
if icon is not None:
if not isinstance(icon, gdk.Pixbuf):
icon = gdk.pixbuf_new_from_file(icon)
notifier.set_icon_from_pixbuf(icon)
def get_expires(self, record):
"""Returns either EXPIRES_DEFAULT or EXPIRES_NEVER for this record.
The default implementation marks errors and criticals as EXPIRES_NEVER.
"""
pn = self._pynotify
return pn.EXPIRES_NEVER if record.level >= ERROR else pn.EXPIRES_DEFAULT
def get_urgency(self, record):
"""Returns the urgency flag for pynotify. Errors and criticals are
get highest urgency (CRITICAL), warnings get higher priority (NORMAL)
and the rest gets LOW.
"""
pn = self._pynotify
if record.level >= ERROR:
return pn.URGENCY_CRITICAL
elif record.level == WARNING:
return pn.URGENCY_NORMAL
return pn.URGENCY_LOW
def emit(self, record):
if not self.check_delivery(record)[1]:
return
notifier = self._pynotify.Notification(self.make_title(record),
self.make_text(record))
notifier.set_urgency(self.get_urgency(record))
notifier.set_timeout(self.get_expires(record))
self.set_notifier_icon(notifier, self.icon)
notifier.show()
class BoxcarHandler(NotificationBaseHandler):
"""Sends notifications to boxcar.io. Can be forwarded to your iPhone or
other compatible device.
"""
api_url = 'https://boxcar.io/notifications/'
def __init__(self, email, password, record_limit=None, record_delta=None,
level=NOTSET, filter=None, bubble=False):
NotificationBaseHandler.__init__(self, None, record_limit, record_delta,
level, filter, bubble)
self.email = email
self.password = password
def get_screen_name(self, record):
"""Returns the value of the screen name field."""
return record.level_name.title()
def emit(self, record):
if not self.check_delivery(record)[1]:
return
body = urlencode({
'notification[from_screen_name]':
self.get_screen_name(record).encode('utf-8'),
'notification[message]':
self.make_text(record).encode('utf-8'),
'notification[from_remote_service_id]': str(int(time() * 100))
})
con = http_client.HTTPSConnection('boxcar.io')
con.request('POST', '/notifications/', headers={
'Authorization': 'Basic ' +
base64.b64encode((u('%s:%s') %
(self.email, self.password)).encode('utf-8')).strip(),
}, body=body)
con.close()
class NotifoHandler(NotificationBaseHandler):
"""Sends notifications to notifo.com. Can be forwarded to your Desktop,
iPhone, or other compatible device.
"""
def __init__(self, application_name=None, username=None, secret=None,
record_limit=None, record_delta=None, level=NOTSET, filter=None,
bubble=False, hide_level=False):
try:
import notifo
except ImportError:
raise RuntimeError(
'The notifo module is not available. You have '
'to install notifo to use the NotifoHandler.'
)
NotificationBaseHandler.__init__(self, None, record_limit, record_delta,
level, filter, bubble)
self._notifo = notifo
self.application_name = application_name
self.username = username
self.secret = secret
self.hide_level = hide_level
def emit(self, record):
if self.hide_level:
_level_name = None
else:
_level_name = self.level_name
self._notifo.send_notification(self.username, self.secret, None,
record.message, self.application_name,
_level_name, None)
| bsd-3-clause |
diorcety/intellij-community | python/lib/Lib/encodings/hex_codec.py | 528 | 2309 | """ Python 'hex_codec' Codec - 2-digit hex content transfer encoding
Unlike most of the other codecs which target Unicode, this codec
will return Python string objects for both encode and decode.
Written by Marc-Andre Lemburg (mal@lemburg.com).
"""
import codecs, binascii
### Codec APIs
def hex_encode(input,errors='strict'):
""" Encodes the object input and returns a tuple (output
object, length consumed).
errors defines the error handling to apply. It defaults to
'strict' handling which is the only currently supported
error handling for this codec.
"""
assert errors == 'strict'
output = binascii.b2a_hex(input)
return (output, len(input))
def hex_decode(input,errors='strict'):
""" Decodes the object input and returns a tuple (output
object, length consumed).
input must be an object which provides the bf_getreadbuf
buffer slot. Python strings, buffer objects and memory
mapped files are examples of objects providing this slot.
errors defines the error handling to apply. It defaults to
'strict' handling which is the only currently supported
error handling for this codec.
"""
assert errors == 'strict'
output = binascii.a2b_hex(input)
return (output, len(input))
class Codec(codecs.Codec):
def encode(self, input,errors='strict'):
return hex_encode(input,errors)
def decode(self, input,errors='strict'):
return hex_decode(input,errors)
class IncrementalEncoder(codecs.IncrementalEncoder):
def encode(self, input, final=False):
assert self.errors == 'strict'
return binascii.b2a_hex(input)
class IncrementalDecoder(codecs.IncrementalDecoder):
def decode(self, input, final=False):
assert self.errors == 'strict'
return binascii.a2b_hex(input)
class StreamWriter(Codec,codecs.StreamWriter):
pass
class StreamReader(Codec,codecs.StreamReader):
pass
### encodings module API
def getregentry():
return codecs.CodecInfo(
name='hex',
encode=hex_encode,
decode=hex_decode,
incrementalencoder=IncrementalEncoder,
incrementaldecoder=IncrementalDecoder,
streamwriter=StreamWriter,
streamreader=StreamReader,
)
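# Illustrative usage sketch (an addition, not in the original file): the
# module-level helpers are plain str -> str transforms that also report
# the number of input bytes consumed.
#
# >>> hex_encode('hello')
# ('68656c6c6f', 5)
# >>> hex_decode('68656c6c6f')
# ('hello', 10)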
| apache-2.0 |
sgenoud/scikit-learn | sklearn/mixture/tests/test_gmm.py | 3 | 12260 | import itertools
import unittest
import numpy as np
from numpy.testing import assert_array_equal, assert_array_almost_equal, \
assert_raises
from scipy import stats
from sklearn import mixture
from sklearn.datasets.samples_generator import make_spd_matrix
rng = np.random.RandomState(0)
def test_sample_gaussian():
"""
Test sample generation from mixture.sample_gaussian where covariance
is diagonal, spherical and full
"""
n_features, n_samples = 2, 300
axis = 1
mu = rng.randint(10) * rng.rand(n_features)
cv = (rng.rand(n_features) + 1.0) ** 2
samples = mixture.sample_gaussian(
mu, cv, covariance_type='diag', n_samples=n_samples)
assert np.allclose(samples.mean(axis), mu, atol=1.3)
assert np.allclose(samples.var(axis), cv, atol=1.5)
# the same for spherical covariances
cv = (rng.rand() + 1.0) ** 2
samples = mixture.sample_gaussian(
mu, cv, covariance_type='spherical', n_samples=n_samples)
assert np.allclose(samples.mean(axis), mu, atol=1.5)
assert np.allclose(
samples.var(axis), np.repeat(cv, n_features), atol=1.5)
# and for full covariances
A = rng.randn(n_features, n_features)
cv = np.dot(A.T, A) + np.eye(n_features)
samples = mixture.sample_gaussian(
mu, cv, covariance_type='full', n_samples=n_samples)
assert np.allclose(samples.mean(axis), mu, atol=1.3)
assert np.allclose(np.cov(samples), cv, atol=2.5)
def _naive_lmvnpdf_diag(X, mu, cv):
# slow and naive implementation of lmvnpdf
ref = np.empty((len(X), len(mu)))
stds = np.sqrt(cv)
for i, (m, std) in enumerate(itertools.izip(mu, stds)):
ref[:, i] = np.log(stats.norm.pdf(X, m, std)).sum(axis=1)
return ref
def test_lmvnpdf_diag():
"""
test a slow and naive implementation of lmvnpdf and
compare it to the vectorized version (mixture.lmvnpdf) to test
for correctness
"""
n_features, n_components, n_samples = 2, 3, 10
mu = rng.randint(10) * rng.rand(n_components, n_features)
cv = (rng.rand(n_components, n_features) + 1.0) ** 2
X = rng.randint(10) * rng.rand(n_samples, n_features)
ref = _naive_lmvnpdf_diag(X, mu, cv)
lpr = mixture.log_multivariate_normal_density(X, mu, cv, 'diag')
assert_array_almost_equal(lpr, ref)
def test_lmvnpdf_spherical():
n_features, n_components, n_samples = 2, 3, 10
mu = rng.randint(10) * rng.rand(n_components, n_features)
spherecv = rng.rand(n_components, 1) ** 2 + 1
X = rng.randint(10) * rng.rand(n_samples, n_features)
cv = np.tile(spherecv, (n_features, 1))
reference = _naive_lmvnpdf_diag(X, mu, cv)
lpr = mixture.log_multivariate_normal_density(X, mu, spherecv,
'spherical')
assert_array_almost_equal(lpr, reference)
def test_lmvnpdf_full():
n_features, n_components, n_samples = 2, 3, 10
mu = rng.randint(10) * rng.rand(n_components, n_features)
cv = (rng.rand(n_components, n_features) + 1.0) ** 2
X = rng.randint(10) * rng.rand(n_samples, n_features)
fullcv = np.array([np.diag(x) for x in cv])
reference = _naive_lmvnpdf_diag(X, mu, cv)
lpr = mixture.log_multivariate_normal_density(X, mu, fullcv, 'full')
assert_array_almost_equal(lpr, reference)
def test_GMM_attributes():
n_components, n_features = 10, 4
covariance_type = 'diag'
g = mixture.GMM(n_components, covariance_type, random_state=rng)
weights = rng.rand(n_components)
weights = weights / weights.sum()
means = rng.randint(-20, 20, (n_components, n_features))
assert g.n_components == n_components
assert g.covariance_type == covariance_type
g.weights_ = weights
assert_array_almost_equal(g.weights_, weights)
g.means_ = means
assert_array_almost_equal(g.means_, means)
covars = (0.1 + 2 * rng.rand(n_components, n_features)) ** 2
g.covars_ = covars
assert_array_almost_equal(g.covars_, covars)
assert_raises(ValueError, g._set_covars, [])
assert_raises(ValueError, g._set_covars,
np.zeros((n_components - 2, n_features)))
assert_raises(ValueError, mixture.GMM, n_components=20,
covariance_type='badcovariance_type')
class GMMTester():
do_test_eval = True
def _setUp(self):
self.n_components = 10
self.n_features = 4
self.weights = rng.rand(self.n_components)
self.weights = self.weights / self.weights.sum()
self.means = rng.randint(-20, 20, (self.n_components, self.n_features))
self.threshold = -0.5
self.I = np.eye(self.n_features)
self.covars = {'spherical': (0.1 + 2 * \
rng.rand(self.n_components, self.n_features)) ** 2,
'tied': make_spd_matrix(self.n_features, random_state=0) +\
5 * self.I,
'diag': (0.1 + 2 * rng.rand(self.n_components,\
self.n_features)) ** 2,
'full': np.array([make_spd_matrix(self.n_features,\
random_state=0)
+ 5 * self.I for x in range(self.n_components)])}
def test_eval(self):
if not self.do_test_eval:
return # DPGMM does not support setting the means and
# covariances before fitting. There is no way of fixing this
# due to the variational parameters being more expressive than
# covariance matrices
g = self.model(n_components=self.n_components,
covariance_type=self.covariance_type, random_state=rng)
# Make sure the means are far apart so responsibilities.argmax()
# picks the actual component used to generate the observations.
g.means_ = 20 * self.means
g.covars_ = self.covars[self.covariance_type]
g.weights_ = self.weights
gaussidx = np.repeat(range(self.n_components), 5)
n_samples = len(gaussidx)
X = rng.randn(n_samples, self.n_features) + g.means_[gaussidx]
ll, responsibilities = g.eval(X)
self.assertEqual(len(ll), n_samples)
self.assertEqual(responsibilities.shape,
(n_samples, self.n_components))
assert_array_almost_equal(responsibilities.sum(axis=1),
np.ones(n_samples))
assert_array_equal(responsibilities.argmax(axis=1), gaussidx)
def test_sample(self, n=100):
g = self.model(n_components=self.n_components,
covariance_type=self.covariance_type, random_state=rng)
# Make sure the means are far apart so responsibilities.argmax()
# picks the actual component used to generate the observations.
g.means_ = 20 * self.means
g.covars_ = np.maximum(self.covars[self.covariance_type], 0.1)
g.weights_ = self.weights
samples = g.sample(n)
self.assertEqual(samples.shape, (n, self.n_features))
def test_train(self, params='wmc'):
g = mixture.GMM(n_components=self.n_components,
covariance_type=self.covariance_type)
g.weights_ = self.weights
g.means_ = self.means
g.covars_ = 20 * self.covars[self.covariance_type]
# Create a training set by sampling from the predefined distribution.
X = g.sample(n_samples=100)
g = self.model(n_components=self.n_components,
covariance_type=self.covariance_type,
random_state=rng, min_covar=1e-1,
n_iter=1, init_params=params)
g.fit(X)
# Do one training iteration at a time so we can keep track of
# the log likelihood to make sure that it increases after each
# iteration.
trainll = []
for iter in xrange(5):
g.params = params
g.init_params = ''
g.fit(X)
trainll.append(self.score(g, X))
g.n_iter = 10
g.init_params = ''
g.params = params
g.fit(X) # finish fitting
# Note that the log likelihood will sometimes decrease by a
# very small amount after it has more or less converged due to
# the addition of min_covar to the covariance (to prevent
# underflow). This is why the threshold is set to -0.5
# instead of 0.
delta_min = np.diff(trainll).min()
self.assertTrue(
delta_min > self.threshold,
"The min nll increase is %f which is lower than the admissible"
" threshold of %f, for model %s. The likelihoods are %s."
% (delta_min, self.threshold, self.covariance_type, trainll))
def test_train_degenerate(self, params='wmc'):
""" Train on degenerate data with 0 in some dimensions
"""
# Create a training set by sampling from the predefined distribution.
X = rng.randn(100, self.n_features)
X.T[1:] = 0
g = self.model(n_components=2, covariance_type=self.covariance_type,
random_state=rng, min_covar=1e-3, n_iter=5,
init_params=params)
g.fit(X)
trainll = g.score(X)
self.assertTrue(np.sum(np.abs(trainll / 100 / X.shape[1])) < 5)
def test_train_1d(self, params='wmc'):
""" Train on 1-D data
"""
# Create a training set by sampling from the predefined distribution.
X = rng.randn(100, 1)
#X.T[1:] = 0
g = self.model(n_components=2, covariance_type=self.covariance_type,
random_state=rng, min_covar=1e-7, n_iter=5,
init_params=params)
g.fit(X)
trainll = g.score(X)
if isinstance(g, mixture.DPGMM):
self.assertTrue(np.sum(np.abs(trainll / 100)) < 5)
else:
self.assertTrue(np.sum(np.abs(trainll / 100)) < 2)
def score(self, g, X):
return g.score(X).sum()
class TestGMMWithSphericalCovars(unittest.TestCase, GMMTester):
covariance_type = 'spherical'
model = mixture.GMM
setUp = GMMTester._setUp
class TestGMMWithDiagonalCovars(unittest.TestCase, GMMTester):
covariance_type = 'diag'
model = mixture.GMM
setUp = GMMTester._setUp
class TestGMMWithTiedCovars(unittest.TestCase, GMMTester):
covariance_type = 'tied'
model = mixture.GMM
setUp = GMMTester._setUp
class TestGMMWithFullCovars(unittest.TestCase, GMMTester):
covariance_type = 'full'
model = mixture.GMM
setUp = GMMTester._setUp
def test_multiple_init():
"""Test that multiple inits does not much worse than a single one"""
X = rng.randn(30, 5)
X[:10] += 2
g = mixture.GMM(n_components=2, covariance_type='spherical',
random_state=rng, min_covar=1e-7, n_iter=5)
train1 = g.fit(X).score(X).sum()
g.n_init = 5
train2 = g.fit(X).score(X).sum()
assert train2 >= train1 - 1.e-2
def test_n_parameters():
"""Test that the right number of parameters is estimated"""
n_samples, n_dim, n_components = 7, 5, 2
X = rng.randn(n_samples, n_dim)
n_params = {'spherical': 13, 'diag': 21, 'tied': 26, 'full': 41}
for cv_type in ['full', 'tied', 'diag', 'spherical']:
g = mixture.GMM(n_components=n_components, covariance_type=cv_type,
random_state=rng, min_covar=1e-7, n_iter=1)
g.fit(X)
assert g._n_parameters() == n_params[cv_type]
def test_aic():
""" Test the aic and bic criteria"""
n_samples, n_dim, n_components = 50, 3, 2
X = rng.randn(n_samples, n_dim)
SGH = 0.5 * (X.var() + np.log(2 * np.pi)) # standard gaussian entropy
for cv_type in ['full', 'tied', 'diag', 'spherical']:
g = mixture.GMM(n_components=n_components, covariance_type=cv_type,
random_state=rng, min_covar=1e-7)
g.fit(X)
aic = 2 * n_samples * SGH * n_dim + 2 * g._n_parameters()
bic = (2 * n_samples * SGH * n_dim +
np.log(n_samples) * g._n_parameters())
bound = n_dim * 3. / np.sqrt(n_samples)
assert np.abs(g.aic(X) - aic) / n_samples < bound
assert np.abs(g.bic(X) - bic) / n_samples < bound
if __name__ == '__main__':
import nose
nose.runmodule()
| bsd-3-clause |
katarzynamazur/esxi_scripts | copy_register_vms/lifesaver.py | 1 | 1457 | #!/usr/bin/env python3
import sys
def help():
print("\nUsage:\n\t./{0} {1} {2}\n\n".format(sys.argv[0], "change_from", "change_to"))
if __name__ == "__main__":
if len(sys.argv) != 4:
help()
else:
chgfrom = str(sys.argv[1])
chgto = str(sys.argv[2])
snapnum = str(sys.argv[3])
infile = "%s_0.vmdk" % (chgfrom)
outfile = "%s_0.vmdk" % (chgto)
with open(infile, 'r', encoding='utf-8', errors='ignore') as inf, open(outfile, 'w') as outf:
try:
for line in inf:
line = line.replace('%s_0-flat.vmdk' % chgfrom, '%s_0-flat.vmdk' % chgto)
outf.write(line)
except Exception:
pass
infiles = ['%s-Snapshot%s.vmsn' % (chgfrom, snapnum), '%s.vmx' % (chgfrom), '%s_0-000001.vmdk' % (chgfrom), '%s_0.vmdk' % (chgfrom), '%s.vmsd' % (chgfrom)]
outfiles = ['%s-Snapshot%s.vmsn'% (chgto, snapnum), '%s.vmx'% (chgto), '%s_0-000001.vmdk'% (chgto), '%s_0.vmdk'% (chgto), '%s.vmsd'% (chgto)]
for infile, outfile in zip(infiles, outfiles) :
with open(infile, 'r', encoding='utf-8', errors='ignore') as inf, open(outfile, 'w') as outf:
try:
for line in inf:
line = line.replace('%s' % chgfrom, '%s' % chgto)
outf.write(line)
except Exception:
pass
| gpl-3.0 |
OpenMined/PySyft | packages/grid/apps/domain/src/main/core/manager/user_manager.py | 2 | 4499 | # stdlib
from typing import Dict
from typing import List
from typing import Union
# third party
from bcrypt import checkpw
from bcrypt import gensalt
from bcrypt import hashpw
# grid relative
from ..database.users.user import User
from ..exceptions import AuthorizationError
from ..exceptions import InvalidCredentialsError
from ..exceptions import UserNotFoundError
from .database_manager import DatabaseManager
from .role_manager import RoleManager
class UserManager(DatabaseManager):
schema = User
def __init__(self, database):
self._schema = UserManager.schema
self.roles = RoleManager(database)
self.db = database
@property
def common_users(self) -> list:
common_users = []
for role in self.roles.common_roles:
common_users = common_users + list(super().query(role=role.id))
return common_users
@property
def org_users(self) -> list:
org_users = []
for role in self.roles.org_roles:
org_users = org_users + list(super().query(role=role.id))
return org_users
def signup(
self, email: str, password: str, role: int, private_key: str, verify_key: str
):
salt, hashed = self.__salt_and_hash_password(password, 12)
return self.register(
email=email,
role=role,
private_key=private_key,
verify_key=verify_key,
hashed_password=hashed,
salt=salt,
)
def query(self, **kwargs) -> Union[None, List]:
results = super().query(**kwargs)
if len(results) == 0:
raise UserNotFoundError
return results
def first(self, **kwargs) -> Union[None, User]:
result = super().first(**kwargs)
if not result:
raise UserNotFoundError
return result
def login(self, email: str, password: str) -> User:
return self.__login_validation(email, password)
def set(
self,
user_id: str,
email: str = None,
password: str = None,
role: int = 0,
) -> None:
if not self.contain(id=user_id):
raise UserNotFoundError
if email:
key = "email"
value = email
elif password:
salt, hashed = self.__salt_and_hash_password(password, 12)
self.modify({"id": user_id}, {"salt": salt, "hashed_password": hashed})
return
elif role != 0:
key = "role"
value = role
else:
raise Exception
self.modify({"id": user_id}, {key: value})
def can_create_users(self, user_id: str) -> bool:
role = self.role(user_id=user_id)
if role:
return role.can_create_users
else:
return False
def can_upload_data(self, user_id: str) -> bool:
role = self.role(user_id=user_id)
if role:
return role.can_upload_data
else:
return False
def can_triage_requests(self, user_id: str) -> bool:
return self.role(user_id=user_id).can_triage_requests
def can_manage_infrastructure(self, user_id: str) -> bool:
return self.role(user_id=user_id).can_manage_infrastructure
def can_edit_roles(self, user_id: str) -> bool:
return self.role(user_id=user_id).can_edit_roles
def can_create_groups(self, user_id: str) -> bool:
return self.role(user_id=user_id).can_create_groups
def role(self, user_id: int):
try:
user = self.first(id=user_id)
return self.roles.first(id=user.role)
except UserNotFoundError:
return False
def __login_validation(self, email: str, password: str) -> User:
try:
user = self.first(email=email)
hashed = user.hashed_password.encode("UTF-8")
salt = user.salt.encode("UTF-8")
password = password.encode("UTF-8")
if checkpw(password, salt + hashed):
return user
else:
raise InvalidCredentialsError
except UserNotFoundError:
raise InvalidCredentialsError
def __salt_and_hash_password(self, password, rounds):
password = password.encode("UTF-8")
salt = gensalt(rounds=rounds)
hashed = hashpw(password, salt)
hashed = hashed[len(salt) :]
hashed = hashed.decode("UTF-8")
salt = salt.decode("UTF-8")
return salt, hashed
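# Illustrative sketch (an addition, not in the original file) of the
# salt/hash split used above: gensalt() output is a prefix of the full
# hashpw() digest, so storing the remainder and re-concatenating before
# checkpw() -- as __login_validation does -- round-trips correctly.
#
# salt = gensalt(rounds=12)                 # e.g. b'$2b$12$' + 22 chars
# digest = hashpw(b's3cret', salt)          # digest starts with the salt
# stored = digest[len(salt):]               # kept as hashed_password
# assert checkpw(b's3cret', salt + stored)  # reassembled for verification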
| apache-2.0 |
rukin5197/android_kernel_lge_m3s | tools/perf/scripts/python/syscall-counts.py | 944 | 1429 | # system call counts
# (c) 2010, Tom Zanussi <tzanussi@gmail.com>
# Licensed under the terms of the GNU GPL License version 2
#
# Displays system-wide system call totals, broken down by syscall.
# If a [comm] arg is specified, only syscalls called by [comm] are displayed.
import os
import sys
sys.path.append(os.environ['PERF_EXEC_PATH'] + \
'/scripts/python/Perf-Trace-Util/lib/Perf/Trace')
from perf_trace_context import *
from Core import *
usage = "perf trace -s syscall-counts.py [comm]\n";
for_comm = None
if len(sys.argv) > 2:
sys.exit(usage)
if len(sys.argv) > 1:
for_comm = sys.argv[1]
syscalls = autodict()
def trace_begin():
pass
def trace_end():
print_syscall_totals()
def raw_syscalls__sys_enter(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
id, args):
if for_comm is not None:
if common_comm != for_comm:
return
try:
syscalls[id] += 1
except TypeError:
syscalls[id] = 1
def print_syscall_totals():
if for_comm is not None:
print "\nsyscall events for %s:\n\n" % (for_comm),
else:
print "\nsyscall events:\n\n",
print "%-40s %10s\n" % ("event", "count"),
print "%-40s %10s\n" % ("----------------------------------------", \
"-----------"),
for id, val in sorted(syscalls.iteritems(), key = lambda(k, v): (v, k), \
reverse = True):
print "%-40d %10d\n" % (id, val),
| gpl-2.0 |
crazy-canux/xplugin_nagios | plugin/plugins/exchange_2010/src/check_exchange_mounts.py | 1 | 1410 | #!/usr/bin/env python2.7
# -*- coding: utf-8 -*-
# Copyright (C) Canux CHENG <canuxcheng@gmail.com>
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
# OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
# IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM,
# DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
# TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE
# OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""Plugin that checks the Exchange Mailboxes servers MOUNTPOINTS."""
import plugin
from plugin.mounts import PluginXMLMounts
PluginXMLMounts(
version=plugin.version,
description='Check Mailboxes servers Mountpoints of Exchange 2010.'
).run()
| gpl-2.0 |
Tithen-Firion/youtube-dl | youtube_dl/extractor/unistra.py | 87 | 2158 | from __future__ import unicode_literals
import re
from .common import InfoExtractor
from ..utils import qualities
class UnistraIE(InfoExtractor):
_VALID_URL = r'https?://utv\.unistra\.fr/(?:index|video)\.php\?id_video\=(?P<id>\d+)'
_TESTS = [
{
'url': 'http://utv.unistra.fr/video.php?id_video=154',
'md5': '736f605cfdc96724d55bb543ab3ced24',
'info_dict': {
'id': '154',
'ext': 'mp4',
'title': 'M!ss Yella',
'description': 'md5:104892c71bd48e55d70b902736b81bbf',
},
},
{
'url': 'http://utv.unistra.fr/index.php?id_video=437',
'md5': '1ddddd6cccaae76f622ce29b8779636d',
'info_dict': {
'id': '437',
'ext': 'mp4',
'title': 'Prix Louise Weiss 2014',
'description': 'md5:cc3a8735f079f4fb6b0b570fc10c135a',
},
}
]
def _real_extract(self, url):
mobj = re.match(self._VALID_URL, url)
video_id = mobj.group('id')
webpage = self._download_webpage(url, video_id)
files = set(re.findall(r'file\s*:\s*"(/[^"]+)"', webpage))
quality = qualities(['SD', 'HD'])
formats = []
for file_path in files:
format_id = 'HD' if file_path.endswith('-HD.mp4') else 'SD'
formats.append({
'url': 'http://vod-flash.u-strasbg.fr:8080%s' % file_path,
'format_id': format_id,
'quality': quality(format_id)
})
self._sort_formats(formats)
title = self._html_search_regex(
r'<title>UTV - (.*?)</', webpage, 'title')
description = self._html_search_regex(
r'<meta name="Description" content="(.*?)"', webpage, 'description', flags=re.DOTALL)
thumbnail = self._search_regex(
r'image: "(.*?)"', webpage, 'thumbnail')
return {
'id': video_id,
'title': title,
'description': description,
'thumbnail': thumbnail,
'formats': formats
}
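# Illustrative note (an addition, not in the original file): qualities()
# returns a ranking callable over the given ordering, so _sort_formats
# prefers the HD variant; ids outside the list rank below both.
#
# q = qualities(['SD', 'HD'])
# q('SD'), q('HD'), q('unknown')  # -> (0, 1, -1)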
| unlicense |
pyecs/servo | components/script/dom/bindings/codegen/parser/tests/test_treatNonCallableAsNull.py | 106 | 1379 | import WebIDL
def WebIDLTest(parser, harness):
parser.parse("""
callback Function = any(any... arguments);
interface TestTreatNonCallableAsNull1 {
[TreatNonCallableAsNull] attribute Function? onfoo;
attribute Function? onbar;
};
""")
results = parser.finish()
iface = results[1]
attr = iface.members[0]
harness.check(attr.type.treatNonCallableAsNull(), True, "Got the expected value")
attr = iface.members[1]
harness.check(attr.type.treatNonCallableAsNull(), False, "Got the expected value")
parser = parser.reset()
threw = False
try:
parser.parse("""
callback Function = any(any... arguments);
interface TestTreatNonCallableAsNull2 {
[TreatNonCallableAsNull] attribute Function onfoo;
};
""")
results = parser.finish()
except:
threw = True
harness.ok(threw, "Should have thrown.")
parser = parser.reset()
threw = False
try:
parser.parse("""
callback Function = any(any... arguments);
[TreatNonCallableAsNull]
interface TestTreatNonCallableAsNull3 {
attribute Function onfoo;
};
""")
results = parser.finish()
except:
threw = True
harness.ok(threw, "Should have thrown.")
| mpl-2.0 |
Dhivyap/ansible | lib/ansible/plugins/action/slxos_config.py | 13 | 1149 | #
# (c) 2018 Extreme Networks Inc.
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import re
from ansible.plugins.action.network import ActionModule as ActionNetworkModule
PRIVATE_KEYS_RE = re.compile('__.+__')
class ActionModule(ActionNetworkModule):
def run(self, tmp=None, task_vars=None):
del tmp # tmp no longer has any effect
self._config_module = True
return super(ActionModule, self).run(task_vars=task_vars)
| gpl-3.0 |
comocheng/RMG-Py | examples/rmg/liquid_phase/input.py | 2 | 1093 | # Data sources
database(
thermoLibraries = ['primaryThermoLibrary'],
reactionLibraries = [],
seedMechanisms = [],
kineticsDepositories = ['training'],
kineticsFamilies = 'default',
kineticsEstimator = 'rate rules',
)
# Constraints on generated species
generatedSpeciesConstraints(
maximumRadicalElectrons = 3,
)
# List of species
species(
label='octane',
reactive=True,
structure=SMILES("C(CCCCC)CC"),
)
species(
label='oxygen',
reactive=True,
structure=SMILES("[O][O]"),
)
# Reaction systems
liquidReactor(
temperature=(500,'K'),
initialConcentrations={
"octane": (6.154e-3,'mol/cm^3'),
"oxygen": (4.953e-6,'mol/cm^3')
},
terminationTime=(5,'s'),
)
solvation(
solvent='octane'
)
simulator(
atol=1e-16,
rtol=1e-8,
)
model(
toleranceKeepInEdge=1E-9,
toleranceMoveToCore=0.001,
toleranceInterruptSimulation=0.1,
maximumEdgeSpecies=100000
)
options(
units='si',
saveRestartPeriod=None,
drawMolecules=False,
generatePlots=False,
saveSimulationProfiles=True,
)
| mit |
jelugbo/ddi | common/djangoapps/util/tests/test_date_utils.py | 52 | 7723 | # -*- coding: utf-8 -*-
"""
Tests for util.date_utils
"""
from datetime import datetime, timedelta, tzinfo
import unittest
import ddt
from mock import patch
from nose.tools import assert_equals, assert_false # pylint: disable=E0611
from pytz import UTC
from util.date_utils import (
get_default_time_display, get_time_display, almost_same_datetime,
strftime_localized,
)
def test_get_default_time_display():
assert_equals("", get_default_time_display(None))
test_time = datetime(1992, 3, 12, 15, 3, 30, tzinfo=UTC)
assert_equals(
"Mar 12, 1992 at 15:03 UTC",
get_default_time_display(test_time))
def test_get_dflt_time_disp_notz():
test_time = datetime(1992, 3, 12, 15, 3, 30)
assert_equals(
"Mar 12, 1992 at 15:03 UTC",
get_default_time_display(test_time))
def test_get_time_disp_ret_empty():
assert_equals("", get_time_display(None))
test_time = datetime(1992, 3, 12, 15, 3, 30, tzinfo=UTC)
assert_equals("", get_time_display(test_time, ""))
def test_get_time_display():
test_time = datetime(1992, 3, 12, 15, 3, 30, tzinfo=UTC)
assert_equals("dummy text", get_time_display(test_time, 'dummy text'))
assert_equals("Mar 12 1992", get_time_display(test_time, '%b %d %Y'))
assert_equals("Mar 12 1992 UTC", get_time_display(test_time, '%b %d %Y %Z'))
assert_equals("Mar 12 15:03", get_time_display(test_time, '%b %d %H:%M'))
def test_get_time_pass_through():
test_time = datetime(1992, 3, 12, 15, 3, 30, tzinfo=UTC)
assert_equals("Mar 12, 1992 at 15:03 UTC", get_time_display(test_time))
assert_equals("Mar 12, 1992 at 15:03 UTC", get_time_display(test_time, None))
assert_equals("Mar 12, 1992 at 15:03 UTC", get_time_display(test_time, "%"))
def test_get_time_display_coerce():
test_time_standard = datetime(1992, 1, 12, 15, 3, 30, tzinfo=UTC)
test_time_daylight = datetime(1992, 7, 12, 15, 3, 30, tzinfo=UTC)
assert_equals("Jan 12, 1992 at 07:03 PST",
get_time_display(test_time_standard, None, coerce_tz="US/Pacific"))
assert_equals("Jan 12, 1992 at 15:03 UTC",
get_time_display(test_time_standard, None, coerce_tz="NONEXISTENTTZ"))
assert_equals("Jan 12 07:03",
get_time_display(test_time_standard, '%b %d %H:%M', coerce_tz="US/Pacific"))
assert_equals("Jul 12, 1992 at 08:03 PDT",
get_time_display(test_time_daylight, None, coerce_tz="US/Pacific"))
assert_equals("Jul 12, 1992 at 15:03 UTC",
get_time_display(test_time_daylight, None, coerce_tz="NONEXISTENTTZ"))
assert_equals("Jul 12 08:03",
get_time_display(test_time_daylight, '%b %d %H:%M', coerce_tz="US/Pacific"))
# pylint: disable=W0232
class NamelessTZ(tzinfo):
"""Static timezone for testing"""
def utcoffset(self, _dt):
return timedelta(hours=-3)
def dst(self, _dt):
return timedelta(0)
def test_get_default_time_display_no_tzname():
assert_equals("", get_default_time_display(None))
test_time = datetime(1992, 3, 12, 15, 3, 30, tzinfo=NamelessTZ())
assert_equals(
"Mar 12, 1992 at 15:03-0300",
get_default_time_display(test_time))
def test_almost_same_datetime():
assert almost_same_datetime(
datetime(2013, 5, 3, 10, 20, 30),
datetime(2013, 5, 3, 10, 21, 29)
)
assert almost_same_datetime(
datetime(2013, 5, 3, 11, 20, 30),
datetime(2013, 5, 3, 10, 21, 29),
timedelta(hours=1)
)
assert_false(
almost_same_datetime(
datetime(2013, 5, 3, 11, 20, 30),
datetime(2013, 5, 3, 10, 21, 29)
)
)
assert_false(
almost_same_datetime(
datetime(2013, 5, 3, 11, 20, 30),
datetime(2013, 5, 3, 10, 21, 29),
timedelta(minutes=10)
)
)
def fake_ugettext(translations):
"""
Create a fake implementation of ugettext, for testing.
"""
def _ugettext(text): # pylint: disable=missing-docstring
return translations.get(text, text)
return _ugettext
def fake_pgettext(translations):
"""
Create a fake implementation of pgettext, for testing.
"""
def _pgettext(context, text): # pylint: disable=missing-docstring
return translations.get((context, text), text)
return _pgettext
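# A minimal sketch (not part of the original tests) of how the fake
# factories above are used; the sample translations are invented:
def _demo_fake_translations():  # pragma: no cover - illustrative only
    ugettext = fake_ugettext({"Hello": "Bonjour"})
    assert ugettext("Hello") == "Bonjour"
    assert ugettext("Goodbye") == "Goodbye"  # unknown text passes through
    pgettext = fake_pgettext({("month name", "May"): "Mai"})
    assert pgettext("month name", "May") == "Mai"
    assert pgettext("weekday name", "May") == "May"  # context must match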
@ddt.ddt
class StrftimeLocalizedTest(unittest.TestCase):
"""
Tests for strftime_localized.
"""
@ddt.data(
("%Y", "2013"),
("%m/%d/%y", "02/14/13"),
("hello", "hello"),
(u'%Y년 %m월 %d일', u"2013년 02월 14일"),
("%a, %b %d, %Y", "Thu, Feb 14, 2013"),
("%I:%M:%S %p", "04:41:17 PM"),
)
def test_usual_strftime_behavior(self, (fmt, expected)):
dtime = datetime(2013, 02, 14, 16, 41, 17)
self.assertEqual(expected, strftime_localized(dtime, fmt))
# strftime doesn't like Unicode, so do the work in UTF8.
self.assertEqual(expected, dtime.strftime(fmt.encode('utf8')).decode('utf8'))
@ddt.data(
("SHORT_DATE", "Feb 14, 2013"),
("LONG_DATE", "Thursday, February 14, 2013"),
("TIME", "04:41:17 PM"),
("%x %X!", "Feb 14, 2013 04:41:17 PM!"),
)
def test_shortcuts(self, (fmt, expected)):
dtime = datetime(2013, 02, 14, 16, 41, 17)
self.assertEqual(expected, strftime_localized(dtime, fmt))
@patch('util.date_utils.pgettext', fake_pgettext(translations={
("abbreviated month name", "Feb"): "XXfebXX",
("month name", "February"): "XXfebruaryXX",
("abbreviated weekday name", "Thu"): "XXthuXX",
("weekday name", "Thursday"): "XXthursdayXX",
("am/pm indicator", "PM"): "XXpmXX",
}))
@ddt.data(
("SHORT_DATE", "XXfebXX 14, 2013"),
("LONG_DATE", "XXthursdayXX, XXfebruaryXX 14, 2013"),
("DATE_TIME", "XXfebXX 14, 2013 at 16:41"),
("TIME", "04:41:17 XXpmXX"),
("%x %X!", "XXfebXX 14, 2013 04:41:17 XXpmXX!"),
)
def test_translated_words(self, (fmt, expected)):
dtime = datetime(2013, 02, 14, 16, 41, 17)
self.assertEqual(expected, strftime_localized(dtime, fmt))
@patch('util.date_utils.ugettext', fake_ugettext(translations={
"SHORT_DATE_FORMAT": "date(%Y.%m.%d)",
"LONG_DATE_FORMAT": "date(%A.%Y.%B.%d)",
"DATE_TIME_FORMAT": "date(%Y.%m.%d@%H.%M)",
"TIME_FORMAT": "%Hh.%Mm.%Ss",
}))
@ddt.data(
("SHORT_DATE", "date(2013.02.14)"),
("Look: %x", "Look: date(2013.02.14)"),
("LONG_DATE", "date(Thursday.2013.February.14)"),
("DATE_TIME", "date(2013.02.14@16.41)"),
("TIME", "16h.41m.17s"),
("The time is: %X", "The time is: 16h.41m.17s"),
("%x %X", "date(2013.02.14) 16h.41m.17s"),
)
def test_translated_formats(self, (fmt, expected)):
dtime = datetime(2013, 02, 14, 16, 41, 17)
self.assertEqual(expected, strftime_localized(dtime, fmt))
@patch('util.date_utils.ugettext', fake_ugettext(translations={
"SHORT_DATE_FORMAT": "oops date(%Y.%x.%d)",
"TIME_FORMAT": "oops %Hh.%Xm.%Ss",
}))
@ddt.data(
("SHORT_DATE", "Feb 14, 2013"),
("TIME", "04:41:17 PM"),
)
def test_recursion_protection(self, (fmt, expected)):
dtime = datetime(2013, 02, 14, 16, 41, 17)
self.assertEqual(expected, strftime_localized(dtime, fmt))
@ddt.data(
"%",
"Hello%"
"%Y/%m/%d%",
)
def test_invalid_format_strings(self, fmt):
dtime = datetime(2013, 02, 14, 16, 41, 17)
with self.assertRaises(ValueError):
strftime_localized(dtime, fmt)
| agpl-3.0 |
MetSystem/shadowsocks | shadowsocks/shell.py | 10 | 12590 | #!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright 2015 clowwindy
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from __future__ import absolute_import, division, print_function, \
with_statement
import os
import json
import sys
import getopt
import logging
from shadowsocks.common import to_bytes, to_str, IPNetwork
from shadowsocks import encrypt
VERBOSE_LEVEL = 5
verbose = 0
def check_python():
info = sys.version_info
if info[0] == 2 and not info[1] >= 6:
print('Python 2.6+ required')
sys.exit(1)
elif info[0] == 3 and not info[1] >= 3:
print('Python 3.3+ required')
sys.exit(1)
elif info[0] not in [2, 3]:
print('Python version not supported')
sys.exit(1)
def print_exception(e):
global verbose
logging.error(e)
if verbose > 0:
import traceback
traceback.print_exc()
def print_shadowsocks():
version = ''
try:
import pkg_resources
version = pkg_resources.get_distribution('shadowsocks').version
except Exception:
pass
print('Shadowsocks %s' % version)
def find_config():
config_path = 'config.json'
if os.path.exists(config_path):
return config_path
config_path = os.path.join(os.path.dirname(__file__), '../', 'config.json')
if os.path.exists(config_path):
return config_path
return None
def check_config(config, is_local):
if config.get('daemon', None) == 'stop':
# no need to specify configuration for daemon stop
return
if is_local and not config.get('password', None):
logging.error('password not specified')
print_help(is_local)
sys.exit(2)
if not is_local and not config.get('password', None) \
and not config.get('port_password', None):
logging.error('password or port_password not specified')
print_help(is_local)
sys.exit(2)
if 'local_port' in config:
config['local_port'] = int(config['local_port'])
if 'server_port' in config and type(config['server_port']) != list:
config['server_port'] = int(config['server_port'])
if config.get('local_address', '') in [b'0.0.0.0']:
logging.warn('warning: local set to listen on 0.0.0.0, it\'s not safe')
if config.get('server', '') in ['127.0.0.1', 'localhost']:
logging.warn('warning: server set to listen on %s:%s, are you sure?' %
(to_str(config['server']), config['server_port']))
if (config.get('method', '') or '').lower() == 'table':
logging.warn('warning: table is not safe; please use a safer cipher, '
'like AES-256-CFB')
if (config.get('method', '') or '').lower() == 'rc4':
logging.warn('warning: RC4 is not safe; please use a safer cipher, '
'like AES-256-CFB')
if config.get('timeout', 300) < 100:
logging.warn('warning: your timeout %d seems too short' %
int(config.get('timeout')))
if config.get('timeout', 300) > 600:
logging.warn('warning: your timeout %d seems too long' %
int(config.get('timeout')))
if config.get('password') in [b'mypassword']:
logging.error('DON\'T USE DEFAULT PASSWORD! Please change it in your '
'config.json!')
sys.exit(1)
if config.get('user', None) is not None:
if os.name != 'posix':
logging.error('user can be used only on Unix')
sys.exit(1)
encrypt.try_cipher(config['password'], config['method'])
def get_config(is_local):
global verbose
logging.basicConfig(level=logging.INFO,
format='%(levelname)-s: %(message)s')
if is_local:
shortopts = 'hd:s:b:p:k:l:m:c:t:vq'
longopts = ['help', 'fast-open', 'pid-file=', 'log-file=', 'user=',
'version']
else:
shortopts = 'hd:s:p:k:m:c:t:vq'
longopts = ['help', 'fast-open', 'pid-file=', 'log-file=', 'workers=',
'forbidden-ip=', 'user=', 'manager-address=', 'version']
try:
config_path = find_config()
optlist, args = getopt.getopt(sys.argv[1:], shortopts, longopts)
for key, value in optlist:
if key == '-c':
config_path = value
if config_path:
logging.info('loading config from %s' % config_path)
with open(config_path, 'rb') as f:
try:
config = json.loads(f.read().decode('utf8'),
object_hook=_decode_dict)
except ValueError as e:
logging.error('found an error in config.json: %s',
e.message)
sys.exit(1)
else:
config = {}
v_count = 0
for key, value in optlist:
if key == '-p':
config['server_port'] = int(value)
elif key == '-k':
config['password'] = to_bytes(value)
elif key == '-l':
config['local_port'] = int(value)
elif key == '-s':
config['server'] = to_str(value)
elif key == '-m':
config['method'] = to_str(value)
elif key == '-b':
config['local_address'] = to_str(value)
elif key == '-v':
v_count += 1
# '-vv' turns on more verbose mode
config['verbose'] = v_count
elif key == '-t':
config['timeout'] = int(value)
elif key == '--fast-open':
config['fast_open'] = True
elif key == '--workers':
config['workers'] = int(value)
elif key == '--manager-address':
config['manager_address'] = value
elif key == '--user':
config['user'] = to_str(value)
elif key == '--forbidden-ip':
config['forbidden_ip'] = to_str(value).split(',')
elif key in ('-h', '--help'):
if is_local:
print_local_help()
else:
print_server_help()
sys.exit(0)
elif key == '--version':
print_shadowsocks()
sys.exit(0)
elif key == '-d':
config['daemon'] = to_str(value)
elif key == '--pid-file':
config['pid-file'] = to_str(value)
elif key == '--log-file':
config['log-file'] = to_str(value)
elif key == '-q':
v_count -= 1
config['verbose'] = v_count
except getopt.GetoptError as e:
print(e, file=sys.stderr)
print_help(is_local)
sys.exit(2)
if not config:
logging.error('config not specified')
print_help(is_local)
sys.exit(2)
config['password'] = to_bytes(config.get('password', b''))
config['method'] = to_str(config.get('method', 'aes-256-cfb'))
config['port_password'] = config.get('port_password', None)
config['timeout'] = int(config.get('timeout', 300))
config['fast_open'] = config.get('fast_open', False)
config['workers'] = config.get('workers', 1)
config['pid-file'] = config.get('pid-file', '/var/run/shadowsocks.pid')
config['log-file'] = config.get('log-file', '/var/log/shadowsocks.log')
config['verbose'] = config.get('verbose', False)
config['local_address'] = to_str(config.get('local_address', '127.0.0.1'))
config['local_port'] = config.get('local_port', 1080)
if is_local:
if config.get('server', None) is None:
logging.error('server addr not specified')
print_local_help()
sys.exit(2)
else:
config['server'] = to_str(config['server'])
else:
config['server'] = to_str(config.get('server', '0.0.0.0'))
try:
config['forbidden_ip'] = \
IPNetwork(config.get('forbidden_ip', '127.0.0.0/8,::1/128'))
except Exception as e:
logging.error(e)
sys.exit(2)
config['server_port'] = config.get('server_port', 8388)
logging.getLogger('').handlers = []
logging.addLevelName(VERBOSE_LEVEL, 'VERBOSE')
if config['verbose'] >= 2:
level = VERBOSE_LEVEL
elif config['verbose'] == 1:
level = logging.DEBUG
elif config['verbose'] == -1:
level = logging.WARN
elif config['verbose'] <= -2:
level = logging.ERROR
else:
level = logging.INFO
verbose = config['verbose']
logging.basicConfig(level=level,
format='%(asctime)s %(levelname)-8s %(message)s',
datefmt='%Y-%m-%d %H:%M:%S')
check_config(config, is_local)
return config
def print_help(is_local):
if is_local:
print_local_help()
else:
print_server_help()
def print_local_help():
print('''usage: sslocal [OPTION]...
A fast tunnel proxy that helps you bypass firewalls.
You can supply configurations via either a config file or command-line arguments.
Proxy options:
-c CONFIG path to config file
-s SERVER_ADDR server address
-p SERVER_PORT server port, default: 8388
-b LOCAL_ADDR local binding address, default: 127.0.0.1
-l LOCAL_PORT local port, default: 1080
-k PASSWORD password
-m METHOD encryption method, default: aes-256-cfb
-t TIMEOUT timeout in seconds, default: 300
--fast-open use TCP_FASTOPEN, requires Linux 3.7+
General options:
-h, --help show this help message and exit
-d start/stop/restart daemon mode
--pid-file PID_FILE pid file for daemon mode
--log-file LOG_FILE log file for daemon mode
--user USER username to run as
-v, -vv verbose mode
-q, -qq quiet mode, only show warnings/errors
--version show version information
Online help: <https://github.com/shadowsocks/shadowsocks>
''')
def print_server_help():
print('''usage: ssserver [OPTION]...
A fast tunnel proxy that helps you bypass firewalls.
You can supply configurations via either a config file or command-line arguments.
Proxy options:
-c CONFIG path to config file
-s SERVER_ADDR server address, default: 0.0.0.0
-p SERVER_PORT server port, default: 8388
-k PASSWORD password
-m METHOD encryption method, default: aes-256-cfb
-t TIMEOUT timeout in seconds, default: 300
--fast-open use TCP_FASTOPEN, requires Linux 3.7+
--workers WORKERS number of workers, available on Unix/Linux
--forbidden-ip IPLIST comma-separated IP list forbidden to connect
--manager-address ADDR optional server manager UDP address, see wiki
General options:
-h, --help show this help message and exit
-d start/stop/restart daemon mode
--pid-file PID_FILE pid file for daemon mode
--log-file LOG_FILE log file for daemon mode
--user USER username to run as
-v, -vv verbose mode
-q, -qq quiet mode, only show warnings/errors
--version show version information
Online help: <https://github.com/shadowsocks/shadowsocks>
''')
def _decode_list(data):
rv = []
for item in data:
if hasattr(item, 'encode'):
item = item.encode('utf-8')
elif isinstance(item, list):
item = _decode_list(item)
elif isinstance(item, dict):
item = _decode_dict(item)
rv.append(item)
return rv
def _decode_dict(data):
rv = {}
for key, value in data.items():
if hasattr(value, 'encode'):
value = value.encode('utf-8')
elif isinstance(value, list):
value = _decode_list(value)
elif isinstance(value, dict):
value = _decode_dict(value)
rv[key] = value
return rv
| apache-2.0 |
vickyting0910/opengeocoding | 2reinter.py | 1 | 3991 | import pandas as pd
import glob
import time
import numpy as num
inter=sorted(glob.glob('*****.csv'))
w='*****.xlsx'
table1=pd.read_excel(w, '*****', index_col=None, na_values=['NA']).fillna(0)
w='*****.csv'
tab=pd.read_csv(w).fillna(0)
tab.is_copy = False
pd.options.mode.chained_assignment = None
t1=time.time()
for i in range(len(tab)):
if tab["IBR"][i]=='9A' or tab["IBR"][i] == '9B' or tab["IBR"][i] == '09A' or tab["IBR"][i] == '09B':
tab["IBR"][i]='9'
if tab["IBR"][i]=='11A' or tab["IBR"][i] == '11B' or tab["IBR"][i]=='11C' or tab["IBR"][i] == '11D' or tab["IBR"][i]=='36B':
tab["IBR"][i]='11'
if tab["IBR"][i]=='36A' or tab["IBR"][i] == '36B':
tab["IBR"][i]='36'
if tab["IBR"][i]=='13A' or tab["IBR"][i] == '13B' or tab["IBR"][i] == '13C':
tab["IBR"][i]='13'
if tab["IBR"][i]=='23A' or tab["IBR"][i] == '23B' or tab["IBR"][i] == '23E' or tab["IBR"][i] == '23F' or tab["IBR"][i] == '23H':
tab["IBR"][i]='23'
if tab["IBR"][i]=='26A' or tab["IBR"][i] == '26B' or tab["IBR"][i] == '26C' or tab["IBR"][i] == '26D' or tab["IBR"][i] == '26E':
tab["IBR"][i]='26'
if tab["IBR"][i]=='35A' or tab["IBR"][i] == '35B':
tab["IBR"][i]='35'
if tab["IBR"][i]=='36A':
tab["IBR"][i]='36'
if tab["IBR"][i]=='39A' or tab["IBR"][i] == '39B' or tab["IBR"][i] == '39C' or tab["IBR"][i] == '39D':
tab["IBR"][i]='39'
if tab["IBR"][i]=='40A' or tab["IBR"][i] == '40B' or tab["IBR"][i] == '40C':
tab["IBR"][i]='40'
if tab["IBR"][i]=='64A' or tab["IBR"][i] == '64B':
tab["IBR"][i]='64'
if tab["IBR"][i]=='90A' or tab["IBR"][i] == '90B' or tab["IBR"][i] == '90C' or tab["IBR"][i] == '90H' or tab["IBR"][i] == '90F' or tab["IBR"][i] == '90G' or tab["IBR"][i]=='90J' or tab["IBR"][i]=='90Z':
tab["IBR"][i]='90'
#convert to string for the join
for i in range(len(table1)):
table1['IBR_code'][i]=str(table1['IBR_code'][i])
description=table1.set_index([ "IBR_code"])
t2=time.time()
print t2-t1
#index crime
tab["index"]=num.nan
for i in range(len(tab)): #convert to integer
tab["index"][i]=tab.index[i]+1
#join
tab=tab.join(description, on=["IBR"], sort=True, rsuffix='_1', how='outer').fillna(0)
tab=tab[(tab["Reported_address"] != 0)].reset_index(drop=True).fillna(0)
tab["IBR_description"]=tab["crime_des12"]
t3=time.time()
print t3-t2
tab=tab[["Global_ID","Reported_address","Incident_date","Incident_time","Report_date","Report_time","Latitude","Longitude","IBR","IBR_description","Police_Department_Code","PD_description","State_Statute_Literal","State_Statute_Number","flag_geocode",'Fdir_n1','Edir_n1','strname_n1','strtype_n1','Enum_n1','Fdir_n2','Edir_n2','strname_n2','strtype_n2','Enum_n2','comname','mroad1','mratio1','wcorr1','wratio1','mroad2','mratio2','wcorr2','wratio2','match']]
tab=tab.replace("",num.nan)
tab=tab.replace("0",num.nan)
tab=tab.replace("00",num.nan)
tab=tab.replace(0,num.nan)
tab.to_csv('*****.csv',index=False)
for i in range(len(tab)):
tab['Global_ID'][i]=str(tab['Global_ID'][i])
description=tab.set_index([ "Global_ID"])
name1=[i[i.find('inter'):i.rfind('C.csv')+1].replace('_matchgeo','') for i in inter]
for p, q in zip((inter), (name1)):
table1=pd.read_csv(p)
for i in range(len(table1)):
tab['Global_ID'][i]=str(tab['Global_ID'][i])
table1=table1.join(description, on=["Global_ID"], sort=True, rsuffix='_1', how='outer').fillna(0)
table1=table1[(table1["Reported_address"] != 0)].reset_index(drop=True).fillna(0)
table1["IBR_description"]=table1["IBR_description_1"]
table1["IBR"]=table1["IBR_1"]
table1=table1[["Global_ID","Reported_address","Incident_date","Incident_time","Report_date","Report_time","Latitude","Longitude","IBR","IBR_description","Police_Department_Code","PD_description","State_Statute_Literal","State_Statute_Number","flag_geocode",'Fdir_n1','Edir_n1','strname_n1','strtype_n1','Enum_n1','Fdir_n2','Edir_n2','strname_n2','strtype_n2','Enum_n2','comname','mroad1','mratio1','wcorr1','wratio1','mroad2','mratio2','wcorr2','wratio2','match']]
table1.to_csv('*****.csv',index=False)
| bsd-2-clause |
sodexis/odoo | addons/purchase/__openerp__.py | 259 | 3787 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
{
'name': 'Purchase Management',
'version': '1.1',
'category': 'Purchase Management',
'sequence': 19,
'summary': 'Purchase Orders, Receipts, Supplier Invoices',
'description': """
Manage goods requirement by Purchase Orders easily
==================================================
Purchase management enables you to track your suppliers' price quotations and convert them into purchase orders if necessary.
OpenERP has several methods of monitoring invoices and tracking the receipt of ordered goods. You can handle partial deliveries in OpenERP, so you can keep track of items that are still to be delivered in your orders, and you can issue reminders automatically.
OpenERP’s replenishment management rules enable the system to generate draft purchase orders automatically, or you can configure it to run a lean process driven entirely by current production needs.
Dashboard / Reports for Purchase Management will include:
---------------------------------------------------------
* Request for Quotations
* Purchase Orders Waiting Approval
* Monthly Purchases by Category
* Receipt Analysis
* Purchase Analysis
""",
'author': 'OpenERP SA',
'website': 'https://www.odoo.com/page/purchase',
'depends': ['stock_account', 'report'],
'data': [
'security/purchase_security.xml',
'security/ir.model.access.csv',
'purchase_workflow.xml',
'purchase_sequence.xml',
'company_view.xml',
'purchase_data.xml',
'purchase_data.yml',
'wizard/purchase_order_group_view.xml',
'wizard/purchase_line_invoice_view.xml',
'purchase_report.xml',
'purchase_view.xml',
'stock_view.xml',
'partner_view.xml',
'report/purchase_report_view.xml',
'edi/purchase_order_action_data.xml',
'res_config_view.xml',
'views/report_purchaseorder.xml',
'views/report_purchasequotation.xml',
],
'test': [
'test/ui/purchase_users.yml',
'test/process/run_scheduler.yml',
'test/fifo_price.yml',
'test/fifo_returns.yml',
#'test/costmethodchange.yml',
'test/process/cancel_order.yml',
'test/process/rfq2order2done.yml',
'test/process/generate_invoice_from_reception.yml',
'test/process/merge_order.yml',
'test/process/edi_purchase_order.yml',
'test/process/invoice_on_poline.yml',
'test/ui/duplicate_order.yml',
'test/ui/delete_order.yml',
'test/average_price.yml',
],
'demo': [
'purchase_order_demo.yml',
'purchase_demo.xml',
'purchase_stock_demo.yml',
],
'installable': True,
'auto_install': False,
'application': True,
}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
b0ri5/fssw-googlecode | scons/scons-local-2.0.1/SCons/Scanner/LaTeX.py | 61 | 15879 | """SCons.Scanner.LaTeX
This module implements the dependency scanner for LaTeX code.
"""
#
# Copyright (c) 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010 The SCons Foundation
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
__revision__ = "src/engine/SCons/Scanner/LaTeX.py 5134 2010/08/16 23:02:40 bdeegan"
import os.path
import re
import SCons.Scanner
import SCons.Util
# list of graphics file extensions for TeX and LaTeX
TexGraphics = ['.eps', '.ps']
LatexGraphics = ['.pdf', '.png', '.jpg', '.gif', '.tif']
# Used as a return value of modify_env_var if the variable is not set.
class _Null(object):
pass
_null = _Null
# The user specifies the paths in env[variable], similar to other builders.
# They may be relative and must be converted to absolute, as expected
# by LaTeX and Co. The environment may already have some paths in
# env['ENV'][var]. These paths are honored, but the env[var] paths have
# higher precedence. All changes are un-done on exit.
def modify_env_var(env, var, abspath):
try:
save = env['ENV'][var]
except KeyError:
save = _null
env.PrependENVPath(var, abspath)
try:
if SCons.Util.is_List(env[var]):
env.PrependENVPath(var, [os.path.abspath(str(p)) for p in env[var]])
else:
# Split at os.pathsep to convert into absolute path
env.PrependENVPath(var, [os.path.abspath(p) for p in str(env[var]).split(os.pathsep)])
except KeyError:
pass
# Convert into a string explicitly to append ":" (without which it won't search system
# paths as well). The problem is that env.AppendENVPath(var, ":")
# does not work; it refuses to append ":" (os.pathsep).
if SCons.Util.is_List(env['ENV'][var]):
env['ENV'][var] = os.pathsep.join(env['ENV'][var])
# Append the trailing os.pathsep character here to catch the case with no env[var]
env['ENV'][var] = env['ENV'][var] + os.pathsep
return save
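# A minimal sketch (not in the original module) of how the returned `save`
# value can honor the "all changes are un-done on exit" contract noted above:
def _restore_env_var(env, var, save):  # pragma: no cover - illustrative only
    # `save` is the value previously returned by modify_env_var(); _null
    # means the variable was not set before the modification.
    if save is _null:
        del env['ENV'][var]
    else:
        env['ENV'][var] = save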
class FindENVPathDirs(object):
"""A class to bind a specific *PATH variable name to a function that
will return all of the *path directories."""
def __init__(self, variable):
self.variable = variable
def __call__(self, env, dir=None, target=None, source=None, argument=None):
import SCons.PathList
try:
path = env['ENV'][self.variable]
except KeyError:
return ()
dir = dir or env.fs._cwd
path = SCons.PathList.PathList(path).subst_path(env, target, source)
return tuple(dir.Rfindalldirs(path))
def LaTeXScanner():
"""Return a prototype Scanner instance for scanning LaTeX source files
when built with latex.
"""
ds = LaTeX(name = "LaTeXScanner",
suffixes = '$LATEXSUFFIXES',
# in the search order, see below in LaTeX class docstring
graphics_extensions = TexGraphics,
recursive = 0)
return ds
def PDFLaTeXScanner():
"""Return a prototype Scanner instance for scanning LaTeX source files
when built with pdflatex.
"""
ds = LaTeX(name = "PDFLaTeXScanner",
suffixes = '$LATEXSUFFIXES',
# in the search order, see below in LaTeX class docstring
graphics_extensions = LatexGraphics,
recursive = 0)
return ds
class LaTeX(SCons.Scanner.Base):
"""Class for scanning LaTeX files for included files.
Unlike most scanners, which use regular expressions that just
return the included file name, this returns a tuple consisting
of the keyword for the inclusion ("include", "includegraphics",
"input", or "bibliography"), and then the file name itself.
Based on a quick look at LaTeX documentation, it seems that we
should append .tex suffix for the "include" keywords, append .tex if
there is no extension for the "input" keyword, and need to add .bib
for the "bibliography" keyword that does not accept extensions by itself.
Finally, if there is no extension for an "includegraphics" keyword
latex will append .ps or .eps to find the file, while pdftex may use .pdf,
.jpg, .tif, .mps, or .png.
The actual subset and search order may be altered by
DeclareGraphicsExtensions command. This complication is ignored.
The default order corresponds to experimentation with teTeX
$ latex --version
pdfeTeX 3.141592-1.21a-2.2 (Web2C 7.5.4)
kpathsea version 3.5.4
The order is:
['.eps', '.ps'] for latex
['.png', '.pdf', '.jpg', '.tif'].
Another difference is that the search path is determined by the type
of the file being searched:
env['TEXINPUTS'] for "input" and "include" keywords
env['TEXINPUTS'] for "includegraphics" keyword
env['TEXINPUTS'] for "lstinputlisting" keyword
env['BIBINPUTS'] for "bibliography" keyword
env['BSTINPUTS'] for "bibliographystyle" keyword
FIXME: also look for the class or style in document[class|style]{}
FIXME: also look for the argument of bibliographystyle{}
"""
keyword_paths = {'include': 'TEXINPUTS',
'input': 'TEXINPUTS',
'includegraphics': 'TEXINPUTS',
'bibliography': 'BIBINPUTS',
'bibliographystyle': 'BSTINPUTS',
'usepackage': 'TEXINPUTS',
'lstinputlisting': 'TEXINPUTS'}
env_variables = SCons.Util.unique(list(keyword_paths.values()))
def __init__(self, name, suffixes, graphics_extensions, *args, **kw):
# We have to include \n with the % we exclude from the first
# part of the regex because the expression is compiled with re.M.
# Without the \n, the ^ could match the beginning of a *previous*
# line followed by one or more newline characters (i.e. blank
# lines), interfering with a match on the next line.
# add option for whitespace before the '[options]' or the '{filename}'
regex = r'^[^%\n]*\\(include|includegraphics(?:\s*\[[^\]]+\])?|lstinputlisting(?:\[[^\]]+\])?|input|bibliography|usepackage)\s*{([^}]*)}'
self.cre = re.compile(regex, re.M)
self.comment_re = re.compile(r'^((?:(?:\\%)|[^%\n])*)(.*)$', re.M)
self.graphics_extensions = graphics_extensions
def _scan(node, env, path=(), self=self):
node = node.rfile()
if not node.exists():
return []
return self.scan_recurse(node, path)
class FindMultiPathDirs(object):
"""The stock FindPathDirs function has the wrong granularity:
it is called once per target, while we need a path that depends
on the kind of included file being searched for. This wrapper
hides multiple instances of FindPathDirs, one per the LaTeX path
variable in the environment. When invoked, the function calculates
and returns all the required paths as a dictionary (converted into
a tuple to become hashable). Then the scan function converts it
back and uses a dictionary of tuples rather than a single tuple
of paths.
"""
def __init__(self, dictionary):
self.dictionary = {}
for k,n in dictionary.items():
self.dictionary[k] = ( SCons.Scanner.FindPathDirs(n),
FindENVPathDirs(n) )
def __call__(self, env, dir=None, target=None, source=None,
argument=None):
di = {}
for k,(c,cENV) in self.dictionary.items():
di[k] = ( c(env, dir=None, target=None, source=None,
argument=None) ,
cENV(env, dir=None, target=None, source=None,
argument=None) )
# To prevent a "dict is not hashable" error
return tuple(di.items())
class LaTeXScanCheck(object):
"""Skip all but LaTeX source files, i.e., do not scan *.eps,
*.pdf, *.jpg, etc.
"""
def __init__(self, suffixes):
self.suffixes = suffixes
def __call__(self, node, env):
current = not node.has_builder() or node.is_up_to_date()
scannable = node.get_suffix() in env.subst_list(self.suffixes)[0]
# Returning false means that the file is not scanned.
return scannable and current
kw['function'] = _scan
kw['path_function'] = FindMultiPathDirs(LaTeX.keyword_paths)
kw['recursive'] = 0
kw['skeys'] = suffixes
kw['scan_check'] = LaTeXScanCheck(suffixes)
kw['name'] = name
SCons.Scanner.Base.__init__(self, *args, **kw)
def _latex_names(self, include):
filename = include[1]
if include[0] == 'input':
base, ext = os.path.splitext( filename )
if ext == "":
return [filename + '.tex']
if (include[0] == 'include'):
return [filename + '.tex']
if include[0] == 'bibliography':
base, ext = os.path.splitext( filename )
if ext == "":
return [filename + '.bib']
if include[0] == 'usepackage':
base, ext = os.path.splitext( filename )
if ext == "":
return [filename + '.sty']
if include[0] == 'includegraphics':
base, ext = os.path.splitext( filename )
if ext == "":
#return [filename+e for e in self.graphics_extensions + TexGraphics]
# use the line above to find dependencies for the PDF builder
# when only an .eps figure is present. Since it will be found
# if the user tells scons how to make the pdf figure, leave
# it out for now.
return [filename+e for e in self.graphics_extensions]
return [filename]
def sort_key(self, include):
return SCons.Node.FS._my_normcase(str(include))
def find_include(self, include, source_dir, path):
try:
sub_path = path[include[0]]
except (IndexError, KeyError):
sub_path = ()
try_names = self._latex_names(include)
for n in try_names:
# see if we find it using the path in env[var]
i = SCons.Node.FS.find_file(n, (source_dir,) + sub_path[0])
if i:
return i, include
# see if we find it using the path in env['ENV'][var]
i = SCons.Node.FS.find_file(n, (source_dir,) + sub_path[1])
if i:
return i, include
return i, include
def canonical_text(self, text):
"""Standardize an input TeX-file contents.
Currently:
* removes comments, unwrapping comment-wrapped lines.
"""
out = []
line_continues_a_comment = False
for line in text.splitlines():
line,comment = self.comment_re.findall(line)[0]
if line_continues_a_comment == True:
out[-1] = out[-1] + line.lstrip()
else:
out.append(line)
line_continues_a_comment = len(comment) > 0
return '\n'.join(out).rstrip()+'\n'
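# A quick illustration of canonical_text (inputs invented): an unescaped
# '%' drops the rest of the line *and* the newline, so the next line is
# joined on, matching TeX comment semantics:
#   'fig%\n    ure.eps' -> 'figure.eps\n'
#   'foo % bar\nbaz'    -> 'foo baz\n'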
def scan(self, node):
# Modify the default scan function to allow for the regular
# expression to return a comma separated list of file names
# as can be the case with the bibliography keyword.
# Cache the includes list in node so we only scan it once:
# path_dict = dict(list(path))
# add option for whitespace (\s) before the '['
noopt_cre = re.compile('\s*\[.*$')
if node.includes != None:
includes = node.includes
else:
text = self.canonical_text(node.get_text_contents())
includes = self.cre.findall(text)
# 1. Split comma-separated lines, e.g.
# ('bibliography', 'phys,comp')
# should become two entries
# ('bibliography', 'phys')
# ('bibliography', 'comp')
# 2. Remove the options, e.g., such as
# ('includegraphics[clip,width=0.7\\linewidth]', 'picture.eps')
# should become
# ('includegraphics', 'picture.eps')
split_includes = []
for include in includes:
inc_type = noopt_cre.sub('', include[0])
inc_list = include[1].split(',')
for j in range(len(inc_list)):
split_includes.append( (inc_type, inc_list[j]) )
#
includes = split_includes
node.includes = includes
return includes
def scan_recurse(self, node, path=()):
""" do a recursive scan of the top level target file
This lets us search for included files based on the
directory of the main file just as latex does"""
path_dict = dict(list(path))
queue = []
queue.extend( self.scan(node) )
seen = {}
# This is a hand-coded DSU (decorate-sort-undecorate, or
# Schwartzian transform) pattern. The sort key is the raw name
# of the file as specified on the \include, \input, etc. line.
# TODO: what about the comment in the original Classic scanner:
# """which lets
# us keep the sort order constant regardless of whether the file
# is actually found in a Repository or locally."""
nodes = []
source_dir = node.get_dir()
#for include in includes:
while queue:
include = queue.pop()
try:
if seen[include[1]] == 1:
continue
except KeyError:
seen[include[1]] = 1
#
# Handle multiple filenames in include[1]
#
n, i = self.find_include(include, source_dir, path_dict)
if n is None:
# Do not bother with 'usepackage' warnings, as they most
# likely refer to system-level files
if include[0] != 'usepackage':
SCons.Warnings.warn(SCons.Warnings.DependencyWarning,
"No dependency generated for file: %s (included from: %s) -- file not found" % (i, node))
else:
sortkey = self.sort_key(n)
nodes.append((sortkey, n))
# recurse down
queue.extend( self.scan(n) )
return [pair[1] for pair in sorted(nodes)]
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4:
| lgpl-3.0 |
AnthonyBroadCrawford/servo | tests/wpt/harness/wptrunner/metadata.py | 38 | 11541 | # This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
import os
import shutil
import sys
import tempfile
import types
import uuid
from collections import defaultdict
from mozlog import reader
from mozlog import structuredlog
import expected
import manifestupdate
import testloader
import wptmanifest
import wpttest
from vcs import git
manifest = None # Module that will be imported relative to test_root
logger = structuredlog.StructuredLogger("web-platform-tests")
def load_test_manifests(serve_root, test_paths):
do_delayed_imports(serve_root)
manifest_loader = testloader.ManifestLoader(test_paths, False)
return manifest_loader.load()
def update_expected(test_paths, serve_root, log_file_names,
rev_old=None, rev_new="HEAD", ignore_existing=False,
sync_root=None):
"""Update the metadata files for web-platform-tests based on
the results obtained in a previous run"""
manifests = load_test_manifests(serve_root, test_paths)
change_data = {}
if sync_root is not None:
if rev_old is not None:
rev_old = git("rev-parse", rev_old, repo=sync_root).strip()
rev_new = git("rev-parse", rev_new, repo=sync_root).strip()
if rev_old is not None:
change_data = load_change_data(rev_old, rev_new, repo=sync_root)
expected_map_by_manifest = update_from_logs(manifests,
*log_file_names,
ignore_existing=ignore_existing)
for test_manifest, expected_map in expected_map_by_manifest.iteritems():
url_base = manifests[test_manifest]["url_base"]
metadata_path = test_paths[url_base]["metadata_path"]
write_changes(metadata_path, expected_map)
results_changed = [item.test_path for item in expected_map.itervalues() if item.modified]
return unexpected_changes(manifests, change_data, results_changed)
def do_delayed_imports(serve_root):
global manifest
from manifest import manifest
def files_in_repo(repo_root):
return git("ls-tree", "-r", "--name-only", "HEAD").split("\n")
def rev_range(rev_old, rev_new, symmetric=False):
joiner = ".." if not symmetric else "..."
return "".join([rev_old, joiner, rev_new])
def paths_changed(rev_old, rev_new, repo):
data = git("diff", "--name-status", rev_range(rev_old, rev_new), repo=repo)
lines = [tuple(item.strip() for item in line.strip().split("\t", 1))
for line in data.split("\n") if line.strip()]
output = set(lines)
return output
def load_change_data(rev_old, rev_new, repo):
changes = paths_changed(rev_old, rev_new, repo)
rv = {}
status_keys = {"M": "modified",
"A": "new",
"D": "deleted"}
# TODO: deal with renames
for item in changes:
rv[item[1]] = status_keys[item[0]]
return rv
def unexpected_changes(manifests, change_data, files_changed):
files_changed = set(files_changed)
root_manifest = None
for manifest, paths in manifests.iteritems():
if paths["url_base"] == "/":
root_manifest = manifest
break
else:
return []
rv = []
return [fn for fn, tests in root_manifest if fn in files_changed and change_data.get(fn) != "M"]
# For each testrun
# Load all files and scan for the suite_start entry
# Build a hash of filename: properties
# For each different set of properties, gather all chunks
# For each chunk in the set of chunks, go through all tests
# for each test, make a map of {conditionals: [(platform, new_value)]}
# Repeat for each platform
# For each test in the list of tests:
# for each conditional:
# If all the new values match (or there aren't any) retain that conditional
# If any new values mismatch mark the test as needing human attention
# Check if all the RHS values are the same; if so collapse the conditionals
def update_from_logs(manifests, *log_filenames, **kwargs):
ignore_existing = kwargs.pop("ignore_existing", False)
expected_map = {}
id_test_map = {}
for test_manifest, paths in manifests.iteritems():
expected_map_manifest, id_path_map_manifest = create_test_tree(paths["metadata_path"],
test_manifest)
expected_map[test_manifest] = expected_map_manifest
id_test_map.update(id_path_map_manifest)
updater = ExpectedUpdater(manifests, expected_map, id_test_map,
ignore_existing=ignore_existing)
for log_filename in log_filenames:
with open(log_filename) as f:
updater.update_from_log(f)
for manifest_expected in expected_map.itervalues():
for tree in manifest_expected.itervalues():
for test in tree.iterchildren():
for subtest in test.iterchildren():
subtest.coalesce_expected()
test.coalesce_expected()
return expected_map
def directory_manifests(metadata_path):
rv = []
for dirpath, dirname, filenames in os.walk(metadata_path):
if "__dir__.ini" in filenames:
rel_path = os.path.relpath(dirpath, metadata_path)
rv.append(os.path.join(rel_path, "__dir__.ini"))
return rv
def write_changes(metadata_path, expected_map):
# First write the new manifest files to a temporary directory
temp_path = tempfile.mkdtemp(dir=os.path.split(metadata_path)[0])
write_new_expected(temp_path, expected_map)
# Keep all __dir__.ini files (these are not in expected_map because they
# aren't associated with a specific test)
keep_files = directory_manifests(metadata_path)
# Copy all files in the root to the temporary location since
# these cannot be ini files
keep_files.extend(item for item in os.listdir(metadata_path) if
not os.path.isdir(os.path.join(metadata_path, item)))
for item in keep_files:
dest_dir = os.path.dirname(os.path.join(temp_path, item))
if not os.path.exists(dest_dir):
os.makedirs(dest_dir)
shutil.copyfile(os.path.join(metadata_path, item),
os.path.join(temp_path, item))
# Then move the old manifest files to a new location
temp_path_2 = metadata_path + str(uuid.uuid4())
os.rename(metadata_path, temp_path_2)
# Move the new files to the destination location and remove the old files
os.rename(temp_path, metadata_path)
shutil.rmtree(temp_path_2)
def write_new_expected(metadata_path, expected_map):
# Serialize the data back to a file
for tree in expected_map.itervalues():
if not tree.is_empty:
manifest_str = wptmanifest.serialize(tree.node, skip_empty_data=True)
assert manifest_str != ""
path = expected.expected_path(metadata_path, tree.test_path)
dir = os.path.split(path)[0]
if not os.path.exists(dir):
os.makedirs(dir)
with open(path, "w") as f:
f.write(manifest_str)
class ExpectedUpdater(object):
def __init__(self, test_manifests, expected_tree, id_path_map, ignore_existing=False):
self.test_manifests = test_manifests
self.expected_tree = expected_tree
self.id_path_map = id_path_map
self.ignore_existing = ignore_existing
self.run_info = None
self.action_map = {"suite_start": self.suite_start,
"test_start": self.test_start,
"test_status": self.test_status,
"test_end": self.test_end}
self.tests_visited = {}
self.test_cache = {}
def update_from_log(self, log_file):
self.run_info = None
log_reader = reader.read(log_file)
reader.each_log(log_reader, self.action_map)
def suite_start(self, data):
self.run_info = data["run_info"]
def test_id(self, id):
if type(id) in types.StringTypes:
return id
else:
return tuple(id)
def test_start(self, data):
test_id = self.test_id(data["test"])
try:
test_manifest, test = self.id_path_map[test_id]
expected_node = self.expected_tree[test_manifest][test].get_test(test_id)
except KeyError:
print "Test not found %s, skipping" % test_id
return
self.test_cache[test_id] = expected_node
if test_id not in self.tests_visited:
if self.ignore_existing:
expected_node.clear_expected()
self.tests_visited[test_id] = set()
def test_status(self, data):
test_id = self.test_id(data["test"])
test = self.test_cache.get(test_id)
if test is None:
return
test_cls = wpttest.manifest_test_cls[test.test_type]
subtest = test.get_subtest(data["subtest"])
self.tests_visited[test.id].add(data["subtest"])
result = test_cls.subtest_result_cls(
data["subtest"],
data["status"],
data.get("message"))
subtest.set_result(self.run_info, result)
def test_end(self, data):
test_id = self.test_id(data["test"])
test = self.test_cache.get(test_id)
if test is None:
return
test_cls = wpttest.manifest_test_cls[test.test_type]
if data["status"] == "SKIP":
return
result = test_cls.result_cls(
data["status"],
data.get("message"))
test.set_result(self.run_info, result)
del self.test_cache[test_id]
def create_test_tree(metadata_path, test_manifest):
expected_map = {}
id_test_map = {}
exclude_types = frozenset(["stub", "helper", "manual"])
include_types = set(manifest.item_types) - exclude_types
for test_path, tests in test_manifest.itertypes(*include_types):
expected_data = load_expected(test_manifest, metadata_path, test_path, tests)
if expected_data is None:
expected_data = create_expected(test_manifest, test_path, tests)
for test in tests:
id_test_map[test.id] = (test_manifest, test)
expected_map[test] = expected_data
return expected_map, id_test_map
def create_expected(test_manifest, test_path, tests):
expected = manifestupdate.ExpectedManifest(None, test_path, test_manifest.url_base)
for test in tests:
expected.append(manifestupdate.TestNode.create(test.item_type, test.id))
return expected
def load_expected(test_manifest, metadata_path, test_path, tests):
expected_manifest = manifestupdate.get_manifest(metadata_path,
test_path,
test_manifest.url_base)
if expected_manifest is None:
return
tests_by_id = {item.id: item for item in tests}
# Remove expected data for tests that no longer exist
for test in expected_manifest.iterchildren():
if not test.id in tests_by_id:
test.remove()
# Add tests that don't have expected data
for test in tests:
if not expected_manifest.has_test(test.id):
expected_manifest.append(manifestupdate.TestNode.create(test.item_type, test.id))
return expected_manifest
| mpl-2.0 |
farert/farert | db/scripts/distance_exp.py | 1 | 4515 | #!python3.0.1
# -*- coding: utf-8 -*-
"""
Get the operating distance (sales_km) and calculated distance (calc_km) from station 1 to station 2 on a specified line.
"""
import sys
import os
import jrdb
import time
t0 = time.time()
sql = """
select
(select max(sales_km) from t_lines where line_id=?1 and (station_id=?2 or station_id=?3))-
(select min(sales_km) from t_lines where line_id=?1 and (station_id=?2 or station_id=?3)),
(select max(calc_km) from t_lines where line_id=?1 and (station_id=?2 or station_id=?3))-
(select min(calc_km) from t_lines where line_id=?1 and (station_id=?2 or station_id=?3)),
case when exists (select * from t_lines
where line_id=?1 and (lflg&(1<<21)!=0) and station_id=?2)
then -1 else
abs((select sales_km from t_lines
where line_id=?1 and (lflg&(1<<21)!=0)
and sales_km>(select min(sales_km) from t_lines where line_id=?1 and (station_id=?2 or station_id=?3))
and sales_km<(select max(sales_km) from t_lines where line_id=?1 and (station_id=?2 or station_id=?3)))-
(select sales_km from t_lines where line_id=?1 and station_id=?2)) end,
case when exists (select * from t_lines
where line_id=?1 and (lflg&(1<<21)!=0) and station_id=?3)
then -1 else
abs((select calc_km from t_lines
where line_id=?1 and (lflg&(1<<21)!=0)
and sales_km>(select min(sales_km) from t_lines where line_id=?1 and (station_id=?2 or station_id=?3))
and sales_km<(select max(sales_km) from t_lines where line_id=?1 and (station_id=?2 or station_id=?3)))-
(select calc_km from t_lines where line_id=?1 and station_id=?2)) end,
((select company_id from t_station where rowid=?2) + (65536 * (select company_id from t_station where rowid=?3))),
((select 2147483648*(1&(lflg>>23)) from t_lines where line_id=?1) +
(select sflg&8191 from t_station where rowid=?2) + (select sflg&8191 from t_station where rowid=?3) * 65536)
"""
# s1 or s2 ...
# result list
for n in range(100):
for inf in jrdb.sqlexec(sql, [ jrdb.line_id(sys.argv[1]),
jrdb.station_id(sys.argv[2]), jrdb.station_id(sys.argv[3]) ] ):
if n == 0: print(inf[0], inf[1], inf[2], inf[3], inf[4], inf[5])
pass
print("lapse ", time.time() - t0)
# col1 : operating km (sales_km) between ?2 and ?3 on line ?1
# col2 : calculated km (calc_km) between ?2 and ?3 on line ?1
# col3 : operating km from ?2 to the boundary station (returns -1 if ?2 is the boundary station; returns None if the boundary station is not between ?2 and ?3)
# col4 : calculated km from ?2 to the boundary station (returns -1 if ?3 is the boundary station; returns None if the boundary station is not between ?2 and ?3)
# 2012-9-2
# 2012-12-21 using the query above
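# Example invocation (argument order per the sys.argv usage above; the
# line/station names are whatever jrdb.line_id()/jrdb.station_id() accept):
#   python distance_exp.py <line-name> <station-1> <station-2>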
print("----------------------------------------------------------------------------")
t0 = time.time()
sql = """
select (select max(sales_km) from t_lines where line_id=?1 and (station_id=?2 or station_id=?3))-
(select min(sales_km) from t_lines where line_id=?1 and (station_id=?2 or station_id=?3)),
(select max(calc_km) from t_lines where line_id=?1 and (station_id=?2 or station_id=?3))-
(select min(calc_km) from t_lines where line_id=?1 and (station_id=?2 or station_id=?3)),
abs((select sales_km from t_lines where line_id=?1 and (lflg&(1<<21))!=0)-
(select sales_km from t_lines where line_id=?1 and station_id=?2)),
abs((select calc_km from t_lines where line_id=?1 and (lflg&(1<<21))!=0)-
(select calc_km from t_lines where line_id=?1 and station_id=?2)),
((select company_id from t_station where rowid=?2) + (65536 * (select company_id from t_station where rowid=?3))),
((select 2147483648*(1&(lflg>>23)) from t_lines where line_id=?1) +
(select sflg&8191 from t_station where rowid=?2) + (select sflg&8191 from t_station where rowid=?3) * 65536)
"""
# sales_km, calc_km, sales_km (station1's company section), calc_km (station1's company section), company_id of station1, company_id of station2
# bit31: whether this is a private-company line
# result list
for n in range(500):
for inf in jrdb.sqlexec(sql, [ jrdb.line_id(sys.argv[1]),
jrdb.station_id(sys.argv[2]), jrdb.station_id(sys.argv[3]) ] ):
if n == 0: print(inf[0], inf[1], inf[2], inf[3], inf[4], inf[5])
pass
print("lapse ", time.time() - t0)
"""
3167 Shin-Yamaguchi
3180 Moji -> sales_km: 752 (689 to Shimonoseki)
141 Sanyo Line
12 2
15 2
19 2
22 2
29 2
32 2
40 3
48 3 36 = (40-12) + (48-40)
create table lin(km, cid, f);
insert into lin values(12, 2, 0);
insert into lin values(15, 2, 0);
insert into lin values(19, 2, 0);
insert into lin values(22, 2, 0);
insert into lin values(29, 2, 0);
insert into lin values(32, 2, 0);
insert into lin values(40, 3, 1);
insert into lin values(48, 3, 0);
"""
| gpl-3.0 |
EricMuller/mynotes-backend | requirements/twisted/Twisted-17.1.0/docs/core/examples/simpleclient.py | 2 | 1262 |
# Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
"""
An example client. Run simpleserv.py first before running this.
"""
from __future__ import print_function
from twisted.internet import reactor, protocol
# a client protocol
class EchoClient(protocol.Protocol):
"""Once connected, send a message, then print the result."""
def connectionMade(self):
self.transport.write("hello, world!")
def dataReceived(self, data):
"As soon as any data is received, write it back."
print("Server said:", data)
self.transport.loseConnection()
def connectionLost(self, reason):
print("connection lost")
class EchoFactory(protocol.ClientFactory):
protocol = EchoClient
def clientConnectionFailed(self, connector, reason):
print("Connection failed - goodbye!")
reactor.stop()
def clientConnectionLost(self, connector, reason):
print("Connection lost - goodbye!")
reactor.stop()
# this connects the protocol to a server running on port 8000
def main():
f = EchoFactory()
reactor.connectTCP("localhost", 8000, f)
reactor.run()
# this only runs if the module was *not* imported
if __name__ == '__main__':
main()
| mit |
yrizk/yrizk.github.io | myvenv/lib/python3.4/site-packages/pip/_vendor/distlib/util.py | 203 | 51453 | #
# Copyright (C) 2012-2014 The Python Software Foundation.
# See LICENSE.txt and CONTRIBUTORS.txt.
#
import codecs
from collections import deque
import contextlib
import csv
from glob import iglob as std_iglob
import io
import json
import logging
import os
import py_compile
import re
import shutil
import socket
import ssl
import subprocess
import sys
import tarfile
import tempfile
try:
import threading
except ImportError:
import dummy_threading as threading
import time
from . import DistlibException
from .compat import (string_types, text_type, shutil, raw_input, StringIO,
cache_from_source, urlopen, httplib, xmlrpclib, splittype,
HTTPHandler, HTTPSHandler as BaseHTTPSHandler,
BaseConfigurator, valid_ident, Container, configparser,
URLError, match_hostname, CertificateError, ZipFile)
logger = logging.getLogger(__name__)
#
# Requirement parsing code for name + optional constraints + optional extras
#
# e.g. 'foo >= 1.2, < 2.0 [bar, baz]'
#
# The regex can seem a bit hairy, so we build it up out of smaller pieces
# which are manageable.
#
COMMA = r'\s*,\s*'
COMMA_RE = re.compile(COMMA)
IDENT = r'(\w|[.-])+'
EXTRA_IDENT = r'(\*|:(\*|\w+):|' + IDENT + ')'
VERSPEC = IDENT + r'\*?'
RELOP = '([<>=!~]=)|[<>]'
#
# The first relop is optional - if absent, will be taken as '~='
#
BARE_CONSTRAINTS = ('(' + RELOP + r')?\s*(' + VERSPEC + ')(' + COMMA + '(' +
RELOP + r')\s*(' + VERSPEC + '))*')
DIRECT_REF = '(from\s+(?P<diref>.*))'
#
# Either the bare constraints or the bare constraints in parentheses
#
CONSTRAINTS = (r'\(\s*(?P<c1>' + BARE_CONSTRAINTS + '|' + DIRECT_REF +
r')\s*\)|(?P<c2>' + BARE_CONSTRAINTS + '\s*)')
EXTRA_LIST = EXTRA_IDENT + '(' + COMMA + EXTRA_IDENT + ')*'
EXTRAS = r'\[\s*(?P<ex>' + EXTRA_LIST + r')?\s*\]'
REQUIREMENT = ('(?P<dn>' + IDENT + r')\s*(' + EXTRAS + r'\s*)?(\s*' +
CONSTRAINTS + ')?$')
REQUIREMENT_RE = re.compile(REQUIREMENT)
#
# Used to scan through the constraints
#
RELOP_IDENT = '(?P<op>' + RELOP + r')\s*(?P<vn>' + VERSPEC + ')'
RELOP_IDENT_RE = re.compile(RELOP_IDENT)
def parse_requirement(s):
def get_constraint(m):
d = m.groupdict()
return d['op'], d['vn']
result = None
m = REQUIREMENT_RE.match(s)
if m:
d = m.groupdict()
name = d['dn']
cons = d['c1'] or d['c2']
if not d['diref']:
url = None
else:
# direct reference
cons = None
url = d['diref'].strip()
if not cons:
cons = None
constr = ''
rs = d['dn']
else:
if cons[0] not in '<>!=':
cons = '~=' + cons
iterator = RELOP_IDENT_RE.finditer(cons)
cons = [get_constraint(m) for m in iterator]
rs = '%s (%s)' % (name, ', '.join(['%s %s' % con for con in cons]))
if not d['ex']:
extras = None
else:
extras = COMMA_RE.split(d['ex'])
result = Container(name=name, constraints=cons, extras=extras,
requirement=rs, source=s, url=url)
return result
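# A small self-check sketch (not part of distlib); the field names follow
# the Container construction above:
def _demo_parse_requirement():  # pragma: no cover - illustrative only
    r = parse_requirement('foo (>= 1.2, < 2.0)')
    assert r.name == 'foo'
    assert r.constraints == [('>=', '1.2'), ('<', '2.0')]
    assert r.requirement == 'foo (>= 1.2, < 2.0)'
    assert r.extras is None and r.url is None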
def get_resources_dests(resources_root, rules):
"""Find destinations for resources files"""
def get_rel_path(base, path):
# normalizes and returns a lstripped-/-separated path
base = base.replace(os.path.sep, '/')
path = path.replace(os.path.sep, '/')
assert path.startswith(base)
return path[len(base):].lstrip('/')
destinations = {}
for base, suffix, dest in rules:
prefix = os.path.join(resources_root, base)
for abs_base in iglob(prefix):
abs_glob = os.path.join(abs_base, suffix)
for abs_path in iglob(abs_glob):
resource_file = get_rel_path(resources_root, abs_path)
if dest is None: # remove the entry if it was here
destinations.pop(resource_file, None)
else:
rel_path = get_rel_path(abs_base, abs_path)
rel_dest = dest.replace(os.path.sep, '/').rstrip('/')
destinations[resource_file] = rel_dest + '/' + rel_path
return destinations
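# Illustrative rules for get_resources_dests (names invented): each rule is
# a (base, glob-suffix, dest) triple, and dest=None removes entries that an
# earlier rule added:
#   get_resources_dests('/pkg/resources',
#                       [('images', '*.png', 'app/images'),
#                        ('images', 'ignore.png', None)])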
def in_venv():
if hasattr(sys, 'real_prefix'):
# virtualenv venvs
result = True
else:
# PEP 405 venvs
result = sys.prefix != getattr(sys, 'base_prefix', sys.prefix)
return result
def get_executable():
# The __PYVENV_LAUNCHER__ dance is apparently no longer needed, as
# changes to the stub launcher mean that sys.executable always points
# to the stub on OS X
# if sys.platform == 'darwin' and ('__PYVENV_LAUNCHER__'
# in os.environ):
# result = os.environ['__PYVENV_LAUNCHER__']
# else:
# result = sys.executable
# return result
return sys.executable
def proceed(prompt, allowed_chars, error_prompt=None, default=None):
p = prompt
while True:
s = raw_input(p)
p = prompt
if not s and default:
s = default
if s:
c = s[0].lower()
if c in allowed_chars:
break
if error_prompt:
p = '%c: %s\n%s' % (c, error_prompt, prompt)
return c
def extract_by_key(d, keys):
if isinstance(keys, string_types):
keys = keys.split()
result = {}
for key in keys:
if key in d:
result[key] = d[key]
return result
def read_exports(stream):
if sys.version_info[0] >= 3:
# needs to be a text stream
stream = codecs.getreader('utf-8')(stream)
# Try to load as JSON, falling back on legacy format
data = stream.read()
stream = StringIO(data)
try:
data = json.load(stream)
result = data['extensions']['python.exports']['exports']
for group, entries in result.items():
for k, v in entries.items():
s = '%s = %s' % (k, v)
entry = get_export_entry(s)
assert entry is not None
entries[k] = entry
return result
except Exception:
stream.seek(0, 0)
cp = configparser.ConfigParser()
if hasattr(cp, 'read_file'):
cp.read_file(stream)
else:
cp.readfp(stream)
result = {}
for key in cp.sections():
result[key] = entries = {}
for name, value in cp.items(key):
s = '%s = %s' % (name, value)
entry = get_export_entry(s)
assert entry is not None
#entry.dist = self
entries[name] = entry
return result
def write_exports(exports, stream):
if sys.version_info[0] >= 3:
# needs to be a text stream
stream = codecs.getwriter('utf-8')(stream)
cp = configparser.ConfigParser()
for k, v in exports.items():
# TODO check k, v for valid values
cp.add_section(k)
for entry in v.values():
if entry.suffix is None:
s = entry.prefix
else:
s = '%s:%s' % (entry.prefix, entry.suffix)
if entry.flags:
s = '%s [%s]' % (s, ', '.join(entry.flags))
cp.set(k, entry.name, s)
cp.write(stream)
@contextlib.contextmanager
def tempdir():
td = tempfile.mkdtemp()
try:
yield td
finally:
shutil.rmtree(td)
@contextlib.contextmanager
def chdir(d):
cwd = os.getcwd()
try:
os.chdir(d)
yield
finally:
os.chdir(cwd)
@contextlib.contextmanager
def socket_timeout(seconds=15):
cto = socket.getdefaulttimeout()
try:
socket.setdefaulttimeout(seconds)
yield
finally:
socket.setdefaulttimeout(cto)
class cached_property(object):
def __init__(self, func):
self.func = func
#for attr in ('__name__', '__module__', '__doc__'):
# setattr(self, attr, getattr(func, attr, None))
def __get__(self, obj, cls=None):
if obj is None:
return self
value = self.func(obj)
object.__setattr__(obj, self.func.__name__, value)
#obj.__dict__[self.func.__name__] = value = self.func(obj)
return value
def convert_path(pathname):
"""Return 'pathname' as a name that will work on the native filesystem.
The path is split on '/' and put back together again using the current
directory separator. Needed because filenames in the setup script are
always supplied in Unix style, and have to be converted to the local
convention before we can actually use them in the filesystem. Raises
ValueError on non-Unix-ish systems if 'pathname' either starts or
ends with a slash.
"""
if os.sep == '/':
return pathname
if not pathname:
return pathname
if pathname[0] == '/':
raise ValueError("path '%s' cannot be absolute" % pathname)
if pathname[-1] == '/':
raise ValueError("path '%s' cannot end with '/'" % pathname)
paths = pathname.split('/')
while os.curdir in paths:
paths.remove(os.curdir)
if not paths:
return os.curdir
return os.path.join(*paths)
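# Example: on Windows (os.sep == '\\') a setup-style path is rewritten with
# the native separator; on POSIX systems it is returned unchanged.
#
#   >>> convert_path('docs/source/index.rst')   # on Windows
#   'docs\\source\\index.rst'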
class FileOperator(object):
def __init__(self, dry_run=False):
self.dry_run = dry_run
self.ensured = set()
self._init_record()
def _init_record(self):
self.record = False
self.files_written = set()
self.dirs_created = set()
def record_as_written(self, path):
if self.record:
self.files_written.add(path)
def newer(self, source, target):
"""Tell if the target is newer than the source.
Returns true if 'source' exists and is more recently modified than
'target', or if 'source' exists and 'target' doesn't.
Returns false if both exist and 'target' is the same age or younger
than 'source'. Raise PackagingFileError if 'source' does not exist.
Note that this test is not very accurate: files created in the same
second will have the same "age".
"""
if not os.path.exists(source):
raise DistlibException("file '%r' does not exist" %
os.path.abspath(source))
if not os.path.exists(target):
return True
return os.stat(source).st_mtime > os.stat(target).st_mtime
def copy_file(self, infile, outfile, check=True):
"""Copy a file respecting dry-run and force flags.
"""
self.ensure_dir(os.path.dirname(outfile))
logger.info('Copying %s to %s', infile, outfile)
if not self.dry_run:
msg = None
if check:
if os.path.islink(outfile):
msg = '%s is a symlink' % outfile
elif os.path.exists(outfile) and not os.path.isfile(outfile):
msg = '%s is a non-regular file' % outfile
if msg:
raise ValueError(msg + ' which would be overwritten')
shutil.copyfile(infile, outfile)
self.record_as_written(outfile)
def copy_stream(self, instream, outfile, encoding=None):
assert not os.path.isdir(outfile)
self.ensure_dir(os.path.dirname(outfile))
logger.info('Copying stream %s to %s', instream, outfile)
if not self.dry_run:
if encoding is None:
outstream = open(outfile, 'wb')
else:
outstream = codecs.open(outfile, 'w', encoding=encoding)
try:
shutil.copyfileobj(instream, outstream)
finally:
outstream.close()
self.record_as_written(outfile)
def write_binary_file(self, path, data):
self.ensure_dir(os.path.dirname(path))
if not self.dry_run:
with open(path, 'wb') as f:
f.write(data)
self.record_as_written(path)
def write_text_file(self, path, data, encoding):
self.ensure_dir(os.path.dirname(path))
if not self.dry_run:
with open(path, 'wb') as f:
f.write(data.encode(encoding))
self.record_as_written(path)
def set_mode(self, bits, mask, files):
if os.name == 'posix':
# Set the executable bits (owner, group, and world) on
# all the files specified.
for f in files:
if self.dry_run:
logger.info("changing mode of %s", f)
else:
mode = (os.stat(f).st_mode | bits) & mask
logger.info("changing mode of %s to %o", f, mode)
os.chmod(f, mode)
set_executable_mode = lambda s, f: s.set_mode(0o555, 0o7777, f)
def ensure_dir(self, path):
path = os.path.abspath(path)
if path not in self.ensured and not os.path.exists(path):
self.ensured.add(path)
d, f = os.path.split(path)
self.ensure_dir(d)
logger.info('Creating %s' % path)
if not self.dry_run:
os.mkdir(path)
if self.record:
self.dirs_created.add(path)
def byte_compile(self, path, optimize=False, force=False, prefix=None):
dpath = cache_from_source(path, not optimize)
logger.info('Byte-compiling %s to %s', path, dpath)
if not self.dry_run:
if force or self.newer(path, dpath):
if not prefix:
diagpath = None
else:
assert path.startswith(prefix)
diagpath = path[len(prefix):]
py_compile.compile(path, dpath, diagpath, True) # raise error
self.record_as_written(dpath)
return dpath
def ensure_removed(self, path):
if os.path.exists(path):
if os.path.isdir(path) and not os.path.islink(path):
logger.debug('Removing directory tree at %s', path)
if not self.dry_run:
shutil.rmtree(path)
if self.record:
if path in self.dirs_created:
self.dirs_created.remove(path)
else:
if os.path.islink(path):
s = 'link'
else:
s = 'file'
logger.debug('Removing %s %s', s, path)
if not self.dry_run:
os.remove(path)
if self.record:
if path in self.files_written:
self.files_written.remove(path)
def is_writable(self, path):
result = False
while not result:
if os.path.exists(path):
result = os.access(path, os.W_OK)
break
parent = os.path.dirname(path)
if parent == path:
break
path = parent
return result
def commit(self):
"""
Commit recorded changes, turn off recording, return
changes.
"""
assert self.record
result = self.files_written, self.dirs_created
self._init_record()
return result
def rollback(self):
if not self.dry_run:
for f in list(self.files_written):
if os.path.exists(f):
os.remove(f)
# dirs should all be empty now, except perhaps for
# __pycache__ subdirs
# reverse so that subdirs appear before their parents
dirs = sorted(self.dirs_created, reverse=True)
for d in dirs:
flist = os.listdir(d)
if flist:
assert flist == ['__pycache__']
sd = os.path.join(d, flist[0])
os.rmdir(sd)
os.rmdir(d) # should fail if non-empty
self._init_record()
def resolve(module_name, dotted_path):
if module_name in sys.modules:
mod = sys.modules[module_name]
else:
mod = __import__(module_name)
if dotted_path is None:
result = mod
else:
parts = dotted_path.split('.')
result = getattr(mod, parts.pop(0))
for p in parts:
result = getattr(result, p)
return result
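# Usage sketch: resolve a dotted attribute path relative to a module. Note
# that for a submodule not yet imported, __import__ returns the top-level
# package, so callers normally pass already-imported modules.
#
#   resolve('logging', 'getLogger')   # -> the logging.getLogger function
#   resolve('logging', None)          # -> the logging module itself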
class ExportEntry(object):
def __init__(self, name, prefix, suffix, flags):
self.name = name
self.prefix = prefix
self.suffix = suffix
self.flags = flags
@cached_property
def value(self):
return resolve(self.prefix, self.suffix)
def __repr__(self):
return '<ExportEntry %s = %s:%s %s>' % (self.name, self.prefix,
self.suffix, self.flags)
def __eq__(self, other):
if not isinstance(other, ExportEntry):
result = False
else:
result = (self.name == other.name and
self.prefix == other.prefix and
self.suffix == other.suffix and
self.flags == other.flags)
return result
__hash__ = object.__hash__
ENTRY_RE = re.compile(r'''(?P<name>(\w|[-.])+)
\s*=\s*(?P<callable>(\w+)([:\.]\w+)*)
\s*(\[\s*(?P<flags>\w+(=\w+)?(,\s*\w+(=\w+)?)*)\s*\])?
''', re.VERBOSE)
def get_export_entry(specification):
m = ENTRY_RE.search(specification)
if not m:
result = None
if '[' in specification or ']' in specification:
raise DistlibException('Invalid specification '
'%r' % specification)
else:
d = m.groupdict()
name = d['name']
path = d['callable']
colons = path.count(':')
if colons == 0:
prefix, suffix = path, None
else:
if colons != 1:
raise DistlibException('Invalid specification '
'%r' % specification)
prefix, suffix = path.split(':')
flags = d['flags']
if flags is None:
if '[' in specification or ']' in specification:
raise DistlibException('Invalid specification '
'%r' % specification)
flags = []
else:
flags = [f.strip() for f in flags.split(',')]
result = ExportEntry(name, prefix, suffix, flags)
return result
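# Example of the specification syntax parsed above (names hypothetical):
#
#   >>> e = get_export_entry('main = mypkg.cli:run [gui, debug=1]')
#   >>> e.name, e.prefix, e.suffix, e.flags
#   ('main', 'mypkg.cli', 'run', ['gui', 'debug=1'])
#
# With no ':' in the callable part, suffix is None and the whole dotted
# path becomes the prefix.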
def get_cache_base(suffix=None):
"""
Return the default base location for distlib caches. If the directory does
not exist, it is created. Use the suffix provided for the base directory,
and default to '.distlib' if it isn't provided.
On Windows, if LOCALAPPDATA is defined in the environment, then it is
assumed to be a directory, and will be the parent directory of the result.
On POSIX, and on Windows if LOCALAPPDATA is not defined, the user's home
directory - as given by os.path.expanduser('~') - will be the parent
directory of the result.
The result is just the directory '.distlib' in the parent directory as
determined above, or with the name specified with ``suffix``.
"""
if suffix is None:
suffix = '.distlib'
if os.name == 'nt' and 'LOCALAPPDATA' in os.environ:
result = os.path.expandvars('$localappdata')
else:
# Assume posix, or old Windows
result = os.path.expanduser('~')
# we use 'isdir' instead of 'exists', because we want to
# fail if there's a file with that name
if os.path.isdir(result):
usable = os.access(result, os.W_OK)
if not usable:
logger.warning('Directory exists but is not writable: %s', result)
else:
try:
os.makedirs(result)
usable = True
except OSError:
logger.warning('Unable to create %s', result, exc_info=True)
usable = False
if not usable:
result = tempfile.mkdtemp()
logger.warning('Default location unusable, using %s', result)
return os.path.join(result, suffix)
def path_to_cache_dir(path):
"""
Convert an absolute path to a directory name for use in a cache.
The algorithm used is:
#. On Windows, any ``':'`` in the drive is replaced with ``'---'``.
#. Any occurrence of ``os.sep`` is replaced with ``'--'``.
#. ``'.cache'`` is appended.
"""
d, p = os.path.splitdrive(os.path.abspath(path))
if d:
d = d.replace(':', '---')
p = p.replace(os.sep, '--')
return d + p + '.cache'
def ensure_slash(s):
if not s.endswith('/'):
return s + '/'
return s
def parse_credentials(netloc):
username = password = None
if '@' in netloc:
prefix, netloc = netloc.split('@', 1)
if ':' not in prefix:
username = prefix
else:
username, password = prefix.split(':', 1)
return username, password, netloc
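# Example (credentials hypothetical): split an optional 'user:password@'
# prefix off a netloc.
#
#   >>> parse_credentials('alice:s3cret@pypi.example.org')
#   ('alice', 's3cret', 'pypi.example.org')
#   >>> parse_credentials('pypi.example.org')
#   (None, None, 'pypi.example.org')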
def get_process_umask():
result = os.umask(0o22)
os.umask(result)
return result
def is_string_sequence(seq):
result = True
i = None
for i, s in enumerate(seq):
if not isinstance(s, string_types):
result = False
break
assert i is not None
return result
PROJECT_NAME_AND_VERSION = re.compile('([a-z0-9_]+([.-][a-z_][a-z0-9_]*)*)-'
'([a-z0-9_.+-]+)', re.I)
PYTHON_VERSION = re.compile(r'-py(\d\.?\d?)')
def split_filename(filename, project_name=None):
"""
Extract name, version, python version from a filename (no extension)
Return name, version, pyver or None
"""
result = None
pyver = None
m = PYTHON_VERSION.search(filename)
if m:
pyver = m.group(1)
filename = filename[:m.start()]
if project_name and len(filename) > len(project_name) + 1:
m = re.match(re.escape(project_name) + r'\b', filename)
if m:
n = m.end()
result = filename[:n], filename[n + 1:], pyver
if result is None:
m = PROJECT_NAME_AND_VERSION.match(filename)
if m:
result = m.group(1), m.group(3), pyver
return result
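# Example (filenames hypothetical): split an archive stem into
# (name, version, pyver).
#
#   >>> split_filename('foo-1.0.2-py2.7')
#   ('foo', '1.0.2', '2.7')
#   >>> split_filename('foo-bar-1.0', project_name='foo-bar')
#   ('foo-bar', '1.0', None)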
# Allow spaces in name because of legacy dists like "Twisted Core"
NAME_VERSION_RE = re.compile(r'(?P<name>[\w .-]+)\s*'
r'\(\s*(?P<ver>[^\s)]+)\)$')
def parse_name_and_version(p):
"""
A utility method used to get name and version from a string.
From e.g. a Provides-Dist value.
:param p: A value in a form 'foo (1.0)'
:return: The name and version as a tuple.
"""
m = NAME_VERSION_RE.match(p)
if not m:
raise DistlibException('Ill-formed name/version string: \'%s\'' % p)
d = m.groupdict()
return d['name'].strip().lower(), d['ver']
def get_extras(requested, available):
result = set()
requested = set(requested or [])
available = set(available or [])
if '*' in requested:
requested.remove('*')
result |= available
for r in requested:
if r == '-':
result.add(r)
elif r.startswith('-'):
unwanted = r[1:]
if unwanted not in available:
logger.warning('undeclared extra: %s' % unwanted)
if unwanted in result:
result.remove(unwanted)
else:
if r not in available:
logger.warning('undeclared extra: %s' % r)
result.add(r)
return result
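# Example: '*' requests all declared extras, and a '-name' entry subtracts
# one; undeclared extras are kept but logged as warnings.
#
#   >>> sorted(get_extras(['*', '-tests'], ['docs', 'tests']))
#   ['docs']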
#
# Extended metadata functionality
#
def _get_external_data(url):
result = {}
try:
# urlopen might fail if it runs into redirections,
# because of Python issue #13696. Fixed in locators
# using a custom redirect handler.
resp = urlopen(url)
headers = resp.info()
if headers.get('Content-Type') != 'application/json':
logger.debug('Unexpected response for JSON request')
else:
reader = codecs.getreader('utf-8')(resp)
#data = reader.read().decode('utf-8')
#result = json.loads(data)
result = json.load(reader)
except Exception as e:
logger.exception('Failed to get external data for %s: %s', url, e)
return result
def get_project_data(name):
url = ('https://www.red-dove.com/pypi/projects/'
'%s/%s/project.json' % (name[0].upper(), name))
result = _get_external_data(url)
return result
def get_package_data(name, version):
url = ('https://www.red-dove.com/pypi/projects/'
'%s/%s/package-%s.json' % (name[0].upper(), name, version))
return _get_external_data(url)
class Cache(object):
"""
A class implementing a cache for resources that need to live in the file
system, e.g. shared libraries. This class was moved here from resources
because it could be used by other modules, e.g. the wheel module.
"""
def __init__(self, base):
"""
Initialise an instance.
:param base: The base directory where the cache should be located.
"""
# we use 'isdir' instead of 'exists', because we want to
# fail if there's a file with that name
if not os.path.isdir(base):
os.makedirs(base)
if (os.stat(base).st_mode & 0o77) != 0:
logger.warning('Directory \'%s\' is not private', base)
self.base = os.path.abspath(os.path.normpath(base))
def prefix_to_dir(self, prefix):
"""
Converts a resource prefix to a directory name in the cache.
"""
return path_to_cache_dir(prefix)
def clear(self):
"""
Clear the cache.
"""
not_removed = []
for fn in os.listdir(self.base):
fn = os.path.join(self.base, fn)
try:
if os.path.islink(fn) or os.path.isfile(fn):
os.remove(fn)
elif os.path.isdir(fn):
shutil.rmtree(fn)
except Exception:
not_removed.append(fn)
return not_removed
class EventMixin(object):
"""
A very simple publish/subscribe system.
"""
def __init__(self):
self._subscribers = {}
def add(self, event, subscriber, append=True):
"""
Add a subscriber for an event.
:param event: The name of an event.
:param subscriber: The subscriber to be added (and called when the
event is published).
:param append: Whether to append or prepend the subscriber to an
existing subscriber list for the event.
"""
subs = self._subscribers
if event not in subs:
subs[event] = deque([subscriber])
else:
sq = subs[event]
if append:
sq.append(subscriber)
else:
sq.appendleft(subscriber)
def remove(self, event, subscriber):
"""
Remove a subscriber for an event.
:param event: The name of an event.
:param subscriber: The subscriber to be removed.
"""
subs = self._subscribers
if event not in subs:
raise ValueError('No subscribers: %r' % event)
subs[event].remove(subscriber)
def get_subscribers(self, event):
"""
Return an iterator for the subscribers for an event.
:param event: The event to return subscribers for.
"""
return iter(self._subscribers.get(event, ()))
def publish(self, event, *args, **kwargs):
"""
Publish an event and return a list of the values returned by its
subscribers.
:param event: The event to publish.
:param args: The positional arguments to pass to the event's
subscribers.
:param kwargs: The keyword arguments to pass to the event's
subscribers.
"""
result = []
for subscriber in self.get_subscribers(event):
try:
value = subscriber(event, *args, **kwargs)
except Exception:
logger.exception('Exception during event publication')
value = None
result.append(value)
logger.debug('publish %s: args = %s, kwargs = %s, result = %s',
event, args, kwargs, result)
return result
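# Usage sketch for EventMixin (subscriber hypothetical): subscribers are
# called with the event name followed by the published arguments.
#
#   class Publisher(EventMixin):
#       pass
#
#   pub = Publisher()
#   pub.add('progress', lambda event, amount: amount * 2)
#   pub.publish('progress', 21)   # -> [42]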
#
# Simple sequencing
#
class Sequencer(object):
def __init__(self):
self._preds = {}
self._succs = {}
self._nodes = set() # nodes with no preds/succs
def add_node(self, node):
self._nodes.add(node)
def remove_node(self, node, edges=False):
if node in self._nodes:
self._nodes.remove(node)
if edges:
for p in set(self._preds.get(node, ())):
self.remove(p, node)
for s in set(self._succs.get(node, ())):
self.remove(node, s)
# Remove empties
for k, v in list(self._preds.items()):
if not v:
del self._preds[k]
for k, v in list(self._succs.items()):
if not v:
del self._succs[k]
def add(self, pred, succ):
assert pred != succ
self._preds.setdefault(succ, set()).add(pred)
self._succs.setdefault(pred, set()).add(succ)
def remove(self, pred, succ):
assert pred != succ
try:
preds = self._preds[succ]
succs = self._succs[pred]
except KeyError:
raise ValueError('%r not a successor of anything' % succ)
try:
preds.remove(pred)
succs.remove(succ)
except KeyError:
raise ValueError('%r not a successor of %r' % (succ, pred))
def is_step(self, step):
return (step in self._preds or step in self._succs or
step in self._nodes)
def get_steps(self, final):
if not self.is_step(final):
raise ValueError('Unknown: %r' % final)
result = []
todo = []
seen = set()
todo.append(final)
while todo:
step = todo.pop(0)
if step in seen:
# if a step was already seen,
# move it to the end (so it will appear earlier
# when reversed on return) ... but not for the
# final step, as that would be confusing for
# users
if step != final:
result.remove(step)
result.append(step)
else:
seen.add(step)
result.append(step)
preds = self._preds.get(step, ())
todo.extend(preds)
return reversed(result)
@property
def strong_connections(self):
#http://en.wikipedia.org/wiki/Tarjan%27s_strongly_connected_components_algorithm
index_counter = [0]
stack = []
lowlinks = {}
index = {}
result = []
graph = self._succs
def strongconnect(node):
# set the depth index for this node to the smallest unused index
index[node] = index_counter[0]
lowlinks[node] = index_counter[0]
index_counter[0] += 1
stack.append(node)
# Consider successors
try:
successors = graph[node]
except Exception:
successors = []
for successor in successors:
if successor not in lowlinks:
# Successor has not yet been visited
strongconnect(successor)
lowlinks[node] = min(lowlinks[node],lowlinks[successor])
elif successor in stack:
# the successor is in the stack and hence in the current
# strongly connected component (SCC)
lowlinks[node] = min(lowlinks[node],index[successor])
# If `node` is a root node, pop the stack and generate an SCC
if lowlinks[node] == index[node]:
connected_component = []
while True:
successor = stack.pop()
connected_component.append(successor)
if successor == node: break
component = tuple(connected_component)
# storing the result
result.append(component)
for node in graph:
if node not in lowlinks:
strongconnect(node)
return result
@property
def dot(self):
result = ['digraph G {']
for succ in self._preds:
preds = self._preds[succ]
for pred in preds:
result.append(' %s -> %s;' % (pred, succ))
for node in self._nodes:
result.append(' %s;' % node)
result.append('}')
return '\n'.join(result)
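# Usage sketch for Sequencer (step names hypothetical): edges added with
# add(pred, succ) are ordered so that predecessors come first.
#
#   seq = Sequencer()
#   seq.add('build', 'test')
#   seq.add('test', 'release')
#   list(seq.get_steps('release'))   # -> ['build', 'test', 'release']
#   print(seq.dot)                   # Graphviz rendering of the graph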
#
# Unarchiving functionality for zip, tar, tgz, tbz, whl
#
ARCHIVE_EXTENSIONS = ('.tar.gz', '.tar.bz2', '.tar', '.zip',
'.tgz', '.tbz', '.whl')
def unarchive(archive_filename, dest_dir, format=None, check=True):
def check_path(path):
if not isinstance(path, text_type):
path = path.decode('utf-8')
p = os.path.abspath(os.path.join(dest_dir, path))
if not p.startswith(dest_dir) or p[plen] != os.sep:
raise ValueError('path outside destination: %r' % p)
dest_dir = os.path.abspath(dest_dir)
plen = len(dest_dir)
archive = None
if format is None:
if archive_filename.endswith(('.zip', '.whl')):
format = 'zip'
elif archive_filename.endswith(('.tar.gz', '.tgz')):
format = 'tgz'
mode = 'r:gz'
elif archive_filename.endswith(('.tar.bz2', '.tbz')):
format = 'tbz'
mode = 'r:bz2'
elif archive_filename.endswith('.tar'):
format = 'tar'
mode = 'r'
else:
raise ValueError('Unknown format for %r' % archive_filename)
try:
if format == 'zip':
archive = ZipFile(archive_filename, 'r')
if check:
names = archive.namelist()
for name in names:
check_path(name)
else:
archive = tarfile.open(archive_filename, mode)
if check:
names = archive.getnames()
for name in names:
check_path(name)
if format != 'zip' and sys.version_info[0] < 3:
# See Python issue 17153. If the dest path contains Unicode,
# tarfile extraction fails on Python 2.x if a member path name
# contains non-ASCII characters - it leads to an implicit
# bytes -> unicode conversion using ASCII to decode.
for tarinfo in archive.getmembers():
if not isinstance(tarinfo.name, text_type):
tarinfo.name = tarinfo.name.decode('utf-8')
archive.extractall(dest_dir)
finally:
if archive:
archive.close()
def zip_dir(directory):
"""zip a directory tree into a BytesIO object"""
result = io.BytesIO()
dlen = len(directory)
with ZipFile(result, "w") as zf:
for root, dirs, files in os.walk(directory):
for name in files:
full = os.path.join(root, name)
rel = root[dlen:]
dest = os.path.join(rel, name)
zf.write(full, dest)
return result
#
# Simple progress bar
#
UNITS = ('', 'K', 'M', 'G','T','P')
class Progress(object):
unknown = 'UNKNOWN'
def __init__(self, minval=0, maxval=100):
assert maxval is None or maxval >= minval
self.min = self.cur = minval
self.max = maxval
self.started = None
self.elapsed = 0
self.done = False
def update(self, curval):
assert self.min <= curval
assert self.max is None or curval <= self.max
self.cur = curval
now = time.time()
if self.started is None:
self.started = now
else:
self.elapsed = now - self.started
def increment(self, incr):
assert incr >= 0
self.update(self.cur + incr)
def start(self):
self.update(self.min)
return self
def stop(self):
if self.max is not None:
self.update(self.max)
self.done = True
@property
def maximum(self):
return self.unknown if self.max is None else self.max
@property
def percentage(self):
if self.done:
result = '100 %'
elif self.max is None:
result = ' ?? %'
else:
v = 100.0 * (self.cur - self.min) / (self.max - self.min)
result = '%3d %%' % v
return result
def format_duration(self, duration):
if ((duration <= 0) and self.max is None) or self.cur == self.min:
result = '??:??:??'
#elif duration < 1:
# result = '--:--:--'
else:
result = time.strftime('%H:%M:%S', time.gmtime(duration))
return result
@property
def ETA(self):
if self.done:
prefix = 'Done'
t = self.elapsed
#import pdb; pdb.set_trace()
else:
prefix = 'ETA '
if self.max is None:
t = -1
elif self.elapsed == 0 or (self.cur == self.min):
t = 0
else:
#import pdb; pdb.set_trace()
t = float(self.max - self.min)
t /= self.cur - self.min
t = (t - 1) * self.elapsed
return '%s: %s' % (prefix, self.format_duration(t))
@property
def speed(self):
if self.elapsed == 0:
result = 0.0
else:
result = (self.cur - self.min) / self.elapsed
for unit in UNITS:
if result < 1000:
break
result /= 1000.0
return '%d %sB/s' % (result, unit)
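# Usage sketch for Progress (values illustrative):
#
#   p = Progress(maxval=1000).start()
#   p.increment(250)
#   p.percentage   # -> ' 25 %'
#   p.ETA          # e.g. 'ETA : 00:00:03', based on elapsed time
#   p.stop()
#   p.percentage   # -> '100 %'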
#
# Glob functionality
#
RICH_GLOB = re.compile(r'\{([^}]*)\}')
_CHECK_RECURSIVE_GLOB = re.compile(r'[^/\\,{]\*\*|\*\*[^/\\,}]')
_CHECK_MISMATCH_SET = re.compile(r'^[^{]*\}|\{[^}]*$')
def iglob(path_glob):
"""Extended globbing function that supports ** and {opt1,opt2,opt3}."""
if _CHECK_RECURSIVE_GLOB.search(path_glob):
msg = """invalid glob %r: recursive glob "**" must be used alone"""
raise ValueError(msg % path_glob)
if _CHECK_MISMATCH_SET.search(path_glob):
msg = """invalid glob %r: mismatching set marker '{' or '}'"""
raise ValueError(msg % path_glob)
return _iglob(path_glob)
def _iglob(path_glob):
rich_path_glob = RICH_GLOB.split(path_glob, 1)
if len(rich_path_glob) > 1:
assert len(rich_path_glob) == 3, rich_path_glob
prefix, set, suffix = rich_path_glob
for item in set.split(','):
for path in _iglob(''.join((prefix, item, suffix))):
yield path
else:
if '**' not in path_glob:
for item in std_iglob(path_glob):
yield item
else:
prefix, radical = path_glob.split('**', 1)
if prefix == '':
prefix = '.'
if radical == '':
radical = '*'
else:
# we support both
radical = radical.lstrip('/')
radical = radical.lstrip('\\')
for path, dir, files in os.walk(prefix):
path = os.path.normpath(path)
for fn in _iglob(os.path.join(path, radical)):
yield fn
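# Examples of the extended syntax (paths hypothetical):
#
#   for fn in iglob('src/**/*.py'):             # recursive walk under src/
#       ...
#   for fn in iglob('docs/{api,guide}/*.rst'):  # brace alternation
#       ...
#
# '**' must stand alone between separators, and '{'/'}' must balance;
# otherwise ValueError is raised.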
#
# HTTPSConnection which verifies certificates/matches domains
#
class HTTPSConnection(httplib.HTTPSConnection):
ca_certs = None # set this to the path to the certs file (.pem)
check_domain = True # only used if ca_certs is not None
# noinspection PyPropertyAccess
def connect(self):
sock = socket.create_connection((self.host, self.port), self.timeout)
if getattr(self, '_tunnel_host', False):
self.sock = sock
self._tunnel()
if not hasattr(ssl, 'SSLContext'):
# For 2.x
if self.ca_certs:
cert_reqs = ssl.CERT_REQUIRED
else:
cert_reqs = ssl.CERT_NONE
self.sock = ssl.wrap_socket(sock, self.key_file, self.cert_file,
cert_reqs=cert_reqs,
ssl_version=ssl.PROTOCOL_SSLv23,
ca_certs=self.ca_certs)
else:
context = ssl.SSLContext(ssl.PROTOCOL_SSLv23)
context.options |= ssl.OP_NO_SSLv2
if self.cert_file:
context.load_cert_chain(self.cert_file, self.key_file)
kwargs = {}
if self.ca_certs:
context.verify_mode = ssl.CERT_REQUIRED
context.load_verify_locations(cafile=self.ca_certs)
if getattr(ssl, 'HAS_SNI', False):
kwargs['server_hostname'] = self.host
self.sock = context.wrap_socket(sock, **kwargs)
if self.ca_certs and self.check_domain:
try:
match_hostname(self.sock.getpeercert(), self.host)
logger.debug('Host verified: %s', self.host)
except CertificateError:
self.sock.shutdown(socket.SHUT_RDWR)
self.sock.close()
raise
class HTTPSHandler(BaseHTTPSHandler):
def __init__(self, ca_certs, check_domain=True):
BaseHTTPSHandler.__init__(self)
self.ca_certs = ca_certs
self.check_domain = check_domain
def _conn_maker(self, *args, **kwargs):
"""
This is called to create a connection instance. Normally you'd
pass a connection class to do_open, but it doesn't actually check for
a class, and just expects a callable. As long as we behave just as a
constructor would have, we should be OK. If it ever changes so that
we *must* pass a class, we'll create an UnsafeHTTPSConnection class
which just sets check_domain to False in the class definition, and
choose which one to pass to do_open.
"""
result = HTTPSConnection(*args, **kwargs)
if self.ca_certs:
result.ca_certs = self.ca_certs
result.check_domain = self.check_domain
return result
def https_open(self, req):
try:
return self.do_open(self._conn_maker, req)
except URLError as e:
if 'certificate verify failed' in str(e.reason):
raise CertificateError('Unable to verify server certificate '
'for %s' % req.host)
else:
raise
#
# To guard against mixing HTTP traffic with HTTPS (examples: a Man-In-The-
# Middle proxy using HTTP listens on port 443, or an index mistakenly serves
# HTML containing an http://xyz link when it should be https://xyz),
# you can use the following handler class, which does not allow HTTP traffic.
#
# It works by inheriting from HTTPHandler - so build_opener won't add a
# handler for HTTP itself.
#
class HTTPSOnlyHandler(HTTPSHandler, HTTPHandler):
def http_open(self, req):
raise URLError('Unexpected HTTP request on what should be a secure '
'connection: %s' % req)
#
# XML-RPC with timeouts
#
_ver_info = sys.version_info[:2]
if _ver_info == (2, 6):
class HTTP(httplib.HTTP):
def __init__(self, host='', port=None, **kwargs):
if port == 0: # 0 means use port 0, not the default port
port = None
self._setup(self._connection_class(host, port, **kwargs))
class HTTPS(httplib.HTTPS):
def __init__(self, host='', port=None, **kwargs):
if port == 0: # 0 means use port 0, not the default port
port = None
self._setup(self._connection_class(host, port, **kwargs))
class Transport(xmlrpclib.Transport):
def __init__(self, timeout, use_datetime=0):
self.timeout = timeout
xmlrpclib.Transport.__init__(self, use_datetime)
def make_connection(self, host):
h, eh, x509 = self.get_host_info(host)
if _ver_info == (2, 6):
result = HTTP(h, timeout=self.timeout)
else:
if not self._connection or host != self._connection[0]:
self._extra_headers = eh
self._connection = host, httplib.HTTPConnection(h)
result = self._connection[1]
return result
class SafeTransport(xmlrpclib.SafeTransport):
def __init__(self, timeout, use_datetime=0):
self.timeout = timeout
xmlrpclib.SafeTransport.__init__(self, use_datetime)
def make_connection(self, host):
h, eh, kwargs = self.get_host_info(host)
if not kwargs:
kwargs = {}
kwargs['timeout'] = self.timeout
if _ver_info == (2, 6):
result = HTTPS(host, None, **kwargs)
else:
if not self._connection or host != self._connection[0]:
self._extra_headers = eh
self._connection = host, httplib.HTTPSConnection(h, None,
**kwargs)
result = self._connection[1]
return result
class ServerProxy(xmlrpclib.ServerProxy):
def __init__(self, uri, **kwargs):
self.timeout = timeout = kwargs.pop('timeout', None)
# The above classes only come into play if a timeout
# is specified
if timeout is not None:
scheme, _ = splittype(uri)
use_datetime = kwargs.get('use_datetime', 0)
if scheme == 'https':
tcls = SafeTransport
else:
tcls = Transport
kwargs['transport'] = t = tcls(timeout, use_datetime=use_datetime)
self.transport = t
xmlrpclib.ServerProxy.__init__(self, uri, **kwargs)
#
# CSV functionality. This is provided because on 2.x, the csv module can't
# handle Unicode. However, we need to deal with Unicode in e.g. RECORD files.
#
def _csv_open(fn, mode, **kwargs):
if sys.version_info[0] < 3:
mode += 'b'
else:
kwargs['newline'] = ''
return open(fn, mode, **kwargs)
class CSVBase(object):
defaults = {
'delimiter': str(','), # The strs are used because we need native
'quotechar': str('"'), # str in the csv API (2.x won't take
'lineterminator': str('\n') # Unicode)
}
def __enter__(self):
return self
def __exit__(self, *exc_info):
self.stream.close()
class CSVReader(CSVBase):
def __init__(self, **kwargs):
if 'stream' in kwargs:
stream = kwargs['stream']
if sys.version_info[0] >= 3:
# needs to be a text stream
stream = codecs.getreader('utf-8')(stream)
self.stream = stream
else:
self.stream = _csv_open(kwargs['path'], 'r')
self.reader = csv.reader(self.stream, **self.defaults)
def __iter__(self):
return self
def next(self):
result = next(self.reader)
if sys.version_info[0] < 3:
for i, item in enumerate(result):
if not isinstance(item, text_type):
result[i] = item.decode('utf-8')
return result
__next__ = next
class CSVWriter(CSVBase):
def __init__(self, fn, **kwargs):
self.stream = _csv_open(fn, 'w')
self.writer = csv.writer(self.stream, **self.defaults)
def writerow(self, row):
if sys.version_info[0] < 3:
r = []
for item in row:
if isinstance(item, text_type):
item = item.encode('utf-8')
r.append(item)
row = r
self.writer.writerow(row)
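# Usage sketch (filename hypothetical): these wrappers hide the 2.x/3.x
# differences in the csv module, always exchanging text rows.
#
#   with CSVWriter('RECORD') as writer:
#       writer.writerow(['pkg/__init__.py', 'sha256=abc', '1024'])
#   with CSVReader(path='RECORD') as reader:
#       for row in reader:
#           path, digest, size = row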
#
# Configurator functionality
#
class Configurator(BaseConfigurator):
value_converters = dict(BaseConfigurator.value_converters)
value_converters['inc'] = 'inc_convert'
def __init__(self, config, base=None):
super(Configurator, self).__init__(config)
self.base = base or os.getcwd()
def configure_custom(self, config):
def convert(o):
if isinstance(o, (list, tuple)):
result = type(o)([convert(i) for i in o])
elif isinstance(o, dict):
if '()' in o:
result = self.configure_custom(o)
else:
result = {}
for k in o:
result[k] = convert(o[k])
else:
result = self.convert(o)
return result
c = config.pop('()')
if not callable(c):
c = self.resolve(c)
props = config.pop('.', None)
# Check for valid identifiers
args = config.pop('[]', ())
if args:
args = tuple([convert(o) for o in args])
items = [(k, convert(config[k])) for k in config if valid_ident(k)]
kwargs = dict(items)
result = c(*args, **kwargs)
if props:
for n, v in props.items():
setattr(result, n, convert(v))
return result
def __getitem__(self, key):
result = self.config[key]
if isinstance(result, dict) and '()' in result:
self.config[key] = result = self.configure_custom(result)
return result
def inc_convert(self, value):
"""Default converter for the inc:// protocol."""
if not os.path.isabs(value):
value = os.path.join(self.base, value)
with codecs.open(value, 'r', encoding='utf-8') as f:
result = json.load(f)
return result
#
# Mixin for running subprocesses and capturing their output
#
class SubprocessMixin(object):
def __init__(self, verbose=False, progress=None):
self.verbose = verbose
self.progress = progress
def reader(self, stream, context):
"""
Read lines from a subprocess' output stream and either pass them to a
progress callable (if specified) or write progress information to sys.stderr.
"""
progress = self.progress
verbose = self.verbose
while True:
s = stream.readline()
if not s:
break
if progress is not None:
progress(s, context)
else:
if not verbose:
sys.stderr.write('.')
else:
sys.stderr.write(s.decode('utf-8'))
sys.stderr.flush()
stream.close()
def run_command(self, cmd, **kwargs):
p = subprocess.Popen(cmd, stdout=subprocess.PIPE,
stderr=subprocess.PIPE, **kwargs)
t1 = threading.Thread(target=self.reader, args=(p.stdout, 'stdout'))
t1.start()
t2 = threading.Thread(target=self.reader, args=(p.stderr, 'stderr'))
t2.start()
p.wait()
t1.join()
t2.join()
if self.progress is not None:
self.progress('done.', 'main')
elif self.verbose:
sys.stderr.write('done.\n')
return p
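# Usage sketch (command illustrative): stdout and stderr are drained on
# separate threads so the child process cannot block on full pipes.
#
#   class Runner(SubprocessMixin):
#       pass
#
#   p = Runner(verbose=True).run_command(['python', '--version'])
#   # p is the completed subprocess.Popen; p.returncode is set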
| apache-2.0 |
askeing/servo | tests/wpt/web-platform-tests/tools/pywebsocket/src/test/test_handshake.py | 452 | 7134 | #!/usr/bin/env python
#
# Copyright 2012, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Tests for handshake._base module."""
import unittest
import set_sys_path # Update sys.path to locate mod_pywebsocket module.
from mod_pywebsocket.common import ExtensionParameter
from mod_pywebsocket.common import ExtensionParsingException
from mod_pywebsocket.common import format_extensions
from mod_pywebsocket.common import parse_extensions
from mod_pywebsocket.handshake._base import HandshakeException
from mod_pywebsocket.handshake._base import validate_subprotocol
class ValidateSubprotocolTest(unittest.TestCase):
"""A unittest for validate_subprotocol method."""
def test_validate_subprotocol(self):
# Should succeed.
validate_subprotocol('sample')
validate_subprotocol('Sample')
validate_subprotocol('sample\x7eprotocol')
# Should fail.
self.assertRaises(HandshakeException,
validate_subprotocol,
'')
self.assertRaises(HandshakeException,
validate_subprotocol,
'sample\x09protocol')
self.assertRaises(HandshakeException,
validate_subprotocol,
'sample\x19protocol')
self.assertRaises(HandshakeException,
validate_subprotocol,
'sample\x20protocol')
self.assertRaises(HandshakeException,
validate_subprotocol,
'sample\x7fprotocol')
self.assertRaises(HandshakeException,
validate_subprotocol,
# "Japan" in Japanese
u'\u65e5\u672c')
_TEST_TOKEN_EXTENSION_DATA = [
('foo', [('foo', [])]),
('foo; bar', [('foo', [('bar', None)])]),
('foo; bar=baz', [('foo', [('bar', 'baz')])]),
('foo; bar=baz; car=cdr', [('foo', [('bar', 'baz'), ('car', 'cdr')])]),
('foo; bar=baz, car; cdr',
[('foo', [('bar', 'baz')]), ('car', [('cdr', None)])]),
('a, b, c, d',
[('a', []), ('b', []), ('c', []), ('d', [])]),
]
_TEST_QUOTED_EXTENSION_DATA = [
('foo; bar=""', [('foo', [('bar', '')])]),
('foo; bar=" baz "', [('foo', [('bar', ' baz ')])]),
('foo; bar=",baz;"', [('foo', [('bar', ',baz;')])]),
('foo; bar="\\\r\\\nbaz"', [('foo', [('bar', '\r\nbaz')])]),
('foo; bar="\\"baz"', [('foo', [('bar', '"baz')])]),
('foo; bar="\xbbbaz"', [('foo', [('bar', '\xbbbaz')])]),
]
_TEST_REDUNDANT_TOKEN_EXTENSION_DATA = [
('foo \t ', [('foo', [])]),
('foo; \r\n bar', [('foo', [('bar', None)])]),
('foo; bar=\r\n \r\n baz', [('foo', [('bar', 'baz')])]),
('foo ;bar = baz ', [('foo', [('bar', 'baz')])]),
('foo,bar,,baz', [('foo', []), ('bar', []), ('baz', [])]),
]
_TEST_REDUNDANT_QUOTED_EXTENSION_DATA = [
('foo; bar="\r\n \r\n baz"', [('foo', [('bar', ' baz')])]),
]
class ExtensionsParserTest(unittest.TestCase):
def _verify_extension_list(self, expected_list, actual_list):
"""Verifies that ExtensionParameter objects in actual_list have the
same members as extension definitions in expected_list. Extension
definition used in this test is a pair of an extension name and a
parameter dictionary.
"""
self.assertEqual(len(expected_list), len(actual_list))
for expected, actual in zip(expected_list, actual_list):
(name, parameters) = expected
self.assertEqual(name, actual._name)
self.assertEqual(parameters, actual._parameters)
def test_parse(self):
for formatted_string, definition in _TEST_TOKEN_EXTENSION_DATA:
self._verify_extension_list(
definition, parse_extensions(formatted_string))
def test_parse_quoted_data(self):
for formatted_string, definition in _TEST_QUOTED_EXTENSION_DATA:
self._verify_extension_list(
definition, parse_extensions(formatted_string))
def test_parse_redundant_data(self):
for (formatted_string,
definition) in _TEST_REDUNDANT_TOKEN_EXTENSION_DATA:
self._verify_extension_list(
definition, parse_extensions(formatted_string))
def test_parse_redundant_quoted_data(self):
for (formatted_string,
definition) in _TEST_REDUNDANT_QUOTED_EXTENSION_DATA:
self._verify_extension_list(
definition, parse_extensions(formatted_string))
def test_parse_bad_data(self):
_TEST_BAD_EXTENSION_DATA = [
('foo; ; '),
('foo; a a'),
('foo foo'),
(',,,'),
('foo; bar='),
('foo; bar="hoge'),
('foo; bar="a\r"'),
('foo; bar="\\\xff"'),
('foo; bar=\ra'),
]
for formatted_string in _TEST_BAD_EXTENSION_DATA:
self.assertRaises(
ExtensionParsingException, parse_extensions, formatted_string)
class FormatExtensionsTest(unittest.TestCase):
def test_format_extensions(self):
for formatted_string, definitions in _TEST_TOKEN_EXTENSION_DATA:
extensions = []
for definition in definitions:
(name, parameters) = definition
extension = ExtensionParameter(name)
extension._parameters = parameters
extensions.append(extension)
self.assertEqual(
formatted_string, format_extensions(extensions))
if __name__ == '__main__':
unittest.main()
# vi:sts=4 sw=4 et
| mpl-2.0 |
fredericlepied/ansible | lib/ansible/modules/storage/netapp/netapp_e_hostgroup.py | 10 | 13965 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2016, NetApp, Inc
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.0',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: netapp_e_hostgroup
version_added: "2.2"
short_description: Manage NetApp Storage Array Host Groups
author: Kevin Hulquest (@hulquest)
description:
- Create, update or destroy host groups on a NetApp E-Series storage array.
options:
api_username:
required: true
description:
- The username to authenticate with the SANtricity WebServices Proxy or embedded REST API.
api_password:
required: true
description:
- The password to authenticate with the SANtricity WebServices Proxy or embedded REST API.
api_url:
required: true
description:
- The url to the SANtricity WebServices Proxy or embedded REST API.
validate_certs:
required: false
default: true
description:
- Should https certificates be validated?
ssid:
required: true
description:
- The ID of the array to manage (as configured on the web services proxy).
state:
required: true
description:
- Whether the specified host group should exist or not.
choices: ['present', 'absent']
name:
required: false
description:
- The name of the host group to manage. Either this or C(id) must be supplied.
new_name:
required: false
description:
- specify this when you need to update the name of a host group
id:
required: false
description:
- The id number of the host group to manage. Either this or C(name) must be supplied.
hosts:
required: false
description:
- a list of host names/labels to add to the group
'''
EXAMPLES = '''
- name: Configure Hostgroup
netapp_e_hostgroup:
ssid: "{{ ssid }}"
api_url: "{{ netapp_api_url }}"
api_username: "{{ netapp_api_username }}"
api_password: "{{ netapp_api_password }}"
validate_certs: "{{ netapp_api_validate_certs }}"
state: present
'''
RETURN = '''
clusterRef:
description: The unique identification value for this object. Other objects may use this reference value to refer to the cluster.
returned: always except when state is absent
type: string
sample: "3233343536373839303132333100000000000000"
confirmLUNMappingCreation:
description: If true, indicates that creation of LUN-to-volume mappings should require careful confirmation from the end-user, since such a mapping
will alter the volume access rights of other clusters, in addition to this one.
returned: always
type: boolean
sample: false
hosts:
description: A list of the hosts that are part of the host group after all operations.
returned: always except when state is absent
type: list
sample: ["HostA","HostB"]
id:
description: The id number of the hostgroup
returned: always except when state is absent
type: string
sample: "3233343536373839303132333100000000000000"
isSAControlled:
description: If true, indicates that I/O accesses from this cluster are subject to the storage array's default LUN-to-volume mappings. If false,
indicates that I/O accesses from the cluster are subject to cluster-specific LUN-to-volume mappings.
returned: always except when state is absent
type: boolean
sample: false
label:
description: The user-assigned, descriptive label string for the cluster.
returned: always
type: string
sample: "MyHostGroup"
name:
description: same as label
returned: always except when state is absent
type: string
sample: "MyHostGroup"
protectionInformationCapableAccessMethod:
description: This field is true if the host has a PI capable access method.
returned: always except when state is absent
type: boolean
sample: true
'''
HEADERS = {
"Content-Type": "application/json",
"Accept": "application/json"
}
import json
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.six.moves.urllib.error import HTTPError
from ansible.module_utils._text import to_native
from ansible.module_utils.urls import open_url
def request(url, data=None, headers=None, method='GET', use_proxy=True,
force=False, last_mod_time=None, timeout=10, validate_certs=True,
url_username=None, url_password=None, http_agent=None, force_basic_auth=True, ignore_errors=False):
try:
r = open_url(url=url, data=data, headers=headers, method=method, use_proxy=use_proxy,
force=force, last_mod_time=last_mod_time, timeout=timeout, validate_certs=validate_certs,
url_username=url_username, url_password=url_password, http_agent=http_agent,
force_basic_auth=force_basic_auth)
except HTTPError as e:
r = e.fp
try:
raw_data = r.read()
if raw_data:
data = json.loads(raw_data)
else:
raw_data = None
except:
if ignore_errors:
pass
else:
raise Exception(raw_data)
resp_code = r.getcode()
if resp_code >= 400 and not ignore_errors:
raise Exception(resp_code, data)
else:
return resp_code, data
def group_exists(module, id_type, ident, ssid, api_url, user, pwd):
rc, data = get_hostgroups(module, ssid, api_url, user, pwd)
for group in data:
if group[id_type] == ident:
return True, data
else:
continue
return False, data
def get_hostgroups(module, ssid, api_url, user, pwd):
groups = "storage-systems/%s/host-groups" % ssid
url = api_url + groups
try:
rc, data = request(url, headers=HEADERS, url_username=user, url_password=pwd)
return rc, data
except HTTPError as e:
module.fail_json(msg="Failed to get host groups. Id [%s]. Error [%s]." % (ssid, to_native(e)))
def get_hostref(module, ssid, name, api_url, user, pwd):
all_hosts = 'storage-systems/%s/hosts' % ssid
url = api_url + all_hosts
try:
rc, data = request(url, method='GET', headers=HEADERS, url_username=user, url_password=pwd)
except Exception as e:
module.fail_json(msg="Failed to get hosts. Id [%s]. Error [%s]." % (ssid, to_native(e)))
for host in data:
if host['name'] == name:
return host['hostRef']
else:
continue
module.fail_json(msg="No host with the name %s could be found" % name)
def create_hostgroup(module, ssid, name, api_url, user, pwd, hosts=None):
groups = "storage-systems/%s/host-groups" % ssid
url = api_url + groups
hostrefs = []
if hosts:
for host in hosts:
href = get_hostref(module, ssid, host, api_url, user, pwd)
hostrefs.append(href)
post_data = json.dumps(dict(name=name, hosts=hostrefs))
try:
rc, data = request(url, method='POST', data=post_data, headers=HEADERS, url_username=user, url_password=pwd)
except Exception as e:
module.fail_json(msg="Failed to create host group. Id [%s]. Error [%s]." % (ssid, to_native(e)))
return rc, data
def update_hostgroup(module, ssid, name, api_url, user, pwd, hosts=None, new_name=None):
gid = get_hostgroup_id(module, ssid, name, api_url, user, pwd)
groups = "storage-systems/%s/host-groups/%s" % (ssid, gid)
url = api_url + groups
hostrefs = []
if hosts:
for host in hosts:
href = get_hostref(module, ssid, host, api_url, user, pwd)
hostrefs.append(href)
if new_name:
post_data = json.dumps(dict(name=new_name, hosts=hostrefs))
else:
post_data = json.dumps(dict(hosts=hostrefs))
try:
rc, data = request(url, method='POST', data=post_data, headers=HEADERS, url_username=user, url_password=pwd)
except Exception as e:
module.fail_json(msg="Failed to update host group. Group [%s]. Id [%s]. Error [%s]." % (gid, ssid,
to_native(e)))
return rc, data
def delete_hostgroup(module, ssid, group_id, api_url, user, pwd):
groups = "storage-systems/%s/host-groups/%s" % (ssid, group_id)
url = api_url + groups
# TODO: Loop through hosts, do mapping to href, make new list to pass to data
try:
rc, data = request(url, method='DELETE', headers=HEADERS, url_username=user, url_password=pwd)
except Exception as e:
module.fail_json(msg="Failed to delete host group. Group [%s]. Id [%s]. Error [%s]." % (group_id, ssid, to_native(e)))
return rc, data
def get_hostgroup_id(module, ssid, name, api_url, user, pwd):
all_groups = 'storage-systems/%s/host-groups' % ssid
url = api_url + all_groups
rc, data = request(url, method='GET', headers=HEADERS, url_username=user, url_password=pwd)
for hg in data:
if hg['name'] == name:
return hg['id']
else:
continue
module.fail_json(msg="A hostgroup with the name %s could not be found" % name)
def get_hosts_in_group(module, ssid, group_name, api_url, user, pwd):
all_groups = 'storage-systems/%s/host-groups' % ssid
g_url = api_url + all_groups
try:
g_rc, g_data = request(g_url, method='GET', headers=HEADERS, url_username=user, url_password=pwd)
except Exception as e:
module.fail_json(
msg="Failed in first step getting hosts from group. Group: [%s]. Id [%s]. Error [%s]." % (group_name,
ssid,
to_native(e)))
all_hosts = 'storage-systems/%s/hosts' % ssid
h_url = api_url + all_hosts
try:
h_rc, h_data = request(h_url, method='GET', headers=HEADERS, url_username=user, url_password=pwd)
except Exception as e:
module.fail_json(
msg="Failed in second step getting hosts from group. Group: [%s]. Id [%s]. Error [%s]." % (
group_name,
ssid,
to_native(e)))
hosts_in_group = []
for hg in g_data:
if hg['name'] == group_name:
clusterRef = hg['clusterRef']
for host in h_data:
if host['clusterRef'] == clusterRef:
hosts_in_group.append(host['name'])
return hosts_in_group
def main():
module = AnsibleModule(
argument_spec=dict(
name=dict(required=False),
new_name=dict(required=False),
ssid=dict(required=True),
id=dict(required=False),
state=dict(required=True, choices=['present', 'absent']),
hosts=dict(required=False, type='list'),
api_url=dict(required=True),
api_username=dict(required=True),
validate_certs=dict(required=False, default=True),
api_password=dict(required=True, no_log=True)
),
supports_check_mode=False,
mutually_exclusive=[['name', 'id']],
required_one_of=[['name', 'id']]
)
name = module.params['name']
new_name = module.params['new_name']
ssid = module.params['ssid']
id_num = module.params['id']
state = module.params['state']
hosts = module.params['hosts']
user = module.params['api_username']
pwd = module.params['api_password']
api_url = module.params['api_url']
if not api_url.endswith('/'):
api_url += '/'
if name:
id_type = 'name'
id_key = name
elif id_num:
id_type = 'id'
id_key = id_num
exists, group_data = group_exists(module, id_type, id_key, ssid, api_url, user, pwd)
if state == 'present':
if not exists:
try:
rc, data = create_hostgroup(module, ssid, name, api_url, user, pwd, hosts)
except Exception as e:
module.fail_json(msg="Failed to create a host group. Id [%s]. Error [%s]." % (ssid, to_native(e)))
hosts = get_hosts_in_group(module, ssid, name, api_url, user, pwd)
module.exit_json(changed=True, hosts=hosts, **data)
else:
current_hosts = get_hosts_in_group(module, ssid, name, api_url, user, pwd)
if not current_hosts:
current_hosts = []
if not hosts:
hosts = []
if set(current_hosts) != set(hosts):
try:
rc, data = update_hostgroup(module, ssid, name, api_url, user, pwd, hosts, new_name)
except Exception as e:
module.fail_json(
msg="Failed to update host group. Group: [%s]. Id [%s]. Error [%s]." % (name, ssid, to_native(e)))
module.exit_json(changed=True, hosts=hosts, **data)
else:
for group in group_data:
if group['name'] == name:
module.exit_json(changed=False, hosts=current_hosts, **group)
elif state == 'absent':
if exists:
hg_id = get_hostgroup_id(module, ssid, name, api_url, user, pwd)
try:
rc, data = delete_hostgroup(module, ssid, hg_id, api_url, user, pwd)
except Exception as e:
module.fail_json(
msg="Failed to delete host group. Group: [%s]. Id [%s]. Error [%s]." % (name, ssid, to_native(e)))
module.exit_json(changed=True, msg="Host Group deleted")
else:
module.exit_json(changed=False, msg="Host Group is already absent")
if __name__ == '__main__':
main()
| gpl-3.0 |
gemmaan/moviesenal | Hasil/Lib/site-packages/pip/_vendor/cachecontrol/controller.py | 327 | 13024 | """
The httplib2 algorithms ported for use with requests.
"""
import logging
import re
import calendar
import time
from email.utils import parsedate_tz
from pip._vendor.requests.structures import CaseInsensitiveDict
from .cache import DictCache
from .serialize import Serializer
logger = logging.getLogger(__name__)
URI = re.compile(r"^(([^:/?#]+):)?(//([^/?#]*))?([^?#]*)(\?([^#]*))?(#(.*))?")
def parse_uri(uri):
"""Parses a URI using the regex given in Appendix B of RFC 3986.
(scheme, authority, path, query, fragment) = parse_uri(uri)
"""
groups = URI.match(uri).groups()
return (groups[1], groups[3], groups[4], groups[6], groups[8])
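# Example:
#
#   >>> parse_uri('https://example.com/pkg/index.html?ref=1#top')
#   ('https', 'example.com', '/pkg/index.html', 'ref=1', 'top')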
class CacheController(object):
"""An interface to see if request should cached or not.
"""
def __init__(self, cache=None, cache_etags=True, serializer=None):
self.cache = cache or DictCache()
self.cache_etags = cache_etags
self.serializer = serializer or Serializer()
@classmethod
def _urlnorm(cls, uri):
"""Normalize the URL to create a safe key for the cache"""
(scheme, authority, path, query, fragment) = parse_uri(uri)
if not scheme or not authority:
raise Exception("Only absolute URIs are allowed. uri = %s" % uri)
scheme = scheme.lower()
authority = authority.lower()
if not path:
path = "/"
# Could do syntax based normalization of the URI before
# computing the digest. See Section 6.2.2 of Std 66.
request_uri = query and "?".join([path, query]) or path
defrag_uri = scheme + "://" + authority + request_uri
return defrag_uri
@classmethod
def cache_url(cls, uri):
return cls._urlnorm(uri)
def parse_cache_control(self, headers):
"""
Parse the cache control headers returning a dictionary with values
for the different directives.
"""
retval = {}
cc_header = 'cache-control'
if 'Cache-Control' in headers:
cc_header = 'Cache-Control'
if cc_header in headers:
parts = headers[cc_header].split(',')
parts_with_args = [
tuple([x.strip().lower() for x in part.split("=", 1)])
for part in parts if -1 != part.find("=")
]
parts_wo_args = [
(name.strip().lower(), 1)
for name in parts if -1 == name.find("=")
]
retval = dict(parts_with_args + parts_wo_args)
return retval
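# Example (header illustrative): directives with an argument map to their
# value; bare directives map to 1.
#
#   cc = CacheController()
#   cc.parse_cache_control({'cache-control': 'max-age=3600, no-transform'})
#   # -> {'max-age': '3600', 'no-transform': 1}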
def cached_request(self, request):
"""
Return a cached response if it exists in the cache, otherwise
return False.
"""
cache_url = self.cache_url(request.url)
logger.debug('Looking up "%s" in the cache', cache_url)
cc = self.parse_cache_control(request.headers)
# Bail out if the request insists on fresh data
if 'no-cache' in cc:
logger.debug('Request header has "no-cache", cache bypassed')
return False
if 'max-age' in cc and cc['max-age'] == 0:
logger.debug('Request header has "max_age" as 0, cache bypassed')
return False
# Request allows serving from the cache, let's see if we find something
cache_data = self.cache.get(cache_url)
if cache_data is None:
logger.debug('No cache entry available')
return False
# Check whether it can be deserialized
resp = self.serializer.loads(request, cache_data)
if not resp:
logger.warning('Cache entry deserialization failed, entry ignored')
return False
# If we have a cached 301, return it immediately. We don't
# need to test our response for other headers b/c it is
# intrinsically "cacheable" as it is Permanent.
# See:
# https://tools.ietf.org/html/rfc7231#section-6.4.2
#
# Client can try to refresh the value by repeating the request
# with cache busting headers as usual (ie no-cache).
if resp.status == 301:
msg = ('Returning cached "301 Moved Permanently" response '
'(ignoring date and etag information)')
logger.debug(msg)
return resp
headers = CaseInsensitiveDict(resp.headers)
if not headers or 'date' not in headers:
if 'etag' not in headers:
# Without date or etag, the cached response can never be used
# and should be deleted.
logger.debug('Purging cached response: no date or etag')
self.cache.delete(cache_url)
logger.debug('Ignoring cached response: no date')
return False
now = time.time()
date = calendar.timegm(
parsedate_tz(headers['date'])
)
current_age = max(0, now - date)
logger.debug('Current age based on date: %i', current_age)
# TODO: There is an assumption that the result will be a
# urllib3 response object. This may not be best since we
# could probably avoid instantiating or constructing the
# response until we know we need it.
resp_cc = self.parse_cache_control(headers)
# determine freshness
freshness_lifetime = 0
# Check the max-age pragma in the cache control header
if 'max-age' in resp_cc and resp_cc['max-age'].isdigit():
freshness_lifetime = int(resp_cc['max-age'])
logger.debug('Freshness lifetime from max-age: %i',
freshness_lifetime)
# If there isn't a max-age, check for an expires header
elif 'expires' in headers:
expires = parsedate_tz(headers['expires'])
if expires is not None:
expire_time = calendar.timegm(expires) - date
freshness_lifetime = max(0, expire_time)
logger.debug("Freshness lifetime from expires: %i",
freshness_lifetime)
# Determine if we are setting a freshness limit in the
# request. Note that this overrides what was in the response.
if 'max-age' in cc:
try:
freshness_lifetime = int(cc['max-age'])
logger.debug('Freshness lifetime from request max-age: %i',
freshness_lifetime)
except ValueError:
freshness_lifetime = 0
if 'min-fresh' in cc:
try:
min_fresh = int(cc['min-fresh'])
except ValueError:
min_fresh = 0
# adjust our current age by our min fresh
current_age += min_fresh
logger.debug('Adjusted current age from min-fresh: %i',
current_age)
# Return entry if it is fresh enough
if freshness_lifetime > current_age:
logger.debug('The response is "fresh", returning cached response')
logger.debug('%i > %i', freshness_lifetime, current_age)
return resp
# we're not fresh. If we don't have an Etag, clear it out
if 'etag' not in headers:
logger.debug(
'The cached response is "stale" with no etag, purging'
)
self.cache.delete(cache_url)
# return the original handler
return False
def conditional_headers(self, request):
cache_url = self.cache_url(request.url)
resp = self.serializer.loads(request, self.cache.get(cache_url))
new_headers = {}
if resp:
headers = CaseInsensitiveDict(resp.headers)
if 'etag' in headers:
new_headers['If-None-Match'] = headers['ETag']
if 'last-modified' in headers:
new_headers['If-Modified-Since'] = headers['Last-Modified']
return new_headers
def cache_response(self, request, response, body=None):
"""
Algorithm for caching requests.
This assumes a requests Response object.
"""
# From httplib2: Don't cache 206's since we aren't going to
# handle byte range requests
cacheable_status_codes = [200, 203, 300, 301]
if response.status not in cacheable_status_codes:
logger.debug(
'Status code %s not in %s',
response.status,
cacheable_status_codes
)
return
response_headers = CaseInsensitiveDict(response.headers)
        # If we've been given a body, and the response has a valid
        # Content-Length header, we can check whether the body we've
        # been given matches the expected size; if it doesn't, we just
        # skip trying to cache it.
if (body is not None and
"content-length" in response_headers and
response_headers["content-length"].isdigit() and
int(response_headers["content-length"]) != len(body)):
return
cc_req = self.parse_cache_control(request.headers)
cc = self.parse_cache_control(response_headers)
cache_url = self.cache_url(request.url)
logger.debug('Updating cache with response from "%s"', cache_url)
# Delete it from the cache if we happen to have it stored there
no_store = False
if cc.get('no-store'):
no_store = True
logger.debug('Response header has "no-store"')
if cc_req.get('no-store'):
no_store = True
logger.debug('Request header has "no-store"')
if no_store and self.cache.get(cache_url):
logger.debug('Purging existing cache entry to honor "no-store"')
self.cache.delete(cache_url)
# If we've been given an etag, then keep the response
if self.cache_etags and 'etag' in response_headers:
logger.debug('Caching due to etag')
self.cache.set(
cache_url,
self.serializer.dumps(request, response, body=body),
)
        # Add any 301s to the cache. We do this before looking at
        # the Date headers.
elif response.status == 301:
            logger.debug('Caching permanent redirect')
self.cache.set(
cache_url,
self.serializer.dumps(request, response)
)
# Add to the cache if the response headers demand it. If there
# is no date header then we can't do anything about expiring
# the cache.
elif 'date' in response_headers:
# cache when there is a max-age > 0
if cc and cc.get('max-age'):
if cc['max-age'].isdigit() and int(cc['max-age']) > 0:
logger.debug('Caching b/c date exists and max-age > 0')
self.cache.set(
cache_url,
self.serializer.dumps(request, response, body=body),
)
        # If the response can expire, it means we should cache it
        # in the meantime.
elif 'expires' in response_headers:
if response_headers['expires']:
logger.debug('Caching b/c of expires header')
self.cache.set(
cache_url,
self.serializer.dumps(request, response, body=body),
)
def update_cached_response(self, request, response):
"""On a 304 we will get a new set of headers that we want to
update our cached value with, assuming we have one.
This should only ever be called when we've sent an ETag and
gotten a 304 as the response.
"""
cache_url = self.cache_url(request.url)
cached_response = self.serializer.loads(
request,
self.cache.get(cache_url)
)
if not cached_response:
# we didn't have a cached response
return response
        # Let's update our headers with the headers from the new request:
# http://tools.ietf.org/html/draft-ietf-httpbis-p4-conditional-26#section-4.1
#
        # The server isn't supposed to send headers that would make
        # the cached body invalid. But... just in case, we'll be sure
        # to strip out ones we know might be problematic due to
        # typical assumptions.
excluded_headers = [
"content-length",
]
cached_response.headers.update(
dict((k, v) for k, v in response.headers.items()
if k.lower() not in excluded_headers)
)
# we want a 200 b/c we have content via the cache
cached_response.status = 200
# update our cache
self.cache.set(
cache_url,
self.serializer.dumps(request, cached_response),
)
return cached_response
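# --- Illustrative sketch (not part of the original module) ---
# A hedged outline of the revalidation cycle the two methods above take
# part in; `controller` is a CacheController and `send` stands in for
# whatever the caller uses to perform the HTTP request:
#
#     request.headers.update(controller.conditional_headers(request))
#     response = send(request)
#     if response.status == 304:
#         response = controller.update_cached_response(request, response)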
| mit |
jeenalee/servo | tests/wpt/css-tests/tools/pywebsocket/src/mod_pywebsocket/msgutil.py | 658 | 7598 | # Copyright 2011, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Message related utilities.
Note: request.connection.write/read are used in this module, even though
the mod_python documentation says that they should be used only in connection
handlers. Unfortunately, we have no other options. For example,
request.write/read are not suitable because they don't allow direct raw
bytes writing/reading.
"""
import Queue
import threading
# Export Exception symbols from msgutil for backward compatibility
from mod_pywebsocket._stream_base import ConnectionTerminatedException
from mod_pywebsocket._stream_base import InvalidFrameException
from mod_pywebsocket._stream_base import BadOperationException
from mod_pywebsocket._stream_base import UnsupportedFrameException
# An API for handler to send/receive WebSocket messages.
def close_connection(request):
"""Close connection.
Args:
request: mod_python request.
"""
request.ws_stream.close_connection()
def send_message(request, payload_data, end=True, binary=False):
"""Send a message (or part of a message).
Args:
request: mod_python request.
payload_data: unicode text or str binary to send.
end: True to terminate a message.
False to send payload_data as part of a message that is to be
terminated by next or later send_message call with end=True.
binary: send payload_data as binary frame(s).
Raises:
        BadOperationException: when the server has already terminated.
"""
request.ws_stream.send_message(payload_data, end, binary)
def receive_message(request):
"""Receive a WebSocket frame and return its payload as a text in
unicode or a binary in str.
Args:
request: mod_python request.
Raises:
        InvalidFrameException: when the client sends an invalid frame.
        UnsupportedFrameException: when the client sends an unsupported
                                   frame, e.g. some reserved bit is set
                                   but no extension can recognize it.
        InvalidUTF8Exception: when the client sends a text frame containing
                              an invalid UTF-8 string.
        ConnectionTerminatedException: when the connection is closed
                                       unexpectedly.
        BadOperationException: when the client has already terminated.
"""
return request.ws_stream.receive_message()
def send_ping(request, body=''):
request.ws_stream.send_ping(body)
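# --- Illustrative usage sketch (not part of the original module) ---
# A minimal echo handler built on the helpers above; `request` is the
# mod_python request object that pywebsocket hands to a handler.
def _echo_example(request):
    # Read one message (text or binary) and send it straight back as a
    # complete message (end=True is the default).
    message = receive_message(request)
    send_message(request, message)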
class MessageReceiver(threading.Thread):
"""This class receives messages from the client.
This class provides three ways to receive messages: blocking,
non-blocking, and via callback. Callback has the highest precedence.
Note: This class should not be used with the standalone server for wss
because pyOpenSSL used by the server raises a fatal error if the socket
is accessed from multiple threads.
"""
def __init__(self, request, onmessage=None):
"""Construct an instance.
Args:
request: mod_python request.
onmessage: a function to be called when a message is received.
May be None. If not None, the function is called on
another thread. In that case, MessageReceiver.receive
and MessageReceiver.receive_nowait are useless
because they will never return any messages.
"""
threading.Thread.__init__(self)
self._request = request
self._queue = Queue.Queue()
self._onmessage = onmessage
self._stop_requested = False
self.setDaemon(True)
self.start()
def run(self):
try:
while not self._stop_requested:
message = receive_message(self._request)
if self._onmessage:
self._onmessage(message)
else:
self._queue.put(message)
finally:
close_connection(self._request)
def receive(self):
""" Receive a message from the channel, blocking.
Returns:
message as a unicode string.
"""
return self._queue.get()
def receive_nowait(self):
""" Receive a message from the channel, non-blocking.
Returns:
message as a unicode string if available. None otherwise.
"""
try:
message = self._queue.get_nowait()
except Queue.Empty:
message = None
return message
def stop(self):
"""Request to stop this instance.
The instance will be stopped after receiving the next message.
This method may not be very useful, but there is no clean way
in Python to forcefully stop a running thread.
"""
self._stop_requested = True
class MessageSender(threading.Thread):
"""This class sends messages to the client.
This class provides both synchronous and asynchronous ways to send
messages.
Note: This class should not be used with the standalone server for wss
because pyOpenSSL used by the server raises a fatal error if the socket
is accessed from multiple threads.
"""
def __init__(self, request):
"""Construct an instance.
Args:
request: mod_python request.
"""
threading.Thread.__init__(self)
self._request = request
self._queue = Queue.Queue()
self.setDaemon(True)
self.start()
def run(self):
while True:
message, condition = self._queue.get()
condition.acquire()
send_message(self._request, message)
condition.notify()
condition.release()
def send(self, message):
"""Send a message, blocking."""
condition = threading.Condition()
condition.acquire()
self._queue.put((message, condition))
condition.wait()
def send_nowait(self, message):
"""Send a message, non-blocking."""
self._queue.put((message, threading.Condition()))
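# --- Illustrative usage sketch (not part of the original module) ---
# Pairing the two thread classes above into a simple relay; `request` is a
# mod_python request. The callback runs on the receiver thread, so
# receive()/receive_nowait() will never return messages in this mode.
def _relay_example(request):
    sender = MessageSender(request)
    receiver = MessageReceiver(request, onmessage=sender.send_nowait)
    return sender, receiver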
# vi:sts=4 sw=4 et
| mpl-2.0 |
olegk0/rk3188-kernel | scripts/rt-tester/rt-tester.py | 11005 | 5307 | #!/usr/bin/python
#
# rt-mutex tester
#
# (C) 2006 Thomas Gleixner <tglx@linutronix.de>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 2 as
# published by the Free Software Foundation.
#
import os
import sys
import getopt
import shutil
import string
# Globals
quiet = 0
test = 0
comments = 0
sysfsprefix = "/sys/devices/system/rttest/rttest"
statusfile = "/status"
commandfile = "/command"
# Command opcodes
cmd_opcodes = {
"schedother" : "1",
"schedfifo" : "2",
"lock" : "3",
"locknowait" : "4",
"lockint" : "5",
"lockintnowait" : "6",
"lockcont" : "7",
"unlock" : "8",
"signal" : "11",
"resetevent" : "98",
"reset" : "99",
}
test_opcodes = {
"prioeq" : ["P" , "eq" , None],
"priolt" : ["P" , "lt" , None],
"priogt" : ["P" , "gt" , None],
"nprioeq" : ["N" , "eq" , None],
"npriolt" : ["N" , "lt" , None],
"npriogt" : ["N" , "gt" , None],
"unlocked" : ["M" , "eq" , 0],
"trylock" : ["M" , "eq" , 1],
"blocked" : ["M" , "eq" , 2],
"blockedwake" : ["M" , "eq" , 3],
"locked" : ["M" , "eq" , 4],
"opcodeeq" : ["O" , "eq" , None],
"opcodelt" : ["O" , "lt" , None],
"opcodegt" : ["O" , "gt" , None],
"eventeq" : ["E" , "eq" , None],
"eventlt" : ["E" , "lt" , None],
"eventgt" : ["E" , "gt" , None],
}
# Print usage information
def usage():
print "rt-tester.py <-c -h -q -t> <testfile>"
print " -c display comments after first command"
print " -h help"
print " -q quiet mode"
print " -t test mode (syntax check)"
print " testfile: read test specification from testfile"
print " otherwise from stdin"
return
# Print progress when not in quiet mode
def progress(str):
if not quiet:
print str
# Analyse a status value
def analyse(val, top, arg):
intval = int(val)
if top[0] == "M":
intval = intval / (10 ** int(arg))
intval = intval % 10
argval = top[2]
elif top[0] == "O":
argval = int(cmd_opcodes.get(arg, arg))
else:
argval = int(arg)
# progress("%d %s %d" %(intval, top[1], argval))
if top[1] == "eq" and intval == argval:
return 1
if top[1] == "lt" and intval < argval:
return 1
if top[1] == "gt" and intval > argval:
return 1
return 0
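# For illustration: analyse("3210", ["M", "eq", 1], "1") isolates the tens
# digit of the status value (3210 / 10**1 % 10 == 1), compares it with
# top[2] == 1 and returns 1.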
# Parse the commandline
try:
(options, arguments) = getopt.getopt(sys.argv[1:],'chqt')
except getopt.GetoptError, ex:
usage()
sys.exit(1)
# Parse commandline options
for option, value in options:
if option == "-c":
comments = 1
elif option == "-q":
quiet = 1
elif option == "-t":
test = 1
elif option == '-h':
usage()
sys.exit(0)
# Select the input source
if arguments:
try:
fd = open(arguments[0])
except Exception,ex:
sys.stderr.write("File not found %s\n" %(arguments[0]))
sys.exit(1)
else:
fd = sys.stdin
linenr = 0
# Read the test patterns
while 1:
linenr = linenr + 1
line = fd.readline()
if not len(line):
break
line = line.strip()
parts = line.split(":")
if not parts or len(parts) < 1:
continue
if len(parts[0]) == 0:
continue
if parts[0].startswith("#"):
if comments > 1:
progress(line)
continue
if comments == 1:
comments = 2
progress(line)
cmd = parts[0].strip().lower()
opc = parts[1].strip().lower()
tid = parts[2].strip()
dat = parts[3].strip()
try:
# Test or wait for a status value
if cmd == "t" or cmd == "w":
testop = test_opcodes[opc]
fname = "%s%s%s" %(sysfsprefix, tid, statusfile)
if test:
print fname
continue
while 1:
query = 1
fsta = open(fname, 'r')
status = fsta.readline().strip()
fsta.close()
stat = status.split(",")
for s in stat:
s = s.strip()
if s.startswith(testop[0]):
# Separate status value
val = s[2:].strip()
query = analyse(val, testop, dat)
break
if query or cmd == "t":
break
progress(" " + status)
if not query:
sys.stderr.write("Test failed in line %d\n" %(linenr))
sys.exit(1)
# Issue a command to the tester
elif cmd == "c":
cmdnr = cmd_opcodes[opc]
# Build command string and sys filename
cmdstr = "%s:%s" %(cmdnr, dat)
fname = "%s%s%s" %(sysfsprefix, tid, commandfile)
if test:
print fname
continue
fcmd = open(fname, 'w')
fcmd.write(cmdstr)
fcmd.close()
except Exception,ex:
sys.stderr.write(str(ex))
sys.stderr.write("\nSyntax error in line %d\n" %(linenr))
if not test:
fd.close()
sys.exit(1)
# Normal exit pass
print "Pass"
sys.exit(0)
| gpl-2.0 |
gdgellatly/OCB1 | addons/pad/py_etherpad/__init__.py | 505 | 7804 | """Module to talk to EtherpadLite API."""
import json
import urllib
import urllib2
class EtherpadLiteClient:
"""Client to talk to EtherpadLite API."""
API_VERSION = 1 # TODO probably 1.1 sometime soon
CODE_OK = 0
CODE_INVALID_PARAMETERS = 1
CODE_INTERNAL_ERROR = 2
CODE_INVALID_FUNCTION = 3
CODE_INVALID_API_KEY = 4
TIMEOUT = 20
apiKey = ""
baseUrl = "http://localhost:9001/api"
def __init__(self, apiKey=None, baseUrl=None):
if apiKey:
self.apiKey = apiKey
if baseUrl:
self.baseUrl = baseUrl
def call(self, function, arguments=None):
"""Create a dictionary of all parameters"""
url = '%s/%d/%s' % (self.baseUrl, self.API_VERSION, function)
params = arguments or {}
params.update({'apikey': self.apiKey})
data = urllib.urlencode(params, True)
try:
opener = urllib2.build_opener()
request = urllib2.Request(url=url, data=data)
response = opener.open(request, timeout=self.TIMEOUT)
result = response.read()
response.close()
except urllib2.HTTPError:
raise
result = json.loads(result)
if result is None:
raise ValueError("JSON response could not be decoded")
return self.handleResult(result)
def handleResult(self, result):
"""Handle API call result"""
if 'code' not in result:
raise Exception("API response has no code")
if 'message' not in result:
raise Exception("API response has no message")
if 'data' not in result:
result['data'] = None
if result['code'] == self.CODE_OK:
return result['data']
elif result['code'] == self.CODE_INVALID_PARAMETERS or result['code'] == self.CODE_INVALID_API_KEY:
raise ValueError(result['message'])
elif result['code'] == self.CODE_INTERNAL_ERROR:
raise Exception(result['message'])
elif result['code'] == self.CODE_INVALID_FUNCTION:
raise Exception(result['message'])
else:
raise Exception("An unexpected error occurred whilst handling the response")
# GROUPS
# Pads can belong to a group. There will always be public pads that do not belong to a group (or we give this group the id 0)
def createGroup(self):
"""creates a new group"""
return self.call("createGroup")
def createGroupIfNotExistsFor(self, groupMapper):
"""this functions helps you to map your application group ids to etherpad lite group ids"""
return self.call("createGroupIfNotExistsFor", {
"groupMapper": groupMapper
})
def deleteGroup(self, groupID):
"""deletes a group"""
return self.call("deleteGroup", {
"groupID": groupID
})
def listPads(self, groupID):
"""returns all pads of this group"""
return self.call("listPads", {
"groupID": groupID
})
def createGroupPad(self, groupID, padName, text=''):
"""creates a new pad in this group"""
params = {
"groupID": groupID,
"padName": padName,
}
if text:
params['text'] = text
return self.call("createGroupPad", params)
# AUTHORS
    # These authors are bound to the attributes the users choose (color and name).
def createAuthor(self, name=''):
"""creates a new author"""
params = {}
if name:
params['name'] = name
return self.call("createAuthor", params)
def createAuthorIfNotExistsFor(self, authorMapper, name=''):
"""this functions helps you to map your application author ids to etherpad lite author ids"""
params = {
'authorMapper': authorMapper
}
if name:
params['name'] = name
return self.call("createAuthorIfNotExistsFor", params)
# SESSIONS
    # Sessions can be created between a group and an author. This allows
# an author to access more than one group. The sessionID will be set as
# a cookie to the client and is valid until a certain date.
def createSession(self, groupID, authorID, validUntil):
"""creates a new session"""
return self.call("createSession", {
"groupID": groupID,
"authorID": authorID,
"validUntil": validUntil
})
def deleteSession(self, sessionID):
"""deletes a session"""
return self.call("deleteSession", {
"sessionID": sessionID
})
def getSessionInfo(self, sessionID):
"""returns informations about a session"""
return self.call("getSessionInfo", {
"sessionID": sessionID
})
def listSessionsOfGroup(self, groupID):
"""returns all sessions of a group"""
return self.call("listSessionsOfGroup", {
"groupID": groupID
})
def listSessionsOfAuthor(self, authorID):
"""returns all sessions of an author"""
return self.call("listSessionsOfAuthor", {
"authorID": authorID
})
# PAD CONTENT
# Pad content can be updated and retrieved through the API
def getText(self, padID, rev=None):
"""returns the text of a pad"""
params = {"padID": padID}
if rev is not None:
params['rev'] = rev
return self.call("getText", params)
# introduced with pull request merge
def getHtml(self, padID, rev=None):
"""returns the html of a pad"""
params = {"padID": padID}
if rev is not None:
params['rev'] = rev
return self.call("getHTML", params)
def setText(self, padID, text):
"""sets the text of a pad"""
return self.call("setText", {
"padID": padID,
"text": text
})
def setHtml(self, padID, html):
"""sets the text of a pad from html"""
return self.call("setHTML", {
"padID": padID,
"html": html
})
# PAD
# Group pads are normal pads, but with the name schema
    # GROUPID$PADNAME. A security manager controls access to them, and it is
    # forbidden for normal pads to include a $ in the name.
def createPad(self, padID, text=''):
"""creates a new pad"""
params = {
"padID": padID,
}
if text:
params['text'] = text
return self.call("createPad", params)
def getRevisionsCount(self, padID):
"""returns the number of revisions of this pad"""
return self.call("getRevisionsCount", {
"padID": padID
})
def deletePad(self, padID):
"""deletes a pad"""
return self.call("deletePad", {
"padID": padID
})
def getReadOnlyID(self, padID):
"""returns the read only link of a pad"""
return self.call("getReadOnlyID", {
"padID": padID
})
def setPublicStatus(self, padID, publicStatus):
"""sets a boolean for the public status of a pad"""
return self.call("setPublicStatus", {
"padID": padID,
"publicStatus": publicStatus
})
def getPublicStatus(self, padID):
"""return true of false"""
return self.call("getPublicStatus", {
"padID": padID
})
def setPassword(self, padID, password):
"""returns ok or a error message"""
return self.call("setPassword", {
"padID": padID,
"password": password
})
def isPasswordProtected(self, padID):
"""returns true or false"""
return self.call("isPasswordProtected", {
"padID": padID
})
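# --- Illustrative usage sketch (not part of the original module) ---
# Assumes an EtherpadLite instance reachable at the default baseUrl and a
# valid API key; the key and pad name below are made up for the example.
if __name__ == '__main__':
    client = EtherpadLiteClient(apiKey='EXAMPLEKEY')
    client.createPad('demo-pad', text='hello')
    print client.getText('demo-pad')['text']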
| agpl-3.0 |
AndiDog/git-cola | cola/widgets/common.py | 3 | 2073 | from __future__ import division, absolute_import, unicode_literals
from ..i18n import N_
from .. import cmds
from .. import hotkeys
from .. import icons
from .. import qtutils
from .. import utils
def cmd_action(parent, cmd, context, fn, *keys):
"""Wrap a standard Command object in a QAction
This function assumes that :func:`fn()` takes no arguments,
that `cmd` has a :func:`name()` method, and that the `cmd`
constructor takes a single argument, as returned by `fn`.
"""
return qtutils.add_action(
parent, cmd.name(), lambda: cmds.do(cmd, context, fn()), *keys)
def default_app_action(context, parent, fn):
"""Open paths with the OS-default app -> QAction"""
action = cmd_action(parent, cmds.OpenDefaultApp, context, fn,
hotkeys.PRIMARY_ACTION)
action.setIcon(icons.default_app())
return action
def edit_action(context, parent, *keys):
"""Launch an editor -> QAction"""
action = qtutils.add_action_with_status_tip(
parent, cmds.LaunchEditor.name(),
N_('Edit selected paths'),
cmds.run(cmds.LaunchEditor, context), hotkeys.EDIT, *keys)
action.setIcon(icons.edit())
return action
def parent_dir_action(context, parent, fn):
"""Open the parent directory of paths -> QAction"""
hotkey = hotkeys.SECONDARY_ACTION
action = cmd_action(parent, cmds.OpenParentDir, context, fn, hotkey)
action.setIcon(icons.folder())
return action
def refresh_action(context, parent):
"""Refresh the repository state -> QAction"""
return qtutils.add_action(parent, cmds.Refresh.name(),
cmds.run(cmds.Refresh, context),
hotkeys.REFRESH)
def terminal_action(context, parent, fn):
"""Launch a terminal -> QAction"""
action = None
if cmds.LaunchTerminal.is_available(context):
action = cmd_action(parent, cmds.LaunchTerminal, context,
lambda: utils.select_directory(fn()),
hotkeys.TERMINAL)
return action
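# --- Illustrative usage sketch (not part of the original module) ---
# Wiring the factories above into a widget's context menu; `context`,
# `menu` and `selection` (a callable returning the selected paths) are
# assumed to exist in the caller:
#
#     menu.addAction(default_app_action(context, menu, selection))
#     menu.addAction(parent_dir_action(context, menu, selection))
#     action = terminal_action(context, menu, selection)
#     if action is not None:  # a terminal may not be available
#         menu.addAction(action)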
| gpl-2.0 |
jimsimon/sky_engine | build/android/pylib/sdk/aapt.py | 25 | 1183 | # Copyright 2015 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""This module wraps the Android Asset Packaging Tool."""
import os
from pylib import cmd_helper
from pylib import constants
from pylib.utils import timeout_retry
_AAPT_PATH = os.path.join(constants.ANDROID_SDK_TOOLS, 'aapt')
def _RunAaptCmd(args):
"""Runs an aapt command.
Args:
args: A list of arguments for aapt.
Returns:
The output of the command.
"""
cmd = [_AAPT_PATH] + args
status, output = cmd_helper.GetCmdStatusAndOutput(cmd)
if status != 0:
raise Exception('Failed running aapt command: "%s" with output "%s".' %
(' '.join(cmd), output))
return output
def Dump(what, apk, assets=None):
"""Returns the output of the aapt dump command.
Args:
what: What you want to dump.
apk: Path to apk you want to dump information for.
assets: List of assets in apk you want to dump information for.
"""
assets = assets or []
if isinstance(assets, basestring):
assets = [assets]
return _RunAaptCmd(['dump', what, apk] + assets).splitlines() | bsd-3-clause |
appcelerator/entourage | components/services/pylons/appcelerator-module/setup.py | 1 | 1264 | from setuptools import setup, find_packages
setup(name='Appcelerator',
version='0.0.0',
description="Python version of the Appcelerator web application framework for building fast, dynamic, AJAX based web 2.0 applications.",
long_description="""
""",
classifiers=[
'Development Status :: 4 - Beta',
'Environment :: Web Environment',
'Intended Audience :: Developers',
'Topic :: Internet :: WWW/HTTP :: WSGI :: Middleware',
'Programming Language :: Python',
'Programming Language :: JavaScript',
'License :: OSI Approved :: GNU General Public License (GPL)',
],
keywords='wsgi web soa ria javascript',
author='Mark Luffel',
author_email='mluffel@appcelerator.com',
url='http://appcelerator.org',
license='GPL',
packages=find_packages(exclude=['ez_setup']),
include_package_data=True,
zip_safe=False,
install_requires=[
'beaker>=0.8.1',
'simplejson',
'elementtree',
'pastescript'
],
entry_points="""
[paste.app_factory]
service_broker = appcelerator.core:service_broker_factory
cross_domain_proxy = appcelerator.core:cross_domain_proxy_factory
"""
)
| apache-2.0 |
olapaola/olapaola-android-scripting | python/src/Lib/CGIHTTPServer.py | 59 | 12687 | """CGI-savvy HTTP Server.
This module builds on SimpleHTTPServer by implementing GET and POST
requests to cgi-bin scripts.
If the os.fork() function is not present (e.g. on Windows),
os.popen2() is used as a fallback, with slightly altered semantics; if
that function is not present either (e.g. on Macintosh), only Python
scripts are supported, and they are executed by the current process.
In all cases, the implementation is intentionally naive -- all
requests are executed synchronously.
SECURITY WARNING: DON'T USE THIS CODE UNLESS YOU ARE INSIDE A FIREWALL
-- it may execute arbitrary Python code or external programs.
Note that status code 200 is sent prior to execution of a CGI script, so
scripts cannot send other status codes such as 302 (redirect).
"""
__version__ = "0.4"
__all__ = ["CGIHTTPRequestHandler"]
import os
import sys
import urllib
import BaseHTTPServer
import SimpleHTTPServer
import select
class CGIHTTPRequestHandler(SimpleHTTPServer.SimpleHTTPRequestHandler):
"""Complete HTTP server with GET, HEAD and POST commands.
GET and HEAD also support running CGI scripts.
The POST command is *only* implemented for CGI scripts.
"""
# Determine platform specifics
have_fork = hasattr(os, 'fork')
have_popen2 = hasattr(os, 'popen2')
have_popen3 = hasattr(os, 'popen3')
# Make rfile unbuffered -- we need to read one line and then pass
# the rest to a subprocess, so we can't use buffered input.
rbufsize = 0
def do_POST(self):
"""Serve a POST request.
This is only implemented for CGI scripts.
"""
if self.is_cgi():
self.run_cgi()
else:
self.send_error(501, "Can only POST to CGI scripts")
def send_head(self):
"""Version of send_head that support CGI scripts"""
if self.is_cgi():
return self.run_cgi()
else:
return SimpleHTTPServer.SimpleHTTPRequestHandler.send_head(self)
def is_cgi(self):
"""Test whether self.path corresponds to a CGI script,
and return a boolean.
This function sets self.cgi_info to a tuple (dir, rest)
when it returns True, where dir is the directory part before
the CGI script name. Note that rest begins with a
slash if it is not empty.
The default implementation tests whether the path
begins with one of the strings in the list
self.cgi_directories (and the next character is a '/'
or the end of the string).
"""
path = self.path
for x in self.cgi_directories:
i = len(x)
if path[:i] == x and (not path[i:] or path[i] == '/'):
self.cgi_info = path[:i], path[i+1:]
return True
return False
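    # Example: with the default cgi_directories below, a request for
    # '/cgi-bin/test.py/extra?a=1' sets cgi_info to
    # ('/cgi-bin', 'test.py/extra?a=1'); run_cgi() later splits off the
    # query string and the PATH_INFO part.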
cgi_directories = ['/cgi-bin', '/htbin']
def is_executable(self, path):
"""Test whether argument path is an executable file."""
return executable(path)
def is_python(self, path):
"""Test whether argument path is a Python script."""
head, tail = os.path.splitext(path)
return tail.lower() in (".py", ".pyw")
def run_cgi(self):
"""Execute a CGI script."""
path = self.path
dir, rest = self.cgi_info
i = path.find('/', len(dir) + 1)
while i >= 0:
nextdir = path[:i]
nextrest = path[i+1:]
scriptdir = self.translate_path(nextdir)
if os.path.isdir(scriptdir):
dir, rest = nextdir, nextrest
i = path.find('/', len(dir) + 1)
else:
break
# find an explicit query string, if present.
i = rest.rfind('?')
if i >= 0:
rest, query = rest[:i], rest[i+1:]
else:
query = ''
# dissect the part after the directory name into a script name &
# a possible additional path, to be stored in PATH_INFO.
i = rest.find('/')
if i >= 0:
script, rest = rest[:i], rest[i:]
else:
script, rest = rest, ''
scriptname = dir + '/' + script
scriptfile = self.translate_path(scriptname)
if not os.path.exists(scriptfile):
self.send_error(404, "No such CGI script (%r)" % scriptname)
return
if not os.path.isfile(scriptfile):
self.send_error(403, "CGI script is not a plain file (%r)" %
scriptname)
return
ispy = self.is_python(scriptname)
if not ispy:
if not (self.have_fork or self.have_popen2 or self.have_popen3):
self.send_error(403, "CGI script is not a Python script (%r)" %
scriptname)
return
if not self.is_executable(scriptfile):
self.send_error(403, "CGI script is not executable (%r)" %
scriptname)
return
# Reference: http://hoohoo.ncsa.uiuc.edu/cgi/env.html
# XXX Much of the following could be prepared ahead of time!
env = {}
env['SERVER_SOFTWARE'] = self.version_string()
env['SERVER_NAME'] = self.server.server_name
env['GATEWAY_INTERFACE'] = 'CGI/1.1'
env['SERVER_PROTOCOL'] = self.protocol_version
env['SERVER_PORT'] = str(self.server.server_port)
env['REQUEST_METHOD'] = self.command
uqrest = urllib.unquote(rest)
env['PATH_INFO'] = uqrest
env['PATH_TRANSLATED'] = self.translate_path(uqrest)
env['SCRIPT_NAME'] = scriptname
if query:
env['QUERY_STRING'] = query
host = self.address_string()
if host != self.client_address[0]:
env['REMOTE_HOST'] = host
env['REMOTE_ADDR'] = self.client_address[0]
authorization = self.headers.getheader("authorization")
if authorization:
authorization = authorization.split()
if len(authorization) == 2:
import base64, binascii
env['AUTH_TYPE'] = authorization[0]
if authorization[0].lower() == "basic":
try:
authorization = base64.decodestring(authorization[1])
except binascii.Error:
pass
else:
authorization = authorization.split(':')
if len(authorization) == 2:
env['REMOTE_USER'] = authorization[0]
# XXX REMOTE_IDENT
if self.headers.typeheader is None:
env['CONTENT_TYPE'] = self.headers.type
else:
env['CONTENT_TYPE'] = self.headers.typeheader
length = self.headers.getheader('content-length')
if length:
env['CONTENT_LENGTH'] = length
referer = self.headers.getheader('referer')
if referer:
env['HTTP_REFERER'] = referer
accept = []
for line in self.headers.getallmatchingheaders('accept'):
if line[:1] in "\t\n\r ":
accept.append(line.strip())
else:
accept = accept + line[7:].split(',')
env['HTTP_ACCEPT'] = ','.join(accept)
ua = self.headers.getheader('user-agent')
if ua:
env['HTTP_USER_AGENT'] = ua
co = filter(None, self.headers.getheaders('cookie'))
if co:
env['HTTP_COOKIE'] = ', '.join(co)
# XXX Other HTTP_* headers
# Since we're setting the env in the parent, provide empty
# values to override previously set values
for k in ('QUERY_STRING', 'REMOTE_HOST', 'CONTENT_LENGTH',
'HTTP_USER_AGENT', 'HTTP_COOKIE', 'HTTP_REFERER'):
env.setdefault(k, "")
os.environ.update(env)
self.send_response(200, "Script output follows")
decoded_query = query.replace('+', ' ')
if self.have_fork:
# Unix -- fork as we should
args = [script]
if '=' not in decoded_query:
args.append(decoded_query)
nobody = nobody_uid()
self.wfile.flush() # Always flush before forking
pid = os.fork()
if pid != 0:
# Parent
pid, sts = os.waitpid(pid, 0)
# throw away additional data [see bug #427345]
while select.select([self.rfile], [], [], 0)[0]:
if not self.rfile.read(1):
break
if sts:
self.log_error("CGI script exit status %#x", sts)
return
# Child
try:
try:
os.setuid(nobody)
except os.error:
pass
os.dup2(self.rfile.fileno(), 0)
os.dup2(self.wfile.fileno(), 1)
os.execve(scriptfile, args, os.environ)
except:
self.server.handle_error(self.request, self.client_address)
os._exit(127)
elif self.have_popen2 or self.have_popen3:
# Windows -- use popen2 or popen3 to create a subprocess
import shutil
if self.have_popen3:
popenx = os.popen3
else:
popenx = os.popen2
cmdline = scriptfile
if self.is_python(scriptfile):
interp = sys.executable
if interp.lower().endswith("w.exe"):
# On Windows, use python.exe, not pythonw.exe
interp = interp[:-5] + interp[-4:]
cmdline = "%s -u %s" % (interp, cmdline)
if '=' not in query and '"' not in query:
cmdline = '%s "%s"' % (cmdline, query)
self.log_message("command: %s", cmdline)
try:
nbytes = int(length)
except (TypeError, ValueError):
nbytes = 0
files = popenx(cmdline, 'b')
fi = files[0]
fo = files[1]
if self.have_popen3:
fe = files[2]
if self.command.lower() == "post" and nbytes > 0:
data = self.rfile.read(nbytes)
fi.write(data)
# throw away additional data [see bug #427345]
while select.select([self.rfile._sock], [], [], 0)[0]:
if not self.rfile._sock.recv(1):
break
fi.close()
shutil.copyfileobj(fo, self.wfile)
if self.have_popen3:
errors = fe.read()
fe.close()
if errors:
self.log_error('%s', errors)
sts = fo.close()
if sts:
self.log_error("CGI script exit status %#x", sts)
else:
self.log_message("CGI script exited OK")
else:
# Other O.S. -- execute script in this process
save_argv = sys.argv
save_stdin = sys.stdin
save_stdout = sys.stdout
save_stderr = sys.stderr
try:
save_cwd = os.getcwd()
try:
sys.argv = [scriptfile]
if '=' not in decoded_query:
sys.argv.append(decoded_query)
sys.stdout = self.wfile
sys.stdin = self.rfile
execfile(scriptfile, {"__name__": "__main__"})
finally:
sys.argv = save_argv
sys.stdin = save_stdin
sys.stdout = save_stdout
sys.stderr = save_stderr
os.chdir(save_cwd)
except SystemExit, sts:
self.log_error("CGI script exit status %s", str(sts))
else:
self.log_message("CGI script exited OK")
nobody = None
def nobody_uid():
"""Internal routine to get nobody's uid"""
global nobody
if nobody:
return nobody
try:
import pwd
except ImportError:
return -1
try:
nobody = pwd.getpwnam('nobody')[2]
except KeyError:
nobody = 1 + max(map(lambda x: x[2], pwd.getpwall()))
return nobody
def executable(path):
"""Test for executable file."""
try:
st = os.stat(path)
except os.error:
return False
return st.st_mode & 0111 != 0
def test(HandlerClass = CGIHTTPRequestHandler,
ServerClass = BaseHTTPServer.HTTPServer):
SimpleHTTPServer.test(HandlerClass, ServerClass)
if __name__ == '__main__':
test()
| apache-2.0 |
AICP/external_chromium_org | build/android/pylib/utils/reraiser_thread_unittest.py | 99 | 2368 | # Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Unittests for reraiser_thread.py."""
import threading
import unittest
from pylib.utils import reraiser_thread
from pylib.utils import watchdog_timer
class TestException(Exception):
pass
class TestReraiserThread(unittest.TestCase):
"""Tests for reraiser_thread.ReraiserThread."""
def testNominal(self):
result = [None, None]
def f(a, b=None):
result[0] = a
result[1] = b
thread = reraiser_thread.ReraiserThread(f, [1], {'b': 2})
thread.start()
thread.join()
self.assertEqual(result[0], 1)
self.assertEqual(result[1], 2)
def testRaise(self):
def f():
raise TestException
thread = reraiser_thread.ReraiserThread(f)
thread.start()
thread.join()
with self.assertRaises(TestException):
thread.ReraiseIfException()
class TestReraiserThreadGroup(unittest.TestCase):
"""Tests for reraiser_thread.ReraiserThreadGroup."""
def testInit(self):
ran = [False] * 5
def f(i):
ran[i] = True
group = reraiser_thread.ReraiserThreadGroup(
[reraiser_thread.ReraiserThread(f, args=[i]) for i in range(5)])
group.StartAll()
group.JoinAll()
for v in ran:
self.assertTrue(v)
def testAdd(self):
ran = [False] * 5
def f(i):
ran[i] = True
group = reraiser_thread.ReraiserThreadGroup()
for i in xrange(5):
group.Add(reraiser_thread.ReraiserThread(f, args=[i]))
group.StartAll()
group.JoinAll()
for v in ran:
self.assertTrue(v)
def testJoinRaise(self):
def f():
raise TestException
group = reraiser_thread.ReraiserThreadGroup(
[reraiser_thread.ReraiserThread(f) for _ in xrange(5)])
group.StartAll()
with self.assertRaises(TestException):
group.JoinAll()
def testJoinTimeout(self):
def f():
pass
event = threading.Event()
def g():
event.wait()
group = reraiser_thread.ReraiserThreadGroup(
[reraiser_thread.ReraiserThread(g),
reraiser_thread.ReraiserThread(f)])
group.StartAll()
with self.assertRaises(reraiser_thread.TimeoutError):
group.JoinAll(watchdog_timer.WatchdogTimer(0.01))
event.set()
if __name__ == '__main__':
unittest.main()
| bsd-3-clause |
leansoft/edx-platform | common/djangoapps/course_modes/migrations/0008_auto__del_field_coursemodesarchive_description__add_field_coursemode_s.py | 102 | 3098 | # -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding field 'CourseMode.sku'
db.add_column('course_modes_coursemode', 'sku',
self.gf('django.db.models.fields.CharField')(max_length=255, null=True),
keep_default=False)
def backwards(self, orm):
# Deleting field 'CourseMode.sku'
db.delete_column('course_modes_coursemode', 'sku')
models = {
'course_modes.coursemode': {
'Meta': {'unique_together': "(('course_id', 'mode_slug', 'currency'),)", 'object_name': 'CourseMode'},
'course_id': ('xmodule_django.models.CourseKeyField', [], {'max_length': '255', 'db_index': 'True'}),
'currency': ('django.db.models.fields.CharField', [], {'default': "'usd'", 'max_length': '8'}),
'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'expiration_date': ('django.db.models.fields.DateField', [], {'default': 'None', 'null': 'True', 'blank': 'True'}),
'expiration_datetime': ('django.db.models.fields.DateTimeField', [], {'default': 'None', 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'min_price': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'mode_display_name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'mode_slug': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'sku': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True'}),
'suggested_prices': ('django.db.models.fields.CommaSeparatedIntegerField', [], {'default': "''", 'max_length': '255', 'blank': 'True'})
},
'course_modes.coursemodesarchive': {
'Meta': {'object_name': 'CourseModesArchive'},
'course_id': ('xmodule_django.models.CourseKeyField', [], {'max_length': '255', 'db_index': 'True'}),
'currency': ('django.db.models.fields.CharField', [], {'default': "'usd'", 'max_length': '8'}),
'expiration_date': ('django.db.models.fields.DateField', [], {'default': 'None', 'null': 'True', 'blank': 'True'}),
'expiration_datetime': ('django.db.models.fields.DateTimeField', [], {'default': 'None', 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'min_price': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'mode_display_name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'mode_slug': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'suggested_prices': ('django.db.models.fields.CommaSeparatedIntegerField', [], {'default': "''", 'max_length': '255', 'blank': 'True'})
}
}
complete_apps = ['course_modes']
| agpl-3.0 |
mic4ael/indico | indico/modules/rb/notifications/blockings.py | 1 | 1361 | # This file is part of Indico.
# Copyright (C) 2002 - 2020 CERN
#
# Indico is free software; you can redistribute it and/or
# modify it under the terms of the MIT License; see the
# LICENSE file for more details.
from flask import render_template
from indico.core.notifications import email_sender, make_email
@email_sender
def notify_request(owner, blocking, blocked_rooms):
"""
    Notifies a room owner about blockings they have to approve.
    Expects only blockings for rooms owned by the specified owner.
"""
subject = 'Confirm room blockings'
body = render_template('rb/emails/blockings/awaiting_confirmation_email_to_manager.txt',
owner=owner, blocking=blocking, blocked_rooms=blocked_rooms)
return make_email(owner.email, subject=subject, body=body)
@email_sender
def notify_request_response(blocked_room):
"""
    Notifies the blocking creator about the approval/rejection of their
    blocking request for a room.
"""
to = blocked_room.blocking.created_by_user.email
verb = blocked_room.State(blocked_room.state).title.upper()
subject = 'Room blocking {}'.format(verb)
body = render_template('rb/emails/blockings/state_email_to_user.txt',
blocking=blocked_room.blocking, blocked_room=blocked_room, verb=verb)
return make_email(to, subject=subject, body=body)
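# --- Illustrative usage sketch (not part of the original module) ---
# @email_sender (imported above) is assumed to take the email payload a
# decorated function returns and hand it to Indico's mail queue, so a
# caller simply invokes, e.g.:
#
#     notify_request(room.owner, blocking, pending_blocked_rooms)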
| mit |