repo_name stringlengths 5 100 | path stringlengths 4 375 | copies stringclasses 991 values | size stringlengths 4 7 | content stringlengths 666 1M | license stringclasses 15 values |
|---|---|---|---|---|---|
qqzwc/XX-Net | code/default/python27/1.0/lib/noarch/hyper/common/headers.py | 4 | 8868 | # -*- coding: utf-8 -*-
"""
hyper/common/headers
~~~~~~~~~~~~~~~~~~~~~
Contains hyper's structures for storing and working with HTTP headers.
"""
import collections
from hyper.common.util import to_bytestring, to_bytestring_tuple
class HTTPHeaderMap(collections.MutableMapping):
    """
    A structure that contains HTTP headers.

    HTTP headers are a curious beast. At the surface level they look roughly
    like a name-value set, but in practice they have many variations that
    make them tricky:

    - duplicate keys are allowed
    - keys are compared case-insensitively
    - duplicate keys are isomorphic to comma-separated values, *except when
      they aren't*!
    - they logically contain a form of ordering

    This data structure is an attempt to preserve all of that information
    while being as user-friendly as possible. It retains all of the mapping
    convenience methods (allowing by-name indexing), while avoiding using a
    dictionary for storage.

    When iterated over, this structure returns headers in 'canonical form'.
    This form is a tuple, where the first entry is the header name (in
    lower-case), and the second entry is a list of header values (in original
    case).

    The mapping always emits both names and values in the form of bytestrings:
    never unicode strings. It can accept names and values in unicode form, and
    will automatically be encoded to bytestrings using UTF-8. The reason for
    what appears to be a user-unfriendly decision here is primarily to allow
    the broadest-possible compatibility (to make it possible to send headers
    in unusual encodings) while ensuring that users are never confused about
    what type of data they will receive.

    .. warning:: Note that this data structure makes none of the performance
                 guarantees of a dictionary. Lookup and deletion is not an
                 O(1) operation. Inserting a new value *is* O(1), all other
                 operations are O(n), including *replacing* a header entirely.
    """
    def __init__(self, *args, **kwargs):
        # The meat of the structure. In practice, headers are an ordered list
        # of tuples. This early version of the data structure simply uses this
        # directly under the covers.
        #
        # An important curiosity here is that the headers are not stored in
        # 'canonical form', but are instead stored in the form they were
        # provided in. This is to ensure that it is always possible to
        # reproduce the original header structure if necessary. This leads to
        # some unfortunate performance costs on structure access where it is
        # often necessary to transform the data into canonical form on access.
        # This cost is judged acceptable in low-level code like `hyper`, but
        # higher-level abstractions should consider if they really require
        # this logic.
        self._items = []

        for arg in args:
            self._items.extend(map(lambda x: to_bytestring_tuple(*x), arg))

        for k, v in kwargs.items():
            self._items.append(to_bytestring_tuple(k, v))

    def __getitem__(self, key):
        """
        Unlike the dict __getitem__, this returns a list of items in the order
        they were added. These items are returned in 'canonical form', meaning
        that comma-separated values are split into multiple values.

        :raises KeyError: if no header with that (case-insensitive) name
            is present.
        """
        key = to_bytestring(key)
        values = []

        for k, v in self._items:
            if _keys_equal(k, key):
                values.extend(x[1] for x in canonical_form(k, v))

        if not values:
            raise KeyError("Nonexistent header key: {}".format(key))

        return values

    def __setitem__(self, key, value):
        """
        Unlike the dict __setitem__, this appends to the list of items.
        """
        self._items.append(to_bytestring_tuple(key, value))

    def __delitem__(self, key):
        """
        Sadly, __delitem__ is kind of stupid here, but the best we can do is
        delete all headers with a given key. To correctly achieve the
        'KeyError on missing key' logic from dictionaries, we need to do this
        slowly.
        """
        key = to_bytestring(key)
        indices = []
        for (i, (k, v)) in enumerate(self._items):
            if _keys_equal(k, key):
                indices.append(i)

        if not indices:
            raise KeyError("Nonexistent header key: {}".format(key))

        # Remove from the back so earlier indices stay valid.
        for i in indices[::-1]:
            self._items.pop(i)

    def __iter__(self):
        """
        This mapping iterates like the list of tuples it is. The headers are
        returned in canonical form.
        """
        for pair in self._items:
            for value in canonical_form(*pair):
                yield value

    def __len__(self):
        """
        The length of this mapping is the number of individual headers in
        canonical form. Sadly, this is a somewhat expensive operation.
        """
        size = 0
        for _ in self:
            size += 1

        return size

    def __contains__(self, key):
        """
        If any header is present with this key, returns True.
        """
        key = to_bytestring(key)
        return any(_keys_equal(key, k) for k, _ in self._items)

    def keys(self):
        """
        Returns an iterable of the header keys in the mapping. This explicitly
        does not filter duplicates, ensuring that it's the same length as
        len().
        """
        for n, _ in self:
            yield n

    def items(self):
        """
        This mapping iterates like the list of tuples it is.
        """
        return self.__iter__()

    def values(self):
        """
        This is an almost nonsensical query on a header dictionary, but we
        satisfy it in the exact same way we satisfy 'keys'.
        """
        for _, v in self:
            yield v

    def get(self, name, default=None):
        """
        Unlike the dict get, this returns a list of items in the order
        they were added.
        """
        try:
            return self[name]
        except KeyError:
            return default

    def iter_raw(self):
        """
        Allows iterating over the headers in 'raw' form: that is, the form in
        which they were added to the structure. This iteration is in order,
        and can be used to rebuild the original headers (e.g. to determine
        exactly what a server sent).
        """
        for item in self._items:
            yield item

    def replace(self, key, value):
        """
        Replace existing header with new value. If the header doesn't exist
        this method works like ``__setitem__``. Replacing leads to deletion
        of all existing headers with the same name.
        """
        # Bug fix: encode *both* the key and the value, exactly as
        # __setitem__ does. Previously only the key was converted, so a
        # unicode value could be stored and break the bytes-only invariant
        # documented on the class.
        key, value = to_bytestring_tuple(key, value)
        indices = []
        for (i, (k, v)) in enumerate(self._items):
            if _keys_equal(k, key):
                indices.append(i)

        # If the key isn't present, this is easy: just append and abort early.
        if not indices:
            self._items.append((key, value))
            return

        # Delete all but the first. I swear, this is the correct slicing
        # syntax!
        base_index = indices[0]
        for i in indices[:0:-1]:
            self._items.pop(i)

        del self._items[base_index]
        self._items.insert(base_index, (key, value))

    def merge(self, other):
        """
        Merge another header set or any other dict-like into this one.
        """
        # Short circuit to avoid infinite loops in case we try to merge into
        # ourselves.
        if other is self:
            return

        if isinstance(other, HTTPHeaderMap):
            self._items.extend(other.iter_raw())
            return

        for k, v in other.items():
            self._items.append(to_bytestring_tuple(k, v))

    def __eq__(self, other):
        # NOTE(review): assumes 'other' is another HTTPHeaderMap; comparing
        # against an arbitrary object raises AttributeError — confirm callers.
        return self._items == other._items

    def __ne__(self, other):
        return self._items != other._items

    def __str__(self):  # pragma: no cover
        return 'HTTPHeaderMap(%s)' % self._items

    def __repr__(self):  # pragma: no cover
        return str(self)
# Headers whose values must NOT be split on commas: the Set-Cookie family
# legitimately uses commas *inside* a single value (e.g. in expiry dates).
# Hoisted to module level as a frozenset so it is built once, not on every
# call (canonical_form runs on every header access/iteration).
SPECIAL_SNOWFLAKES = frozenset([b'set-cookie', b'set-cookie2'])


def canonical_form(k, v):
    """
    Returns an iterable of key-value-pairs corresponding to the header in
    canonical form. This means that the header is split on commas unless for
    any reason it's a super-special snowflake (I'm looking at you Set-Cookie).
    """
    k = k.lower()

    if k in SPECIAL_SNOWFLAKES:
        yield k, v
    else:
        for sub_val in v.split(b','):
            yield k, sub_val.strip()
def _keys_equal(x, y):
"""
Returns 'True' if the two keys are equal by the laws of HTTP headers.
"""
return x.lower() == y.lower()
| bsd-2-clause |
copyninja/apt-offline | apt_offline_core/AptOffline_argparse.py | 30 | 87352 | # -*- coding: utf-8 -*-
# Copyright © 2006-2009 Steven J. Bethard <steven.bethard@gmail.com>.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not
# use this file except in compliance with the License. You may obtain a copy
# of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Command-line parsing library
This module is an optparse-inspired command-line parsing library that:
- handles both optional and positional arguments
- produces highly informative usage messages
- supports parsers that dispatch to sub-parsers
The following is a simple usage example that sums integers from the
command-line and writes the result to a file::
parser = argparse.ArgumentParser(
description='sum the integers at the command line')
parser.add_argument(
'integers', metavar='int', nargs='+', type=int,
help='an integer to be summed')
parser.add_argument(
'--log', default=sys.stdout, type=argparse.FileType('w'),
help='the file where the sum should be written')
args = parser.parse_args()
args.log.write('%s' % sum(args.integers))
args.log.close()
The module contains the following public classes:
- ArgumentParser -- The main entry point for command-line parsing. As the
example above shows, the add_argument() method is used to populate
the parser with actions for optional and positional arguments. Then
the parse_args() method is invoked to convert the args at the
command-line into an object with attributes.
- ArgumentError -- The exception raised by ArgumentParser objects when
there are errors with the parser's actions. Errors raised while
parsing the command-line are caught by ArgumentParser and emitted
as command-line messages.
- FileType -- A factory for defining types of files to be created. As the
example above shows, instances of FileType are typically passed as
the type= argument of add_argument() calls.
- Action -- The base class for parser actions. Typically actions are
selected by passing strings like 'store_true' or 'append_const' to
the action= argument of add_argument(). However, for greater
customization of ArgumentParser actions, subclasses of Action may
be defined and passed as the action= argument.
- HelpFormatter, RawDescriptionHelpFormatter, RawTextHelpFormatter,
ArgumentDefaultsHelpFormatter -- Formatter classes which
may be passed as the formatter_class= argument to the
ArgumentParser constructor. HelpFormatter is the default,
RawDescriptionHelpFormatter and RawTextHelpFormatter tell the parser
not to change the formatting for help text, and
ArgumentDefaultsHelpFormatter adds information about argument defaults
to the help.
All other classes in this module are considered implementation details.
(Also note that HelpFormatter and RawDescriptionHelpFormatter are only
considered public as object names -- the API of the formatter objects is
still considered an implementation detail.)
"""
# Version of the upstream standalone argparse release this vendored copy
# is based on.
__version__ = '1.1'
# Names exported by 'from ... import *'. Everything else in this module is
# considered an implementation detail (see the module docstring).
__all__ = [
    'ArgumentParser',
    'ArgumentError',
    'Namespace',
    'Action',
    'FileType',
    'HelpFormatter',
    'RawDescriptionHelpFormatter',
    'RawTextHelpFormatter',
    'ArgumentDefaultsHelpFormatter',
]
import copy as _copy
import os as _os
import re as _re
import sys as _sys
import textwrap as _textwrap
from gettext import gettext as _
# Compatibility shims for very old Pythons.
# 'set' became a builtin in Python 2.4; fall back to the 'sets' module.
try:
    _set = set
except NameError:
    from sets import Set as _set
# 'basestring' does not exist on Python 3; use 'str' as the string base there.
try:
    _basestring = basestring
except NameError:
    _basestring = str
# 'sorted' became a builtin in Python 2.4; provide a minimal substitute that
# supports the one keyword argument this module uses ('reverse').
try:
    _sorted = sorted
except NameError:
    def _sorted(iterable, reverse=False):
        # Copy into a list first so the caller's iterable is left untouched.
        result = list(iterable)
        result.sort()
        if reverse:
            result.reverse()
        return result
def _callable(obj):
return hasattr(obj, '__call__') or hasattr(obj, '__bases__')
# silence Python 2.6 buggy warnings about Exception.message
if _sys.version_info[:2] == (2, 6):
    import warnings
    warnings.filterwarnings(
        action='ignore',
        message='BaseException.message has been deprecated as of Python 2.6',
        category=DeprecationWarning,
        module='argparse')
# Sentinel telling the parser/formatter to omit an argument or text entirely.
SUPPRESS = '==SUPPRESS=='
# Public nargs markers: zero-or-one, zero-or-more, one-or-more arguments.
OPTIONAL = '?'
ZERO_OR_MORE = '*'
ONE_OR_MORE = '+'
# Internal nargs markers: sub-parser dispatch, and 'consume the remainder'.
PARSER = 'A...'
REMAINDER = '...'
# =============================
# Utility functions and classes
# =============================
class _AttributeHolder(object):
    """Abstract base class that provides __repr__.

    The __repr__ method returns a string in the format::

        ClassName(attr=name, attr=name, ...)

    The attributes are determined either by a class-level attribute,
    '_kwarg_names', or by inspecting the instance __dict__.
    """

    def __repr__(self):
        # Positional pieces first, then 'name=value' pieces, comma-joined.
        pieces = [repr(arg) for arg in self._get_args()]
        pieces.extend('%s=%r' % pair for pair in self._get_kwargs())
        return '%s(%s)' % (type(self).__name__, ', '.join(pieces))

    def _get_kwargs(self):
        return _sorted(self.__dict__.items())

    def _get_args(self):
        return []
def _ensure_value(namespace, name, value):
if getattr(namespace, name, None) is None:
setattr(namespace, name, value)
return getattr(namespace, name)
# ===============
# Formatting Help
# ===============
class HelpFormatter(object):
    """Formatter for generating usage messages and argument help strings.

    Only the name of this class is considered a public API. All the methods
    provided by the class are considered an implementation detail.
    """

    def __init__(self,
                 prog,
                 indent_increment=2,
                 max_help_position=24,
                 width=None):

        # default setting for width: the terminal width advertised by
        # $COLUMNS, minus a small right margin; 80 columns if unavailable.
        if width is None:
            try:
                width = int(_os.environ['COLUMNS'])
            except (KeyError, ValueError):
                width = 80
            width -= 2

        self._prog = prog
        self._indent_increment = indent_increment
        self._max_help_position = max_help_position
        self._width = width

        self._current_indent = 0
        self._level = 0
        self._action_max_length = 0

        self._root_section = self._Section(self, None)
        self._current_section = self._root_section

        self._whitespace_matcher = _re.compile(r'\s+')
        self._long_break_matcher = _re.compile(r'\n\n\n+')

    # ===============================
    # Section and indentation methods
    # ===============================
    def _indent(self):
        self._current_indent += self._indent_increment
        self._level += 1

    def _dedent(self):
        self._current_indent -= self._indent_increment
        assert self._current_indent >= 0, 'Indent decreased below 0.'
        self._level -= 1

    class _Section(object):
        """One (possibly nested) help section: a heading plus a list of
        deferred (callable, args) formatting items."""

        def __init__(self, formatter, parent, heading=None):
            self.formatter = formatter
            self.parent = parent
            self.heading = heading
            self.items = []

        def format_help(self):
            # format the indented section
            if self.parent is not None:
                self.formatter._indent()
            join = self.formatter._join_parts
            # Bug fix: the items used to be invoked once in a bare loop and
            # then *again* in this comprehension, formatting every item —
            # and every nested section, recursively — twice. Render each
            # item exactly once (matches the upstream argparse fix).
            item_help = join([func(*args) for func, args in self.items])
            if self.parent is not None:
                self.formatter._dedent()

            # return nothing if the section was empty
            if not item_help:
                return ''

            # add the heading if the section was non-empty
            if self.heading is not SUPPRESS and self.heading is not None:
                current_indent = self.formatter._current_indent
                heading = '%*s%s:\n' % (current_indent, '', self.heading)
            else:
                heading = ''

            # join the section-initial newline, the heading and the help
            return join(['\n', heading, item_help, '\n'])

    def _add_item(self, func, args):
        self._current_section.items.append((func, args))

    # ========================
    # Message building methods
    # ========================
    def start_section(self, heading):
        self._indent()
        section = self._Section(self, self._current_section, heading)
        self._add_item(section.format_help, [])
        self._current_section = section

    def end_section(self):
        self._current_section = self._current_section.parent
        self._dedent()

    def add_text(self, text):
        if text is not SUPPRESS and text is not None:
            self._add_item(self._format_text, [text])

    def add_usage(self, usage, actions, groups, prefix=None):
        if usage is not SUPPRESS:
            args = usage, actions, groups, prefix
            self._add_item(self._format_usage, args)

    def add_argument(self, action):
        if action.help is not SUPPRESS:

            # find all invocations
            get_invocation = self._format_action_invocation
            invocations = [get_invocation(action)]
            for subaction in self._iter_indented_subactions(action):
                invocations.append(get_invocation(subaction))

            # update the maximum item length
            invocation_length = max([len(s) for s in invocations])
            action_length = invocation_length + self._current_indent
            self._action_max_length = max(self._action_max_length,
                                          action_length)

            # add the item to the list
            self._add_item(self._format_action, [action])

    def add_arguments(self, actions):
        for action in actions:
            self.add_argument(action)

    # =======================
    # Help-formatting methods
    # =======================
    def format_help(self):
        """Render the whole accumulated help text, collapsing runs of
        blank lines."""
        help = self._root_section.format_help()
        if help:
            help = self._long_break_matcher.sub('\n\n', help)
            help = help.strip('\n') + '\n'
        return help

    def _join_parts(self, part_strings):
        return ''.join([part
                        for part in part_strings
                        if part and part is not SUPPRESS])

    def _format_usage(self, usage, actions, groups, prefix):
        """Build the 'usage: ...' line, wrapping it over multiple lines
        when it exceeds the available width."""
        if prefix is None:
            prefix = _('usage: ')

        # if usage is specified, use that
        if usage is not None:
            usage = usage % dict(prog=self._prog)

        # if no optionals or positionals are available, usage is just prog
        elif usage is None and not actions:
            usage = '%(prog)s' % dict(prog=self._prog)

        # if optionals and positionals are available, calculate usage
        elif usage is None:
            prog = '%(prog)s' % dict(prog=self._prog)

            # split optionals from positionals
            optionals = []
            positionals = []
            for action in actions:
                if action.option_strings:
                    optionals.append(action)
                else:
                    positionals.append(action)

            # build full usage string
            format = self._format_actions_usage
            action_usage = format(optionals + positionals, groups)
            usage = ' '.join([s for s in [prog, action_usage] if s])

            # wrap the usage parts if it's too long
            text_width = self._width - self._current_indent
            if len(prefix) + len(usage) > text_width:

                # break usage into wrappable parts
                part_regexp = r'\(.*?\)+|\[.*?\]+|\S+'
                opt_usage = format(optionals, groups)
                pos_usage = format(positionals, groups)
                opt_parts = _re.findall(part_regexp, opt_usage)
                pos_parts = _re.findall(part_regexp, pos_usage)
                assert ' '.join(opt_parts) == opt_usage
                assert ' '.join(pos_parts) == pos_usage

                # helper for wrapping lines
                def get_lines(parts, indent, prefix=None):
                    lines = []
                    line = []
                    if prefix is not None:
                        line_len = len(prefix) - 1
                    else:
                        line_len = len(indent) - 1
                    for part in parts:
                        if line_len + 1 + len(part) > text_width:
                            lines.append(indent + ' '.join(line))
                            line = []
                            line_len = len(indent) - 1
                        line.append(part)
                        line_len += len(part) + 1
                    if line:
                        lines.append(indent + ' '.join(line))
                    if prefix is not None:
                        lines[0] = lines[0][len(indent):]
                    return lines

                # if prog is short, follow it with optionals or positionals
                if len(prefix) + len(prog) <= 0.75 * text_width:
                    indent = ' ' * (len(prefix) + len(prog) + 1)
                    if opt_parts:
                        lines = get_lines([prog] + opt_parts, indent, prefix)
                        lines.extend(get_lines(pos_parts, indent))
                    elif pos_parts:
                        lines = get_lines([prog] + pos_parts, indent, prefix)
                    else:
                        lines = [prog]

                # if prog is long, put it on its own line
                else:
                    indent = ' ' * len(prefix)
                    parts = opt_parts + pos_parts
                    lines = get_lines(parts, indent)
                    if len(lines) > 1:
                        lines = []
                        lines.extend(get_lines(opt_parts, indent))
                        lines.extend(get_lines(pos_parts, indent))
                    lines = [prog] + lines

                # join lines into usage
                usage = '\n'.join(lines)

        # prefix with 'usage:'
        return '%s%s\n\n' % (prefix, usage)

    def _format_actions_usage(self, actions, groups):
        """Format the action list for the usage line, inserting []/() and
        '|' separators for (mutually exclusive) groups."""
        # find group indices and identify actions in groups
        group_actions = _set()
        inserts = {}
        for group in groups:
            try:
                start = actions.index(group._group_actions[0])
            except ValueError:
                continue
            else:
                end = start + len(group._group_actions)
                if actions[start:end] == group._group_actions:
                    for action in group._group_actions:
                        group_actions.add(action)
                    if not group.required:
                        inserts[start] = '['
                        inserts[end] = ']'
                    else:
                        inserts[start] = '('
                        inserts[end] = ')'
                    for i in range(start + 1, end):
                        inserts[i] = '|'

        # collect all actions format strings
        parts = []
        for i, action in enumerate(actions):

            # suppressed arguments are marked with None
            # remove | separators for suppressed arguments
            if action.help is SUPPRESS:
                parts.append(None)
                if inserts.get(i) == '|':
                    inserts.pop(i)
                elif inserts.get(i + 1) == '|':
                    inserts.pop(i + 1)

            # produce all arg strings
            elif not action.option_strings:
                part = self._format_args(action, action.dest)

                # if it's in a group, strip the outer []
                if action in group_actions:
                    if part[0] == '[' and part[-1] == ']':
                        part = part[1:-1]

                # add the action string to the list
                parts.append(part)

            # produce the first way to invoke the option in brackets
            else:
                option_string = action.option_strings[0]

                # if the Optional doesn't take a value, format is:
                #    -s or --long
                if action.nargs == 0:
                    part = '%s' % option_string

                # if the Optional takes a value, format is:
                #    -s ARGS or --long ARGS
                else:
                    default = action.dest.upper()
                    args_string = self._format_args(action, default)
                    part = '%s %s' % (option_string, args_string)

                # make it look optional if it's not required or in a group
                if not action.required and action not in group_actions:
                    part = '[%s]' % part

                # add the action string to the list
                parts.append(part)

        # insert things at the necessary indices
        for i in _sorted(inserts, reverse=True):
            parts[i:i] = [inserts[i]]

        # join all the action items with spaces
        text = ' '.join([item for item in parts if item is not None])

        # clean up separators for mutually exclusive groups
        open = r'[\[(]'
        close = r'[\])]'
        text = _re.sub(r'(%s) ' % open, r'\1', text)
        text = _re.sub(r' (%s)' % close, r'\1', text)
        text = _re.sub(r'%s *%s' % (open, close), r'', text)
        text = _re.sub(r'\(([^|]*)\)', r'\1', text)
        text = text.strip()

        # return the text
        return text

    def _format_text(self, text):
        if '%(prog)' in text:
            text = text % dict(prog=self._prog)
        text_width = self._width - self._current_indent
        indent = ' ' * self._current_indent
        return self._fill_text(text, text_width, indent) + '\n\n'

    def _format_action(self, action):
        """Format one action's invocation plus its (wrapped) help text,
        followed by any sub-actions."""
        # determine the required width and the entry label
        help_position = min(self._action_max_length + 2,
                            self._max_help_position)
        help_width = self._width - help_position
        action_width = help_position - self._current_indent - 2
        action_header = self._format_action_invocation(action)

        # no help; start on same line and add a final newline
        if not action.help:
            tup = self._current_indent, '', action_header
            action_header = '%*s%s\n' % tup

        # short action name; start on the same line and pad two spaces
        elif len(action_header) <= action_width:
            tup = self._current_indent, '', action_width, action_header
            action_header = '%*s%-*s  ' % tup
            indent_first = 0

        # long action name; start on the next line
        else:
            tup = self._current_indent, '', action_header
            action_header = '%*s%s\n' % tup
            indent_first = help_position

        # collect the pieces of the action help
        parts = [action_header]

        # if there was help for the action, add lines of help text
        if action.help:
            help_text = self._expand_help(action)
            help_lines = self._split_lines(help_text, help_width)
            parts.append('%*s%s\n' % (indent_first, '', help_lines[0]))
            for line in help_lines[1:]:
                parts.append('%*s%s\n' % (help_position, '', line))

        # or add a newline if the description doesn't end with one
        elif not action_header.endswith('\n'):
            parts.append('\n')

        # if there are any sub-actions, add their help as well
        for subaction in self._iter_indented_subactions(action):
            parts.append(self._format_action(subaction))

        # return a single string
        return self._join_parts(parts)

    def _format_action_invocation(self, action):
        if not action.option_strings:
            metavar, = self._metavar_formatter(action, action.dest)(1)
            return metavar

        else:
            parts = []

            # if the Optional doesn't take a value, format is:
            #    -s, --long
            if action.nargs == 0:
                parts.extend(action.option_strings)

            # if the Optional takes a value, format is:
            #    -s ARGS, --long ARGS
            else:
                default = action.dest.upper()
                args_string = self._format_args(action, default)
                for option_string in action.option_strings:
                    parts.append('%s %s' % (option_string, args_string))

            return ', '.join(parts)

    def _metavar_formatter(self, action, default_metavar):
        if action.metavar is not None:
            result = action.metavar
        elif action.choices is not None:
            choice_strs = [str(choice) for choice in action.choices]
            result = '{%s}' % ','.join(choice_strs)
        else:
            result = default_metavar

        def format(tuple_size):
            if isinstance(result, tuple):
                return result
            else:
                return (result, ) * tuple_size
        return format

    def _format_args(self, action, default_metavar):
        """Format the metavar part of an action according to its nargs."""
        get_metavar = self._metavar_formatter(action, default_metavar)
        if action.nargs is None:
            result = '%s' % get_metavar(1)
        elif action.nargs == OPTIONAL:
            result = '[%s]' % get_metavar(1)
        elif action.nargs == ZERO_OR_MORE:
            result = '[%s [%s ...]]' % get_metavar(2)
        elif action.nargs == ONE_OR_MORE:
            result = '%s [%s ...]' % get_metavar(2)
        elif action.nargs == REMAINDER:
            result = '...'
        elif action.nargs == PARSER:
            result = '%s ...' % get_metavar(1)
        else:
            formats = ['%s' for _ in range(action.nargs)]
            result = ' '.join(formats) % get_metavar(action.nargs)
        return result

    def _expand_help(self, action):
        """Interpolate %(prog)s, %(default)s, etc. into the help string."""
        params = dict(vars(action), prog=self._prog)
        for name in list(params):
            if params[name] is SUPPRESS:
                del params[name]
        for name in list(params):
            if hasattr(params[name], '__name__'):
                params[name] = params[name].__name__
        if params.get('choices') is not None:
            choices_str = ', '.join([str(c) for c in params['choices']])
            params['choices'] = choices_str
        return self._get_help_string(action) % params

    def _iter_indented_subactions(self, action):
        try:
            get_subactions = action._get_subactions
        except AttributeError:
            pass
        else:
            self._indent()
            for subaction in get_subactions():
                yield subaction
            self._dedent()

    def _split_lines(self, text, width):
        text = self._whitespace_matcher.sub(' ', text).strip()
        return _textwrap.wrap(text, width)

    def _fill_text(self, text, width, indent):
        text = self._whitespace_matcher.sub(' ', text).strip()
        return _textwrap.fill(text, width, initial_indent=indent,
                              subsequent_indent=indent)

    def _get_help_string(self, action):
        return action.help
class RawDescriptionHelpFormatter(HelpFormatter):
    """Help message formatter which retains any formatting in descriptions.

    Only the name of this class is considered a public API. All the methods
    provided by the class are considered an implementation detail.
    """

    def _fill_text(self, text, width, indent):
        # Keep the author's own line breaks: prepend the indent to each
        # line instead of re-wrapping the paragraph.
        indented = [indent + line for line in text.splitlines(True)]
        return ''.join(indented)
class RawTextHelpFormatter(RawDescriptionHelpFormatter):
    """Help message formatter which retains formatting of all help text.

    Only the name of this class is considered a public API. All the methods
    provided by the class are considered an implementation detail.
    """
    def _split_lines(self, text, width):
        # Honour explicit newlines in the help string; never re-wrap.
        return text.splitlines()
class ArgumentDefaultsHelpFormatter(HelpFormatter):
    """Help message formatter which adds default values to argument help.

    Only the name of this class is considered a public API. All the methods
    provided by the class are considered an implementation detail.
    """

    def _get_help_string(self, action):
        help = action.help
        # Append '(default: ...)' unless the help text already interpolates
        # the default, the default is suppressed, or the action is a bare
        # positional (which has no meaningful default to show).
        if '%(default)' not in help:
            if action.default is not SUPPRESS:
                defaulting_nargs = (OPTIONAL, ZERO_OR_MORE)
                if action.option_strings or action.nargs in defaulting_nargs:
                    help = help + ' (default: %(default)s)'
        return help
# =====================
# Options and Arguments
# =====================
def _get_action_name(argument):
if argument is None:
return None
elif argument.option_strings:
return '/'.join(argument.option_strings)
elif argument.metavar not in (None, SUPPRESS):
return argument.metavar
elif argument.dest not in (None, SUPPRESS):
return argument.dest
else:
return None
class ArgumentError(Exception):
    """An error from creating or using an argument (optional or positional).

    The string value of this exception is the message, augmented with
    information about the argument that caused it.
    """

    def __init__(self, argument, message):
        self.argument_name = _get_action_name(argument)
        self.message = message

    def __str__(self):
        # Only mention the argument when we managed to derive a name for it.
        if self.argument_name is None:
            return '%(message)s' % dict(message=self.message)
        return 'argument %(argument_name)s: %(message)s' % dict(
            message=self.message,
            argument_name=self.argument_name)
class ArgumentTypeError(Exception):
    """An error from trying to convert a command line string to a type."""
# ==============
# Action classes
# ==============
class Action(_AttributeHolder):
    """Information about how to convert command line strings to Python objects.

    Action objects are used by an ArgumentParser to represent the information
    needed to parse a single argument from one or more strings from the
    command line. The keyword arguments to the Action constructor are also
    all attributes of Action instances.

    Keyword Arguments:

        - option_strings -- A list of command-line option strings which
            should be associated with this action.

        - dest -- The name of the attribute to hold the created object(s)

        - nargs -- The number of command-line arguments that should be
            consumed. By default, one argument will be consumed and a single
            value will be produced.  Other values include:
                - N (an integer) consumes N arguments (and produces a list)
                - '?' consumes zero or one arguments
                - '*' consumes zero or more arguments (and produces a list)
                - '+' consumes one or more arguments (and produces a list)
            Note that the difference between the default and nargs=1 is that
            with the default, a single value will be produced, while with
            nargs=1, a list containing a single value will be produced.

        - const -- The value to be produced if the option is specified and the
            option uses an action that takes no values.

        - default -- The value to be produced if the option is not specified.

        - type -- The type which the command-line arguments should be converted
            to, should be one of 'string', 'int', 'float', 'complex' or a
            callable object that accepts a single string argument. If None,
            'string' is assumed.

        - choices -- A container of values that should be allowed. If not None,
            after a command-line argument has been converted to the appropriate
            type, an exception will be raised if it is not a member of this
            collection.

        - required -- True if the action must always be specified at the
            command line. This is only meaningful for optional command-line
            arguments.

        - help -- The help string describing the argument.

        - metavar -- The name to be used for the option's argument with the
            help string. If None, the 'dest' value will be used as the name.
    """

    def __init__(self, option_strings, dest, nargs=None, const=None,
                 default=None, type=None, choices=None, required=False,
                 help=None, metavar=None):
        self.option_strings = option_strings
        self.dest = dest
        self.nargs = nargs
        self.const = const
        self.default = default
        self.type = type
        self.choices = choices
        self.required = required
        self.help = help
        self.metavar = metavar

    def _get_kwargs(self):
        # Attributes shown in repr(); 'required' is deliberately omitted.
        names = ('option_strings', 'dest', 'nargs', 'const', 'default',
                 'type', 'choices', 'help', 'metavar')
        return [(name, getattr(self, name)) for name in names]

    def __call__(self, parser, namespace, values, option_string=None):
        # Subclasses must implement the actual parsing behaviour.
        raise NotImplementedError(_('.__call__() not defined'))
class _StoreAction(Action):
    """Default 'store' action: save the (converted) value on the namespace."""

    def __init__(self, option_strings, dest, nargs=None, const=None,
                 default=None, type=None, choices=None, required=False,
                 help=None, metavar=None):
        # A store action that consumes no arguments makes no sense.
        if nargs == 0:
            raise ValueError('nargs for store actions must be > 0; if you '
                             'have nothing to store, actions such as store '
                             'true or store const may be more appropriate')
        if const is not None and nargs != OPTIONAL:
            raise ValueError('nargs must be %r to supply const' % OPTIONAL)
        super(_StoreAction, self).__init__(
            option_strings=option_strings, dest=dest, nargs=nargs,
            const=const, default=default, type=type, choices=choices,
            required=required, help=help, metavar=metavar)

    def __call__(self, parser, namespace, values, option_string=None):
        setattr(namespace, self.dest, values)
class _StoreConstAction(Action):
    """'store_const' action: presence of the option stores a fixed constant."""

    def __init__(self, option_strings, dest, const, default=None,
                 required=False, help=None, metavar=None):
        # NOTE(review): 'metavar' is accepted but not forwarded to Action,
        # mirroring the original implementation.
        super(_StoreConstAction, self).__init__(
            option_strings=option_strings, dest=dest, nargs=0,
            const=const, default=default, required=required, help=help)

    def __call__(self, parser, namespace, values, option_string=None):
        setattr(namespace, self.dest, self.const)
class _StoreTrueAction(_StoreConstAction):
    """'store_true' action: flag whose presence sets the dest to True."""

    def __init__(self, option_strings, dest, default=False,
                 required=False, help=None):
        super(_StoreTrueAction, self).__init__(
            option_strings=option_strings, dest=dest, const=True,
            default=default, required=required, help=help)
class _StoreFalseAction(_StoreConstAction):
    """Convenience action: store False when the flag is present."""
    def __init__(self,
                 option_strings,
                 dest,
                 default=True,
                 required=False,
                 help=None):
        base_init = super(_StoreFalseAction, self).__init__
        base_init(option_strings=option_strings,
                  dest=dest,
                  const=False,
                  default=default,
                  required=required,
                  help=help)
class _AppendAction(Action):
    """Action that collects each occurrence's value into a list."""
    def __init__(self,
                 option_strings,
                 dest,
                 nargs=None,
                 const=None,
                 default=None,
                 type=None,
                 choices=None,
                 required=False,
                 help=None,
                 metavar=None):
        # Appending nothing per occurrence makes no sense.
        if nargs == 0:
            raise ValueError('nargs for append actions must be > 0; if arg '
                             'strings are not supplying the value to append, '
                             'the append const action may be more appropriate')
        # const is only meaningful together with nargs='?'.
        if const is not None and nargs != OPTIONAL:
            raise ValueError('nargs must be %r to supply const' % OPTIONAL)
        base_kwargs = dict(option_strings=option_strings,
                           dest=dest,
                           nargs=nargs,
                           const=const,
                           default=default,
                           type=type,
                           choices=choices,
                           required=required,
                           help=help,
                           metavar=metavar)
        super(_AppendAction, self).__init__(**base_kwargs)
    def __call__(self, parser, namespace, values, option_string=None):
        # Copy first so a default list shared between parses is never
        # mutated in place.
        collected = _copy.copy(_ensure_value(namespace, self.dest, []))
        collected.append(values)
        setattr(namespace, self.dest, collected)
class _AppendConstAction(Action):
    """Action that appends a fixed constant each time the option appears."""
    def __init__(self,
                 option_strings,
                 dest,
                 const,
                 default=None,
                 required=False,
                 help=None,
                 metavar=None):
        base_init = super(_AppendConstAction, self).__init__
        base_init(option_strings=option_strings,
                  dest=dest,
                  nargs=0,
                  const=const,
                  default=default,
                  required=required,
                  help=help,
                  metavar=metavar)
    def __call__(self, parser, namespace, values, option_string=None):
        # Copy-then-append so a shared default list is never mutated.
        collected = _copy.copy(_ensure_value(namespace, self.dest, []))
        collected.append(self.const)
        setattr(namespace, self.dest, collected)
class _CountAction(Action):
    """Action that counts option occurrences (e.g. -v -v -v => 3)."""
    def __init__(self,
                 option_strings,
                 dest,
                 default=None,
                 required=False,
                 help=None):
        base_init = super(_CountAction, self).__init__
        base_init(option_strings=option_strings,
                  dest=dest,
                  nargs=0,
                  default=default,
                  required=required,
                  help=help)
    def __call__(self, parser, namespace, values, option_string=None):
        # Missing attribute starts the count at zero.
        previous = _ensure_value(namespace, self.dest, 0)
        setattr(namespace, self.dest, previous + 1)
class _HelpAction(Action):
    """Action behind -h/--help: print the help text and exit."""
    def __init__(self,
                 option_strings,
                 dest=SUPPRESS,
                 default=SUPPRESS,
                 help=None):
        base_init = super(_HelpAction, self).__init__
        base_init(option_strings=option_strings,
                  dest=dest,
                  default=default,
                  nargs=0,
                  help=help)
    def __call__(self, parser, namespace, values, option_string=None):
        # Print and terminate; never returns to the parse loop.
        parser.print_help()
        parser.exit()
class _VersionAction(Action):
    """Action behind --version: print the version text and exit."""
    def __init__(self,
                 option_strings,
                 version=None,
                 dest=SUPPRESS,
                 default=SUPPRESS,
                 help=None):
        base_init = super(_VersionAction, self).__init__
        base_init(option_strings=option_strings,
                  dest=dest,
                  default=default,
                  nargs=0,
                  help=help)
        self.version = version
    def __call__(self, parser, namespace, values, option_string=None):
        # Fall back to the (deprecated) parser-level version attribute
        # when no per-action version string was supplied.
        text = self.version
        if text is None:
            text = parser.version
        formatter = parser._get_formatter()
        formatter.add_text(text)
        parser.exit(message=formatter.format_help())
class _SubParsersAction(Action):
    """Action that dispatches remaining arguments to a named sub-parser.

    Produced by ArgumentParser.add_subparsers(): the first positional
    value selects a parser previously registered with add_parser(), and
    every argument after it is parsed by that sub-parser into the same
    namespace.
    """
    class _ChoicesPseudoAction(Action):
        # Lightweight stand-in whose only purpose is giving the help
        # formatter one line per sub-command choice.
        def __init__(self, name, help):
            sup = super(_SubParsersAction._ChoicesPseudoAction, self)
            sup.__init__(option_strings=[], dest=name, help=help)
    def __init__(self,
                 option_strings,
                 prog,
                 parser_class,
                 dest=SUPPRESS,
                 help=None,
                 metavar=None):
        self._prog_prefix = prog
        self._parser_class = parser_class
        # choices= aliases this dict, so registering a parser later also
        # extends the valid choice set.
        self._name_parser_map = {}
        self._choices_actions = []
        super(_SubParsersAction, self).__init__(
            option_strings=option_strings,
            dest=dest,
            nargs=PARSER,
            choices=self._name_parser_map,
            help=help,
            metavar=metavar)
    def add_parser(self, name, **kwargs):
        """Create, register, and return a sub-parser called ``name``."""
        # set prog from the existing prefix
        if kwargs.get('prog') is None:
            kwargs['prog'] = '%s %s' % (self._prog_prefix, name)
        # create a pseudo-action to hold the choice help
        if 'help' in kwargs:
            help = kwargs.pop('help')
            choice_action = self._ChoicesPseudoAction(name, help)
            self._choices_actions.append(choice_action)
        # create the parser and add it to the map
        parser = self._parser_class(**kwargs)
        self._name_parser_map[name] = parser
        return parser
    def _get_subactions(self):
        return self._choices_actions
    def __call__(self, parser, namespace, values, option_string=None):
        parser_name = values[0]
        arg_strings = values[1:]
        # set the parser name if requested
        if self.dest is not SUPPRESS:
            setattr(namespace, self.dest, parser_name)
        # select the parser
        try:
            parser = self._name_parser_map[parser_name]
        except KeyError:
            tup = parser_name, ', '.join(self._name_parser_map)
            # Fix: translate the template FIRST, then interpolate.  The
            # original called _('...' % tup), asking gettext to translate
            # an already-formatted string that can never be in the catalog.
            msg = _('unknown parser %r (choices: %s)') % tup
            raise ArgumentError(self, msg)
        # parse all the remaining options into the namespace
        parser.parse_args(arg_strings, namespace)
# ==============
# Type classes
# ==============
class FileType(object):
    """Factory for creating file object types
    Instances of FileType are typically passed as type= arguments to the
    ArgumentParser add_argument() method.
    Keyword Arguments:
        - mode -- A string indicating how the file is to be opened. Accepts the
            same values as the builtin open() function.
        - bufsize -- The file's desired buffer size. Accepts the same values as
            the builtin open() function.
    """
    def __init__(self, mode='r', bufsize=None):
        self._mode = mode
        self._bufsize = bufsize
    def __call__(self, string):
        # the special argument "-" means sys.std{in,out}
        if string == '-':
            if 'r' in self._mode:
                return _sys.stdin
            elif 'w' in self._mode:
                return _sys.stdout
            else:
                # Fix: translate the template, THEN interpolate.  The
                # original formatted first (_('...' % mode)), so the
                # gettext catalog lookup key could never match.
                msg = _('argument "-" with mode %r') % self._mode
                raise ValueError(msg)
        # all other arguments are used as file names
        if self._bufsize:
            return open(string, self._mode, self._bufsize)
        else:
            return open(string, self._mode)
    def __repr__(self):
        # Show only the arguments that were actually supplied.
        args = [self._mode, self._bufsize]
        args_str = ', '.join([repr(arg) for arg in args if arg is not None])
        return '%s(%s)' % (type(self).__name__, args_str)
# ===========================
# Optional and Positional Parsing
# ===========================
class Namespace(_AttributeHolder):
    """Simple object for storing attributes.
    Implements equality by attribute names and values, and provides a simple
    string representation.
    """
    def __init__(self, **kwargs):
        for name in kwargs:
            setattr(self, name, kwargs[name])
    def __eq__(self, other):
        # Robustness fix: the original called vars(other) unconditionally,
        # raising TypeError when compared against objects without a
        # __dict__ (e.g. ints).  Returning NotImplemented defers to the
        # other operand instead, matching the later stdlib behavior.
        if not isinstance(other, Namespace):
            return NotImplemented
        return vars(self) == vars(other)
    def __ne__(self, other):
        return not (self == other)
    def __contains__(self, key):
        return key in self.__dict__
class _ActionsContainer(object):
    """Shared machinery for objects that contain argument actions.
    Both ArgumentParser and _ArgumentGroup derive from this class; it
    owns the action/type registries, the action and group lists, the
    defaults storage, and the add_argument() entry point.
    """
    def __init__(self,
                 description,
                 prefix_chars,
                 argument_default,
                 conflict_handler):
        super(_ActionsContainer, self).__init__()
        self.description = description
        self.argument_default = argument_default
        self.prefix_chars = prefix_chars
        self.conflict_handler = conflict_handler
        # set up registries
        self._registries = {}
        # register actions
        self.register('action', None, _StoreAction)
        self.register('action', 'store', _StoreAction)
        self.register('action', 'store_const', _StoreConstAction)
        self.register('action', 'store_true', _StoreTrueAction)
        self.register('action', 'store_false', _StoreFalseAction)
        self.register('action', 'append', _AppendAction)
        self.register('action', 'append_const', _AppendConstAction)
        self.register('action', 'count', _CountAction)
        self.register('action', 'help', _HelpAction)
        self.register('action', 'version', _VersionAction)
        self.register('action', 'parsers', _SubParsersAction)
        # raise an exception if the conflict handler is invalid
        self._get_handler()
        # action storage
        self._actions = []
        self._option_string_actions = {}
        # groups
        self._action_groups = []
        self._mutually_exclusive_groups = []
        # defaults storage
        self._defaults = {}
        # determines whether an "option" looks like a negative number
        self._negative_number_matcher = _re.compile(r'^-\d+$|^-\d*\.\d+$')
        # whether or not there are any optionals that look like negative
        # numbers -- uses a list so it can be shared and edited
        self._has_negative_number_optionals = []
    # ====================
    # Registration methods
    # ====================
    def register(self, registry_name, value, object):
        """Map ``value`` to ``object`` in the named registry."""
        registry = self._registries.setdefault(registry_name, {})
        registry[value] = object
    def _registry_get(self, registry_name, value, default=None):
        """Look up ``value`` in a registry, falling back to ``default``."""
        return self._registries[registry_name].get(value, default)
    # ==================================
    # Namespace default accessor methods
    # ==================================
    def set_defaults(self, **kwargs):
        """Set parser-level defaults, updating already-added actions too."""
        self._defaults.update(kwargs)
        # if these defaults match any existing arguments, replace
        # the previous default on the object with the new one
        for action in self._actions:
            if action.dest in kwargs:
                action.default = kwargs[action.dest]
    def get_default(self, dest):
        """Return the default for ``dest``: action default wins over
        parser-level default; None if neither is set."""
        for action in self._actions:
            if action.dest == dest and action.default is not None:
                return action.default
        return self._defaults.get(dest, None)
    # =======================
    # Adding argument actions
    # =======================
    def add_argument(self, *args, **kwargs):
        """
        add_argument(dest, ..., name=value, ...)
        add_argument(option_string, option_string, ..., name=value, ...)
        Builds an Action from the supplied specification (positional or
        optional form), applies parser-level defaults, and registers it.
        """
        # if no positional args are supplied or only one is supplied and
        # it doesn't look like an option string, parse a positional
        # argument
        chars = self.prefix_chars
        if not args or len(args) == 1 and args[0][0] not in chars:
            if args and 'dest' in kwargs:
                raise ValueError('dest supplied twice for positional argument')
            kwargs = self._get_positional_kwargs(*args, **kwargs)
        # otherwise, we're adding an optional argument
        else:
            kwargs = self._get_optional_kwargs(*args, **kwargs)
        # if no default was supplied, use the parser-level default
        if 'default' not in kwargs:
            dest = kwargs['dest']
            if dest in self._defaults:
                kwargs['default'] = self._defaults[dest]
            elif self.argument_default is not None:
                kwargs['default'] = self.argument_default
        # create the action object, and add it to the parser
        action_class = self._pop_action_class(kwargs)
        if not _callable(action_class):
            raise ValueError('unknown action "%s"' % action_class)
        action = action_class(**kwargs)
        # raise an error if the action type is not callable
        type_func = self._registry_get('type', action.type, action.type)
        if not _callable(type_func):
            raise ValueError('%r is not callable' % type_func)
        return self._add_action(action)
    def add_argument_group(self, *args, **kwargs):
        """Create, register and return a new _ArgumentGroup."""
        group = _ArgumentGroup(self, *args, **kwargs)
        self._action_groups.append(group)
        return group
    def add_mutually_exclusive_group(self, **kwargs):
        """Create, register and return a new _MutuallyExclusiveGroup."""
        group = _MutuallyExclusiveGroup(self, **kwargs)
        self._mutually_exclusive_groups.append(group)
        return group
    def _add_action(self, action):
        """Register a fully-constructed action with this container."""
        # resolve any conflicts
        self._check_conflict(action)
        # add to actions list
        self._actions.append(action)
        action.container = self
        # index the action by any option strings it has
        for option_string in action.option_strings:
            self._option_string_actions[option_string] = action
        # set the flag if any option strings look like negative numbers
        for option_string in action.option_strings:
            if self._negative_number_matcher.match(option_string):
                if not self._has_negative_number_optionals:
                    self._has_negative_number_optionals.append(True)
        # return the created action
        return action
    def _remove_action(self, action):
        self._actions.remove(action)
    def _add_container_actions(self, container):
        """Copy another container's actions into this one (used by the
        parents= feature), preserving group membership."""
        # collect groups by titles
        title_group_map = {}
        for group in self._action_groups:
            if group.title in title_group_map:
                msg = _('cannot merge actions - two groups are named %r')
                raise ValueError(msg % (group.title))
            title_group_map[group.title] = group
        # map each action to its group
        group_map = {}
        for group in container._action_groups:
            # if a group with the title exists, use that, otherwise
            # create a new group matching the container's group
            if group.title not in title_group_map:
                title_group_map[group.title] = self.add_argument_group(
                    title=group.title,
                    description=group.description,
                    conflict_handler=group.conflict_handler)
            # map the actions to their new group
            for action in group._group_actions:
                group_map[action] = title_group_map[group.title]
        # add container's mutually exclusive groups
        # NOTE: if add_mutually_exclusive_group ever gains title= and
        # description= then this code will need to be expanded as above
        for group in container._mutually_exclusive_groups:
            mutex_group = self.add_mutually_exclusive_group(
                required=group.required)
            # map the actions to their new mutex group
            for action in group._group_actions:
                group_map[action] = mutex_group
        # add all actions to this container or their group
        for action in container._actions:
            group_map.get(action, self)._add_action(action)
    def _get_positional_kwargs(self, dest, **kwargs):
        """Normalize add_argument() kwargs for a positional argument."""
        # make sure required is not specified
        if 'required' in kwargs:
            msg = _("'required' is an invalid argument for positionals")
            raise TypeError(msg)
        # mark positional arguments as required if at least one is
        # always required
        if kwargs.get('nargs') not in [OPTIONAL, ZERO_OR_MORE]:
            kwargs['required'] = True
        if kwargs.get('nargs') == ZERO_OR_MORE and 'default' not in kwargs:
            kwargs['required'] = True
        # return the keyword arguments with no option strings
        return dict(kwargs, dest=dest, option_strings=[])
    def _get_optional_kwargs(self, *args, **kwargs):
        """Normalize add_argument() kwargs for an optional argument,
        validating option strings and inferring dest if needed."""
        # determine short and long option strings
        option_strings = []
        long_option_strings = []
        for option_string in args:
            # error on strings that don't start with an appropriate prefix
            if not option_string[0] in self.prefix_chars:
                msg = _('invalid option string %r: '
                        'must start with a character %r')
                tup = option_string, self.prefix_chars
                raise ValueError(msg % tup)
            # strings starting with two prefix characters are long options
            option_strings.append(option_string)
            if option_string[0] in self.prefix_chars:
                if len(option_string) > 1:
                    if option_string[1] in self.prefix_chars:
                        long_option_strings.append(option_string)
        # infer destination, '--foo-bar' -> 'foo_bar' and '-x' -> 'x'
        dest = kwargs.pop('dest', None)
        if dest is None:
            if long_option_strings:
                dest_option_string = long_option_strings[0]
            else:
                dest_option_string = option_strings[0]
            dest = dest_option_string.lstrip(self.prefix_chars)
            if not dest:
                msg = _('dest= is required for options like %r')
                raise ValueError(msg % option_string)
            dest = dest.replace('-', '_')
        # return the updated keyword arguments
        return dict(kwargs, dest=dest, option_strings=option_strings)
    def _pop_action_class(self, kwargs, default=None):
        """Pop 'action' from kwargs and resolve it through the registry."""
        action = kwargs.pop('action', default)
        return self._registry_get('action', action, action)
    def _get_handler(self):
        """Resolve the conflict_handler name to a bound method."""
        # determine function from conflict handler string
        handler_func_name = '_handle_conflict_%s' % self.conflict_handler
        try:
            return getattr(self, handler_func_name)
        except AttributeError:
            # NOTE: message says "conflict_resolution" although the
            # attribute is named conflict_handler (historical wording).
            msg = _('invalid conflict_resolution value: %r')
            raise ValueError(msg % self.conflict_handler)
    def _check_conflict(self, action):
        """Detect option-string clashes and delegate to the handler."""
        # find all options that conflict with this option
        confl_optionals = []
        for option_string in action.option_strings:
            if option_string in self._option_string_actions:
                confl_optional = self._option_string_actions[option_string]
                confl_optionals.append((option_string, confl_optional))
        # resolve any conflicts
        if confl_optionals:
            conflict_handler = self._get_handler()
            conflict_handler(action, confl_optionals)
    def _handle_conflict_error(self, action, conflicting_actions):
        """conflict_handler='error': raise on any clash."""
        message = _('conflicting option string(s): %s')
        conflict_string = ', '.join([option_string
                                     for option_string, action
                                     in conflicting_actions])
        raise ArgumentError(action, message % conflict_string)
    def _handle_conflict_resolve(self, action, conflicting_actions):
        """conflict_handler='resolve': strip the clashing strings from
        the older actions, removing an action left with none."""
        # remove all conflicting options
        for option_string, action in conflicting_actions:
            # remove the conflicting option
            action.option_strings.remove(option_string)
            self._option_string_actions.pop(option_string, None)
            # if the option now has no option string, remove it from the
            # container holding it
            if not action.option_strings:
                action.container._remove_action(action)
class _ArgumentGroup(_ActionsContainer):
    """Named grouping of actions that shares its parent container's state."""
    def __init__(self, container, title=None, description=None, **kwargs):
        # Fall back to the container's settings for anything unspecified.
        kwargs.setdefault('conflict_handler', container.conflict_handler)
        kwargs.setdefault('prefix_chars', container.prefix_chars)
        kwargs.setdefault('argument_default', container.argument_default)
        super(_ArgumentGroup, self).__init__(description=description, **kwargs)
        # group attributes
        self.title = title
        self._group_actions = []
        # Alias the parent's mutable state so actions registered through
        # the group are visible to the container and vice versa.
        self._registries = container._registries
        self._actions = container._actions
        self._option_string_actions = container._option_string_actions
        self._defaults = container._defaults
        self._has_negative_number_optionals = \
            container._has_negative_number_optionals
    def _add_action(self, action):
        added = super(_ArgumentGroup, self)._add_action(action)
        self._group_actions.append(added)
        return added
    def _remove_action(self, action):
        super(_ArgumentGroup, self)._remove_action(action)
        self._group_actions.remove(action)
class _MutuallyExclusiveGroup(_ArgumentGroup):
    """Group whose member options may not be combined on one command line."""
    def __init__(self, container, required=False):
        super(_MutuallyExclusiveGroup, self).__init__(container)
        self.required = required
        self._container = container
    def _add_action(self, action):
        # Required members could never be satisfied simultaneously.
        if action.required:
            raise ValueError(_('mutually exclusive arguments must be optional'))
        added = self._container._add_action(action)
        self._group_actions.append(added)
        return added
    def _remove_action(self, action):
        self._container._remove_action(action)
        self._group_actions.remove(action)
class ArgumentParser(_AttributeHolder, _ActionsContainer):
"""Object for parsing command line strings into Python objects.
Keyword Arguments:
- prog -- The name of the program (default: sys.argv[0])
- usage -- A usage message (default: auto-generated from arguments)
- description -- A description of what the program does
- epilog -- Text following the argument descriptions
- parents -- Parsers whose arguments should be copied into this one
- formatter_class -- HelpFormatter class for printing help messages
- prefix_chars -- Characters that prefix optional arguments
- fromfile_prefix_chars -- Characters that prefix files containing
additional arguments
- argument_default -- The default value for all arguments
- conflict_handler -- String indicating how to handle conflicts
- add_help -- Add a -h/-help option
"""
    def __init__(self,
                 prog=None,
                 usage=None,
                 description=None,
                 epilog=None,
                 version=None,
                 parents=[],
                 formatter_class=HelpFormatter,
                 prefix_chars='-',
                 fromfile_prefix_chars=None,
                 argument_default=None,
                 conflict_handler='error',
                 add_help=True):
        """Build the parser, its default argument groups, and the
        automatic -h/--help (and deprecated -v/--version) options.
        NOTE: parents=[] is a shared mutable default; harmless here only
        because the list is never mutated below.
        """
        if version is not None:
            import warnings
            warnings.warn(
                """The "version" argument to ArgumentParser is deprecated. """
                """Please use """
                """"add_argument(..., action='version', version="N", ...)" """
                """instead""", DeprecationWarning)
        superinit = super(ArgumentParser, self).__init__
        superinit(description=description,
                  prefix_chars=prefix_chars,
                  argument_default=argument_default,
                  conflict_handler=conflict_handler)
        # default setting for prog
        if prog is None:
            prog = _os.path.basename(_sys.argv[0])
        self.prog = prog
        self.usage = usage
        self.epilog = epilog
        self.version = version
        self.formatter_class = formatter_class
        self.fromfile_prefix_chars = fromfile_prefix_chars
        self.add_help = add_help
        add_group = self.add_argument_group
        self._positionals = add_group(_('positional arguments'))
        self._optionals = add_group(_('optional arguments'))
        self._subparsers = None
        # register types
        def identity(string):
            return string
        self.register('type', None, identity)
        # add help and version arguments if necessary
        # (using explicit default to override global argument_default)
        if self.add_help:
            self.add_argument(
                '-h', '--help', action='help', default=SUPPRESS,
                help=_('show this help message and exit'))
        if self.version:
            self.add_argument(
                '-v', '--version', action='version', default=SUPPRESS,
                version=self.version,
                help=_("show program's version number and exit"))
        # add parent arguments and defaults
        for parent in parents:
            self._add_container_actions(parent)
            try:
                defaults = parent._defaults
            except AttributeError:
                pass
            else:
                self._defaults.update(defaults)
# =======================
# Pretty __repr__ methods
# =======================
def _get_kwargs(self):
names = [
'prog',
'usage',
'description',
'version',
'formatter_class',
'conflict_handler',
'add_help',
]
return [(name, getattr(self, name)) for name in names]
# ==================================
# Optional/Positional adding methods
# ==================================
    def add_subparsers(self, **kwargs):
        """Create and return this parser's single _SubParsersAction.
        Only one subparsers action is allowed per parser; a second call
        is reported through self.error().
        """
        if self._subparsers is not None:
            self.error(_('cannot have multiple subparser arguments'))
        # add the parser class to the arguments if it's not present
        kwargs.setdefault('parser_class', type(self))
        if 'title' in kwargs or 'description' in kwargs:
            title = _(kwargs.pop('title', 'subcommands'))
            description = _(kwargs.pop('description', None))
            self._subparsers = self.add_argument_group(title, description)
        else:
            self._subparsers = self._positionals
        # prog defaults to the usage message of this parser, skipping
        # optional arguments and with no "usage:" prefix
        if kwargs.get('prog') is None:
            formatter = self._get_formatter()
            positionals = self._get_positional_actions()
            groups = self._mutually_exclusive_groups
            formatter.add_usage(self.usage, positionals, groups, '')
            kwargs['prog'] = formatter.format_help().strip()
        # create the parsers action and add it to the positionals list
        parsers_class = self._pop_action_class(kwargs, 'parsers')
        action = parsers_class(option_strings=[], **kwargs)
        self._subparsers._add_action(action)
        # return the created parsers action
        return action
def _add_action(self, action):
if action.option_strings:
self._optionals._add_action(action)
else:
self._positionals._add_action(action)
return action
def _get_optional_actions(self):
return [action
for action in self._actions
if action.option_strings]
def _get_positional_actions(self):
return [action
for action in self._actions
if not action.option_strings]
# =====================================
# Command line argument parsing methods
# =====================================
def parse_args(self, args=None, namespace=None):
args, argv = self.parse_known_args(args, namespace)
if argv:
msg = _('unrecognized arguments: %s')
self.error(msg % ' '.join(argv))
return args
    def parse_known_args(self, args=None, namespace=None):
        """Parse recognized arguments, returning (namespace, extras).
        Unlike parse_args(), argument strings that cannot be matched are
        returned in the extras list instead of triggering an error.
        """
        # args default to the system args
        if args is None:
            args = _sys.argv[1:]
        # default Namespace built from parser defaults
        if namespace is None:
            namespace = Namespace()
        # add any action defaults that aren't present
        for action in self._actions:
            if action.dest is not SUPPRESS:
                if not hasattr(namespace, action.dest):
                    if action.default is not SUPPRESS:
                        default = action.default
                        # string defaults go through the action's type=
                        # conversion, matching command-line values
                        if isinstance(action.default, _basestring):
                            default = self._get_value(action, default)
                        setattr(namespace, action.dest, default)
        # add any parser defaults that aren't present
        for dest in self._defaults:
            if not hasattr(namespace, dest):
                setattr(namespace, dest, self._defaults[dest])
        # parse the arguments and exit if there are any errors
        try:
            return self._parse_known_args(args, namespace)
        except ArgumentError:
            err = _sys.exc_info()[1]
            self.error(str(err))
    def _parse_known_args(self, arg_strings, namespace):
        """Core parsing state machine.
        Classifies every arg string as option ('O'), plain argument ('A')
        or '--' separator, then alternately consumes positionals and
        optionals, writing results into ``namespace`` and collecting
        unmatched strings into ``extras``.  Returns (namespace, extras).
        """
        # replace arg strings that are file references
        if self.fromfile_prefix_chars is not None:
            arg_strings = self._read_args_from_files(arg_strings)
        # map all mutually exclusive arguments to the other arguments
        # they can't occur with
        action_conflicts = {}
        for mutex_group in self._mutually_exclusive_groups:
            group_actions = mutex_group._group_actions
            for i, mutex_action in enumerate(mutex_group._group_actions):
                conflicts = action_conflicts.setdefault(mutex_action, [])
                conflicts.extend(group_actions[:i])
                conflicts.extend(group_actions[i + 1:])
        # find all option indices, and determine the arg_string_pattern
        # which has an 'O' if there is an option at an index,
        # an 'A' if there is an argument, or a '-' if there is a '--'
        option_string_indices = {}
        arg_string_pattern_parts = []
        arg_strings_iter = iter(arg_strings)
        for i, arg_string in enumerate(arg_strings_iter):
            # all args after -- are non-options
            if arg_string == '--':
                arg_string_pattern_parts.append('-')
                for arg_string in arg_strings_iter:
                    arg_string_pattern_parts.append('A')
            # otherwise, add the arg to the arg strings
            # and note the index if it was an option
            else:
                option_tuple = self._parse_optional(arg_string)
                if option_tuple is None:
                    pattern = 'A'
                else:
                    option_string_indices[i] = option_tuple
                    pattern = 'O'
                arg_string_pattern_parts.append(pattern)
        # join the pieces together to form the pattern
        arg_strings_pattern = ''.join(arg_string_pattern_parts)
        # converts arg strings to the appropriate and then takes the action
        seen_actions = _set()
        seen_non_default_actions = _set()
        def take_action(action, argument_strings, option_string=None):
            # Convert the raw strings and invoke the action, enforcing
            # mutual-exclusion constraints first.
            seen_actions.add(action)
            argument_values = self._get_values(action, argument_strings)
            # error if this argument is not allowed with other previously
            # seen arguments, assuming that actions that use the default
            # value don't really count as "present"
            if argument_values is not action.default:
                seen_non_default_actions.add(action)
                for conflict_action in action_conflicts.get(action, []):
                    if conflict_action in seen_non_default_actions:
                        msg = _('not allowed with argument %s')
                        action_name = _get_action_name(conflict_action)
                        raise ArgumentError(action, msg % action_name)
            # take the action if we didn't receive a SUPPRESS value
            # (e.g. from a default)
            if argument_values is not SUPPRESS:
                action(self, namespace, argument_values, option_string)
        # function to convert arg_strings into an optional action
        def consume_optional(start_index):
            # get the optional identified at this index
            option_tuple = option_string_indices[start_index]
            action, option_string, explicit_arg = option_tuple
            # identify additional optionals in the same arg string
            # (e.g. -xyz is the same as -x -y -z if no args are required)
            match_argument = self._match_argument
            action_tuples = []
            while True:
                # if we found no optional action, skip it
                if action is None:
                    extras.append(arg_strings[start_index])
                    return start_index + 1
                # if there is an explicit argument, try to match the
                # optional's string arguments to only this
                if explicit_arg is not None:
                    arg_count = match_argument(action, 'A')
                    # if the action is a single-dash option and takes no
                    # arguments, try to parse more single-dash options out
                    # of the tail of the option string
                    chars = self.prefix_chars
                    if arg_count == 0 and option_string[1] not in chars:
                        action_tuples.append((action, [], option_string))
                        for char in self.prefix_chars:
                            option_string = char + explicit_arg[0]
                            explicit_arg = explicit_arg[1:] or None
                            optionals_map = self._option_string_actions
                            if option_string in optionals_map:
                                action = optionals_map[option_string]
                                break
                        else:
                            msg = _('ignored explicit argument %r')
                            raise ArgumentError(action, msg % explicit_arg)
                    # if the action expects exactly one argument, we've
                    # successfully matched the option; exit the loop
                    elif arg_count == 1:
                        stop = start_index + 1
                        args = [explicit_arg]
                        action_tuples.append((action, args, option_string))
                        break
                    # error if a double-dash option did not use the
                    # explicit argument
                    else:
                        msg = _('ignored explicit argument %r')
                        raise ArgumentError(action, msg % explicit_arg)
                # if there is no explicit argument, try to match the
                # optional's string arguments with the following strings
                # if successful, exit the loop
                else:
                    start = start_index + 1
                    selected_patterns = arg_strings_pattern[start:]
                    arg_count = match_argument(action, selected_patterns)
                    stop = start + arg_count
                    args = arg_strings[start:stop]
                    action_tuples.append((action, args, option_string))
                    break
            # add the Optional to the list and return the index at which
            # the Optional's string args stopped
            assert action_tuples
            for action, args, option_string in action_tuples:
                take_action(action, args, option_string)
            return stop
        # the list of Positionals left to be parsed; this is modified
        # by consume_positionals()
        positionals = self._get_positional_actions()
        # function to convert arg_strings into positional actions
        def consume_positionals(start_index):
            # match as many Positionals as possible
            match_partial = self._match_arguments_partial
            selected_pattern = arg_strings_pattern[start_index:]
            arg_counts = match_partial(positionals, selected_pattern)
            # slice off the appropriate arg strings for each Positional
            # and add the Positional and its args to the list
            for action, arg_count in zip(positionals, arg_counts):
                args = arg_strings[start_index: start_index + arg_count]
                start_index += arg_count
                take_action(action, args)
            # slice off the Positionals that we just parsed and return the
            # index at which the Positionals' string args stopped
            positionals[:] = positionals[len(arg_counts):]
            return start_index
        # consume Positionals and Optionals alternately, until we have
        # passed the last option string
        extras = []
        start_index = 0
        if option_string_indices:
            max_option_string_index = max(option_string_indices)
        else:
            max_option_string_index = -1
        while start_index <= max_option_string_index:
            # consume any Positionals preceding the next option
            next_option_string_index = min([
                index
                for index in option_string_indices
                if index >= start_index])
            if start_index != next_option_string_index:
                positionals_end_index = consume_positionals(start_index)
                # only try to parse the next optional if we didn't consume
                # the option string during the positionals parsing
                if positionals_end_index > start_index:
                    start_index = positionals_end_index
                    continue
                else:
                    start_index = positionals_end_index
            # if we consumed all the positionals we could and we're not
            # at the index of an option string, there were extra arguments
            if start_index not in option_string_indices:
                strings = arg_strings[start_index:next_option_string_index]
                extras.extend(strings)
                start_index = next_option_string_index
            # consume the next optional and any arguments for it
            start_index = consume_optional(start_index)
        # consume any positionals following the last Optional
        stop_index = consume_positionals(start_index)
        # if we didn't consume all the argument strings, there were extras
        extras.extend(arg_strings[stop_index:])
        # if we didn't use all the Positional objects, there were too few
        # arg strings supplied.
        if positionals:
            self.error(_('too few arguments'))
        # make sure all required actions were present
        for action in self._actions:
            if action.required:
                if action not in seen_actions:
                    name = _get_action_name(action)
                    self.error(_('argument %s is required') % name)
        # make sure all required groups had one option present
        for group in self._mutually_exclusive_groups:
            if group.required:
                for action in group._group_actions:
                    if action in seen_non_default_actions:
                        break
                # if no actions were used, report the error
                else:
                    names = [_get_action_name(action)
                             for action in group._group_actions
                             if action.help is not SUPPRESS]
                    msg = _('one of the arguments %s is required')
                    self.error(msg % ' '.join(names))
        # return the updated namespace and the extra arguments
        return namespace, extras
def _read_args_from_files(self, arg_strings):
# expand arguments referencing files
new_arg_strings = []
for arg_string in arg_strings:
# for regular arguments, just add them back into the list
if arg_string[0] not in self.fromfile_prefix_chars:
new_arg_strings.append(arg_string)
# replace arguments referencing files with the file content
else:
try:
args_file = open(arg_string[1:])
try:
arg_strings = []
for arg_line in args_file.read().splitlines():
for arg in self.convert_arg_line_to_args(arg_line):
arg_strings.append(arg)
arg_strings = self._read_args_from_files(arg_strings)
new_arg_strings.extend(arg_strings)
finally:
args_file.close()
except IOError:
err = _sys.exc_info()[1]
self.error(str(err))
# return the modified argument list
return new_arg_strings
def convert_arg_line_to_args(self, arg_line):
return [arg_line]
def _match_argument(self, action, arg_strings_pattern):
# match the pattern for this action to the arg strings
nargs_pattern = self._get_nargs_pattern(action)
match = _re.match(nargs_pattern, arg_strings_pattern)
# raise an exception if we weren't able to find a match
if match is None:
nargs_errors = {
None: _('expected one argument'),
OPTIONAL: _('expected at most one argument'),
ONE_OR_MORE: _('expected at least one argument'),
}
default = _('expected %s argument(s)') % action.nargs
msg = nargs_errors.get(action.nargs, default)
raise ArgumentError(action, msg)
# return the number of arguments matched
return len(match.group(1))
def _match_arguments_partial(self, actions, arg_strings_pattern):
# progressively shorten the actions list by slicing off the
# final actions until we find a match
result = []
for i in range(len(actions), 0, -1):
actions_slice = actions[:i]
pattern = ''.join([self._get_nargs_pattern(action)
for action in actions_slice])
match = _re.match(pattern, arg_strings_pattern)
if match is not None:
result.extend([len(string) for string in match.groups()])
break
# return the list of arg string counts
return result
    def _parse_optional(self, arg_string):
        """Decide whether *arg_string* is an optional and, if so, which one.

        Returns None when the string should be consumed as a positional.
        Otherwise returns a (action, option_string, explicit_arg) triple;
        action is None when the string looks like an option but matches
        nothing registered here (it may still belong to a subparser).
        """
        # if it's an empty string, it was meant to be a positional
        if not arg_string:
            return None
        # if it doesn't start with a prefix, it was meant to be positional
        if not arg_string[0] in self.prefix_chars:
            return None
        # if the option string is present in the parser, return the action
        if arg_string in self._option_string_actions:
            action = self._option_string_actions[arg_string]
            return action, arg_string, None
        # if it's just a single character, it was meant to be positional
        if len(arg_string) == 1:
            return None
        # if the option string before the "=" is present, return the action
        if '=' in arg_string:
            option_string, explicit_arg = arg_string.split('=', 1)
            if option_string in self._option_string_actions:
                action = self._option_string_actions[option_string]
                return action, option_string, explicit_arg
        # search through all possible prefixes of the option string
        # and all actions in the parser for possible interpretations
        option_tuples = self._get_option_tuples(arg_string)
        # if multiple actions match, the option string was ambiguous
        if len(option_tuples) > 1:
            options = ', '.join([option_string
                for action, option_string, explicit_arg in option_tuples])
            tup = arg_string, options
            self.error(_('ambiguous option: %s could match %s') % tup)
        # if exactly one action matched, this segmentation is good,
        # so return the parsed action
        elif len(option_tuples) == 1:
            option_tuple, = option_tuples
            return option_tuple
        # if it was not found as an option, but it looks like a negative
        # number, it was meant to be positional
        # unless there are negative-number-like options
        if self._negative_number_matcher.match(arg_string):
            if not self._has_negative_number_optionals:
                return None
        # if it contains a space, it was meant to be a positional
        if ' ' in arg_string:
            return None
        # it was meant to be an optional but there is no such option
        # in this parser (though it might be a valid option in a subparser)
        return None, arg_string, None
    def _get_option_tuples(self, option_string):
        """List every registered option *option_string* could stand for.

        Returns (action, option_string, explicit_arg) triples: prefix
        abbreviations for long options, and for short options both the
        exact-flag-plus-glued-argument and plain-prefix interpretations.
        """
        result = []
        # option strings starting with two prefix characters are only
        # split at the '='
        chars = self.prefix_chars
        if option_string[0] in chars and option_string[1] in chars:
            if '=' in option_string:
                option_prefix, explicit_arg = option_string.split('=', 1)
            else:
                option_prefix = option_string
                explicit_arg = None
            # NOTE: the loop variable deliberately reuses (shadows) the
            # 'option_string' parameter name from here on.
            for option_string in self._option_string_actions:
                if option_string.startswith(option_prefix):
                    action = self._option_string_actions[option_string]
                    tup = action, option_string, explicit_arg
                    result.append(tup)
        # single character options can be concatenated with their arguments
        # but multiple character options always have to have their argument
        # separate
        elif option_string[0] in chars and option_string[1] not in chars:
            option_prefix = option_string
            explicit_arg = None
            # e.g. '-xVALUE' -> flag '-x' with glued argument 'VALUE'
            short_option_prefix = option_string[:2]
            short_explicit_arg = option_string[2:]
            for option_string in self._option_string_actions:
                if option_string == short_option_prefix:
                    action = self._option_string_actions[option_string]
                    tup = action, option_string, short_explicit_arg
                    result.append(tup)
                elif option_string.startswith(option_prefix):
                    action = self._option_string_actions[option_string]
                    tup = action, option_string, explicit_arg
                    result.append(tup)
        # shouldn't ever get here
        else:
            self.error(_('unexpected option string: %s') % option_string)
        # return the collected option tuples
        return result
    def _get_nargs_pattern(self, action):
        """Return a regex snippet matching the strings *action* consumes.

        The pattern operates on the parser's encoded string of argument
        kinds, where 'A' marks an argument string, 'O' an option string
        and '-' a '--' separator (per the comments below).  The single
        group captures what the action consumes.
        """
        # in all examples below, we have to allow for '--' args
        # which are represented as '-' in the pattern
        nargs = action.nargs
        # the default (None) is assumed to be a single argument
        if nargs is None:
            nargs_pattern = '(-*A-*)'
        # allow zero or one arguments
        elif nargs == OPTIONAL:
            nargs_pattern = '(-*A?-*)'
        # allow zero or more arguments
        elif nargs == ZERO_OR_MORE:
            nargs_pattern = '(-*[A-]*)'
        # allow one or more arguments
        elif nargs == ONE_OR_MORE:
            nargs_pattern = '(-*A[A-]*)'
        # allow any number of options or arguments
        elif nargs == REMAINDER:
            nargs_pattern = '([-AO]*)'
        # allow one argument followed by any number of options or arguments
        elif nargs == PARSER:
            nargs_pattern = '(-*A[-AO]*)'
        # all others should be integers
        else:
            nargs_pattern = '(-*%s-*)' % '-*'.join('A' * nargs)
        # if this is an optional action, -- is not allowed
        if action.option_strings:
            nargs_pattern = nargs_pattern.replace('-*', '')
            nargs_pattern = nargs_pattern.replace('-', '')
        # return the pattern
        return nargs_pattern
# ========================
# Value conversion methods
# ========================
    def _get_values(self, action, arg_strings):
        """Convert the matched strings for *action* into its final value.

        The result shape depends on action.nargs: a single converted value
        for None/OPTIONAL, otherwise a list (possibly the default).  Choice
        checking is applied to everything except REMAINDER values and all
        but the first PARSER value.
        """
        # for everything but PARSER args, strip out '--'
        if action.nargs not in [PARSER, REMAINDER]:
            arg_strings = [s for s in arg_strings if s != '--']
        # optional argument produces a default when not present
        if not arg_strings and action.nargs == OPTIONAL:
            if action.option_strings:
                value = action.const
            else:
                value = action.default
            # a string default is converted as if typed on the command line
            # (_basestring: presumably a module-level str/basestring
            # compatibility alias -- not visible in this chunk)
            if isinstance(value, _basestring):
                value = self._get_value(action, value)
                self._check_value(action, value)
        # when nargs='*' on a positional, if there were no command-line
        # args, use the default if it is anything other than None
        elif (not arg_strings and action.nargs == ZERO_OR_MORE and
              not action.option_strings):
            if action.default is not None:
                value = action.default
            else:
                value = arg_strings
            self._check_value(action, value)
        # single argument or optional argument produces a single value
        elif len(arg_strings) == 1 and action.nargs in [None, OPTIONAL]:
            arg_string, = arg_strings
            value = self._get_value(action, arg_string)
            self._check_value(action, value)
        # REMAINDER arguments convert all values, checking none
        elif action.nargs == REMAINDER:
            value = [self._get_value(action, v) for v in arg_strings]
        # PARSER arguments convert all values, but check only the first
        elif action.nargs == PARSER:
            value = [self._get_value(action, v) for v in arg_strings]
            self._check_value(action, value[0])
        # all other types of nargs produce a list
        else:
            value = [self._get_value(action, v) for v in arg_strings]
            for v in value:
                self._check_value(action, v)
        # return the converted value
        return value
def _get_value(self, action, arg_string):
type_func = self._registry_get('type', action.type, action.type)
if not _callable(type_func):
msg = _('%r is not callable')
raise ArgumentError(action, msg % type_func)
# convert the value to the appropriate type
try:
result = type_func(arg_string)
# ArgumentTypeErrors indicate errors
except ArgumentTypeError:
name = getattr(action.type, '__name__', repr(action.type))
msg = str(_sys.exc_info()[1])
raise ArgumentError(action, msg)
# TypeErrors or ValueErrors also indicate errors
except (TypeError, ValueError):
name = getattr(action.type, '__name__', repr(action.type))
msg = _('invalid %s value: %r')
raise ArgumentError(action, msg % (name, arg_string))
# return the converted value
return result
def _check_value(self, action, value):
# converted value must be one of the choices (if specified)
if action.choices is not None and value not in action.choices:
tup = value, ', '.join(map(repr, action.choices))
msg = _('invalid choice: %r (choose from %s)') % tup
raise ArgumentError(action, msg)
# =======================
# Help-formatting methods
# =======================
def format_usage(self):
formatter = self._get_formatter()
formatter.add_usage(self.usage, self._actions,
self._mutually_exclusive_groups)
return formatter.format_help()
def format_help(self):
formatter = self._get_formatter()
# usage
formatter.add_usage(self.usage, self._actions,
self._mutually_exclusive_groups)
# description
formatter.add_text(self.description)
# positionals, optionals and user-defined groups
for action_group in self._action_groups:
formatter.start_section(action_group.title)
formatter.add_text(action_group.description)
formatter.add_arguments(action_group._group_actions)
formatter.end_section()
# epilog
formatter.add_text(self.epilog)
# determine help from format above
return formatter.format_help()
def format_version(self):
import warnings
warnings.warn(
'The format_version method is deprecated -- the "version" '
'argument to ArgumentParser is no longer supported.',
DeprecationWarning)
formatter = self._get_formatter()
formatter.add_text(self.version)
return formatter.format_help()
def _get_formatter(self):
return self.formatter_class(prog=self.prog)
# =====================
# Help-printing methods
# =====================
def print_usage(self, file=None):
if file is None:
file = _sys.stdout
self._print_message(self.format_usage(), file)
def print_help(self, file=None):
if file is None:
file = _sys.stdout
self._print_message(self.format_help(), file)
def print_version(self, file=None):
import warnings
warnings.warn(
'The print_version method is deprecated -- the "version" '
'argument to ArgumentParser is no longer supported.',
DeprecationWarning)
self._print_message(self.format_version(), file)
def _print_message(self, message, file=None):
if message:
if file is None:
file = _sys.stderr
file.write(message)
# ===============
# Exiting methods
# ===============
def exit(self, status=0, message=None):
if message:
self._print_message(message, _sys.stderr)
_sys.exit(status)
def error(self, message):
"""error(message: string)
Prints a usage message incorporating the message to stderr and
exits.
If you override this in a subclass, it should not return -- it
should either exit or raise an exception.
"""
self.print_usage(_sys.stderr)
self.exit(2, _('%s: error: %s\n') % (self.prog, message))
| gpl-3.0 |
FrankBian/kuma | kuma/wiki/migrations/0017_deferred_rendering.py | 5 | 15728 | # encoding: utf-8
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
    """South schema migration 0017: deferred rendering support.

    Adds cached-render columns (rendered_html, rendered_errors), the
    defer_rendering flag and three render_*_at timestamps to the
    'wiki_document' table.
    """
    def forwards(self, orm):
        """Apply the migration: add the six new columns."""
        # Adding field 'Document.rendered_html'
        db.add_column('wiki_document', 'rendered_html', self.gf('django.db.models.fields.TextField')(null=True, blank=True), keep_default=False)
        # Adding field 'Document.rendered_errors'
        db.add_column('wiki_document', 'rendered_errors', self.gf('django.db.models.fields.TextField')(null=True, blank=True), keep_default=False)
        # Adding field 'Document.defer_rendering'
        db.add_column('wiki_document', 'defer_rendering', self.gf('django.db.models.fields.BooleanField')(default=False, db_index=True), keep_default=False)
        # Adding field 'Document.render_scheduled_at'
        db.add_column('wiki_document', 'render_scheduled_at', self.gf('django.db.models.fields.DateTimeField')(null=True, db_index=True), keep_default=False)
        # Adding field 'Document.render_started_at'
        db.add_column('wiki_document', 'render_started_at', self.gf('django.db.models.fields.DateTimeField')(null=True, db_index=True), keep_default=False)
        # Adding field 'Document.last_rendered_at'
        db.add_column('wiki_document', 'last_rendered_at', self.gf('django.db.models.fields.DateTimeField')(null=True, db_index=True), keep_default=False)
    def backwards(self, orm):
        """Revert the migration: drop the six columns added by forwards()."""
        # Deleting field 'Document.rendered_html'
        db.delete_column('wiki_document', 'rendered_html')
        # Deleting field 'Document.rendered_errors'
        db.delete_column('wiki_document', 'rendered_errors')
        # Deleting field 'Document.defer_rendering'
        db.delete_column('wiki_document', 'defer_rendering')
        # Deleting field 'Document.render_scheduled_at'
        db.delete_column('wiki_document', 'render_scheduled_at')
        # Deleting field 'Document.render_started_at'
        db.delete_column('wiki_document', 'render_started_at')
        # Deleting field 'Document.last_rendered_at'
        db.delete_column('wiki_document', 'last_rendered_at')
    # Frozen ORM model snapshot used by South while running this migration.
    # Generated by South; not intended for hand editing.
    models = {
        'auth.group': {
            'Meta': {'object_name': 'Group'},
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
            'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
        },
        'auth.permission': {
            'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
            'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
        },
        'auth.user': {
            'Meta': {'object_name': 'User'},
            'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
            'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
            'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
            'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
            'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
            'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
            'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
        },
        'contenttypes.contenttype': {
            'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
            'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
        },
        'tidings.watch': {
            'Meta': {'object_name': 'Watch'},
            'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']", 'null': 'True', 'blank': 'True'}),
            'email': ('django.db.models.fields.EmailField', [], {'db_index': 'True', 'max_length': '75', 'null': 'True', 'blank': 'True'}),
            'event_type': ('django.db.models.fields.CharField', [], {'max_length': '30', 'db_index': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'db_index': 'True'}),
            'object_id': ('django.db.models.fields.PositiveIntegerField', [], {'null': 'True', 'db_index': 'True'}),
            'secret': ('django.db.models.fields.CharField', [], {'max_length': '10', 'null': 'True', 'blank': 'True'}),
            'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'null': 'True', 'blank': 'True'})
        },
        'wiki.document': {
            'Meta': {'unique_together': "(('parent', 'locale'), ('slug', 'locale'))", 'object_name': 'Document'},
            'category': ('django.db.models.fields.IntegerField', [], {'db_index': 'True'}),
            'current_revision': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'current_for+'", 'null': 'True', 'to': "orm['wiki.Revision']"}),
            'defer_rendering': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'db_index': 'True'}),
            'html': ('django.db.models.fields.TextField', [], {}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'is_localizable': ('django.db.models.fields.BooleanField', [], {'default': 'True', 'db_index': 'True'}),
            'is_template': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'db_index': 'True'}),
            'last_rendered_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'db_index': 'True'}),
            'locale': ('kuma.core.fields.LocaleField', [], {'default': "'en-US'", 'max_length': '7', 'db_index': 'True'}),
            'mindtouch_page_id': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'db_index': 'True'}),
            'modified': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'null': 'True', 'db_index': 'True', 'blank': 'True'}),
            'parent': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'translations'", 'null': 'True', 'to': "orm['wiki.Document']"}),
            'parent_topic': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'children'", 'null': 'True', 'to': "orm['wiki.Document']"}),
            'related_documents': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['wiki.Document']", 'through': "orm['wiki.RelatedDocument']", 'symmetrical': 'False'}),
            'render_scheduled_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'db_index': 'True'}),
            'render_started_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'db_index': 'True'}),
            'rendered_errors': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
            'rendered_html': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
            'slug': ('django.db.models.fields.CharField', [], {'max_length': '255', 'db_index': 'True'}),
            'title': ('django.db.models.fields.CharField', [], {'max_length': '255', 'db_index': 'True'})
        },
        'wiki.documenttag': {
            'Meta': {'object_name': 'DocumentTag'},
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '100', 'db_index': 'True'})
        },
        'wiki.editortoolbar': {
            'Meta': {'object_name': 'EditorToolbar'},
            'code': ('django.db.models.fields.TextField', [], {'max_length': '2000'}),
            'creator': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'created_toolbars'", 'to': "orm['auth.User']"}),
            'default': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
        },
        'wiki.firefoxversion': {
            'Meta': {'unique_together': "(('item_id', 'document'),)", 'object_name': 'FirefoxVersion'},
            'document': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'firefox_version_set'", 'to': "orm['wiki.Document']"}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'item_id': ('django.db.models.fields.IntegerField', [], {})
        },
        'wiki.helpfulvote': {
            'Meta': {'object_name': 'HelpfulVote'},
            'anonymous_id': ('django.db.models.fields.CharField', [], {'max_length': '40', 'db_index': 'True'}),
            'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'db_index': 'True'}),
            'creator': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'poll_votes'", 'null': 'True', 'to': "orm['auth.User']"}),
            'document': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'poll_votes'", 'to': "orm['wiki.Document']"}),
            'helpful': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'user_agent': ('django.db.models.fields.CharField', [], {'max_length': '1000'})
        },
        'wiki.operatingsystem': {
            'Meta': {'unique_together': "(('item_id', 'document'),)", 'object_name': 'OperatingSystem'},
            'document': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'operating_system_set'", 'to': "orm['wiki.Document']"}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'item_id': ('django.db.models.fields.IntegerField', [], {})
        },
        'wiki.relateddocument': {
            'Meta': {'ordering': "['-in_common']", 'object_name': 'RelatedDocument'},
            'document': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'related_from'", 'to': "orm['wiki.Document']"}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'in_common': ('django.db.models.fields.IntegerField', [], {}),
            'related': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'related_to'", 'to': "orm['wiki.Document']"})
        },
        'wiki.reviewtag': {
            'Meta': {'object_name': 'ReviewTag'},
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '100', 'db_index': 'True'})
        },
        'wiki.reviewtaggedrevision': {
            'Meta': {'object_name': 'ReviewTaggedRevision'},
            'content_object': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['wiki.Revision']"}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'tag': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['wiki.ReviewTag']"})
        },
        'wiki.revision': {
            'Meta': {'object_name': 'Revision'},
            'based_on': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['wiki.Revision']", 'null': 'True', 'blank': 'True'}),
            'comment': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
            'content': ('django.db.models.fields.TextField', [], {}),
            'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'creator': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'created_revisions'", 'to': "orm['auth.User']"}),
            'document': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'revisions'", 'to': "orm['wiki.Document']"}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'is_approved': ('django.db.models.fields.BooleanField', [], {'default': 'True', 'db_index': 'True'}),
            'is_mindtouch_migration': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'db_index': 'True'}),
            'keywords': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
            'mindtouch_old_id': ('django.db.models.fields.IntegerField', [], {'unique': 'True', 'null': 'True', 'db_index': 'True'}),
            'reviewed': ('django.db.models.fields.DateTimeField', [], {'null': 'True'}),
            'reviewer': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'reviewed_revisions'", 'null': 'True', 'to': "orm['auth.User']"}),
            'show_toc': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
            'significance': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
            'slug': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'db_index': 'True'}),
            'summary': ('django.db.models.fields.TextField', [], {}),
            'tags': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
            'title': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'db_index': 'True'})
        },
        'wiki.taggeddocument': {
            'Meta': {'object_name': 'TaggedDocument'},
            'content_object': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['wiki.Document']"}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'tag': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['wiki.DocumentTag']"})
        }
    }
    complete_apps = ['wiki']
| mpl-2.0 |
fearthecowboy/pygments | Pygments/pygments-lib/pygments/lexers/actionscript.py | 47 | 11179 | # -*- coding: utf-8 -*-
"""
pygments.lexers.actionscript
~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Lexers for ActionScript and MXML.
:copyright: Copyright 2006-2015 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
import re
from pygments.lexer import RegexLexer, bygroups, using, this, words, default
from pygments.token import Text, Comment, Operator, Keyword, Name, String, \
Number, Punctuation
__all__ = ['ActionScriptLexer', 'ActionScript3Lexer', 'MxmlLexer']
class ActionScriptLexer(RegexLexer):
    """
    For ActionScript source code.

    .. versionadded:: 0.9
    """

    name = 'ActionScript'
    aliases = ['as', 'actionscript']
    filenames = ['*.as']
    mimetypes = ['application/x-actionscript', 'text/x-actionscript',
                 'text/actionscript']

    flags = re.DOTALL
    tokens = {
        'root': [
            (r'\s+', Text),
            (r'//.*?\n', Comment.Single),
            (r'/\*.*?\*/', Comment.Multiline),
            (r'/(\\\\|\\/|[^/\n])*/[gim]*', String.Regex),
            (r'[~^*!%&<>|+=:;,/?\\-]+', Operator),
            (r'[{}\[\]();.]+', Punctuation),
            (words((
                'case', 'default', 'for', 'each', 'in', 'while', 'do', 'break',
                'return', 'continue', 'if', 'else', 'throw', 'try', 'catch',
                'var', 'with', 'new', 'typeof', 'arguments', 'instanceof', 'this',
                'switch'), suffix=r'\b'),
             Keyword),
            (words((
                'class', 'public', 'final', 'internal', 'native', 'override', 'private',
                'protected', 'static', 'import', 'extends', 'implements', 'interface',
                'intrinsic', 'return', 'super', 'dynamic', 'function', 'const', 'get',
                'namespace', 'package', 'set'), suffix=r'\b'),
             Keyword.Declaration),
            (r'(true|false|null|NaN|Infinity|-Infinity|undefined|Void)\b',
             Keyword.Constant),
            # BUGFIX: a comma was missing after 'IDynamicPropertyOutput',
            # which silently concatenated it with 'IDynamicPropertyWriter'
            # so neither builtin name was ever highlighted.
            (words((
                'Accessibility', 'AccessibilityProperties', 'ActionScriptVersion',
                'ActivityEvent', 'AntiAliasType', 'ApplicationDomain', 'AsBroadcaster', 'Array',
                'AsyncErrorEvent', 'AVM1Movie', 'BevelFilter', 'Bitmap', 'BitmapData',
                'BitmapDataChannel', 'BitmapFilter', 'BitmapFilterQuality', 'BitmapFilterType',
                'BlendMode', 'BlurFilter', 'Boolean', 'ByteArray', 'Camera', 'Capabilities', 'CapsStyle',
                'Class', 'Color', 'ColorMatrixFilter', 'ColorTransform', 'ContextMenu',
                'ContextMenuBuiltInItems', 'ContextMenuEvent', 'ContextMenuItem',
                'ConvultionFilter', 'CSMSettings', 'DataEvent', 'Date', 'DefinitionError',
                'DeleteObjectSample', 'Dictionary', 'DisplacmentMapFilter', 'DisplayObject',
                'DisplacmentMapFilterMode', 'DisplayObjectContainer', 'DropShadowFilter',
                'Endian', 'EOFError', 'Error', 'ErrorEvent', 'EvalError', 'Event', 'EventDispatcher',
                'EventPhase', 'ExternalInterface', 'FileFilter', 'FileReference',
                'FileReferenceList', 'FocusDirection', 'FocusEvent', 'Font', 'FontStyle', 'FontType',
                'FrameLabel', 'FullScreenEvent', 'Function', 'GlowFilter', 'GradientBevelFilter',
                'GradientGlowFilter', 'GradientType', 'Graphics', 'GridFitType', 'HTTPStatusEvent',
                'IBitmapDrawable', 'ID3Info', 'IDataInput', 'IDataOutput', 'IDynamicPropertyOutput',
                'IDynamicPropertyWriter', 'IEventDispatcher', 'IExternalizable',
                'IllegalOperationError', 'IME', 'IMEConversionMode', 'IMEEvent', 'int',
                'InteractiveObject', 'InterpolationMethod', 'InvalidSWFError', 'InvokeEvent',
                'IOError', 'IOErrorEvent', 'JointStyle', 'Key', 'Keyboard', 'KeyboardEvent', 'KeyLocation',
                'LineScaleMode', 'Loader', 'LoaderContext', 'LoaderInfo', 'LoadVars', 'LocalConnection',
                'Locale', 'Math', 'Matrix', 'MemoryError', 'Microphone', 'MorphShape', 'Mouse', 'MouseEvent',
                'MovieClip', 'MovieClipLoader', 'Namespace', 'NetConnection', 'NetStatusEvent',
                'NetStream', 'NewObjectSample', 'Number', 'Object', 'ObjectEncoding', 'PixelSnapping',
                'Point', 'PrintJob', 'PrintJobOptions', 'PrintJobOrientation', 'ProgressEvent', 'Proxy',
                'QName', 'RangeError', 'Rectangle', 'ReferenceError', 'RegExp', 'Responder', 'Sample',
                'Scene', 'ScriptTimeoutError', 'Security', 'SecurityDomain', 'SecurityError',
                'SecurityErrorEvent', 'SecurityPanel', 'Selection', 'Shape', 'SharedObject',
                'SharedObjectFlushStatus', 'SimpleButton', 'Socket', 'Sound', 'SoundChannel',
                'SoundLoaderContext', 'SoundMixer', 'SoundTransform', 'SpreadMethod', 'Sprite',
                'StackFrame', 'StackOverflowError', 'Stage', 'StageAlign', 'StageDisplayState',
                'StageQuality', 'StageScaleMode', 'StaticText', 'StatusEvent', 'String', 'StyleSheet',
                'SWFVersion', 'SyncEvent', 'SyntaxError', 'System', 'TextColorType', 'TextField',
                'TextFieldAutoSize', 'TextFieldType', 'TextFormat', 'TextFormatAlign',
                'TextLineMetrics', 'TextRenderer', 'TextSnapshot', 'Timer', 'TimerEvent', 'Transform',
                'TypeError', 'uint', 'URIError', 'URLLoader', 'URLLoaderDataFormat', 'URLRequest',
                'URLRequestHeader', 'URLRequestMethod', 'URLStream', 'URLVariabeles', 'VerifyError',
                'Video', 'XML', 'XMLDocument', 'XMLList', 'XMLNode', 'XMLNodeType', 'XMLSocket',
                'XMLUI'), suffix=r'\b'),
             Name.Builtin),
            (words((
                'decodeURI', 'decodeURIComponent', 'encodeURI', 'escape', 'eval', 'isFinite', 'isNaN',
                'isXMLName', 'clearInterval', 'fscommand', 'getTimer', 'getURL', 'getVersion',
                'parseFloat', 'parseInt', 'setInterval', 'trace', 'updateAfterEvent',
                'unescape'), suffix=r'\b'),
             Name.Function),
            (r'[$a-zA-Z_]\w*', Name.Other),
            (r'[0-9][0-9]*\.[0-9]+([eE][0-9]+)?[fd]?', Number.Float),
            (r'0x[0-9a-f]+', Number.Hex),
            (r'[0-9]+', Number.Integer),
            (r'"(\\\\|\\"|[^"])*"', String.Double),
            (r"'(\\\\|\\'|[^'])*'", String.Single),
        ]
    }
class ActionScript3Lexer(RegexLexer):
    """
    For ActionScript 3 source code.

    .. versionadded:: 0.11
    """

    name = 'ActionScript 3'
    aliases = ['as3', 'actionscript3']
    filenames = ['*.as']
    mimetypes = ['application/x-actionscript3', 'text/x-actionscript3',
                 'text/actionscript3']

    identifier = r'[$a-zA-Z_]\w*'
    # BUGFIX: now a raw string -- the original plain string contained the
    # invalid escape sequences '\.' and '\w' (same resulting regex, but a
    # DeprecationWarning/SyntaxWarning on Python 3).
    typeidentifier = identifier + r'(?:\.<\w+>)?'

    flags = re.DOTALL | re.MULTILINE
    tokens = {
        'root': [
            (r'\s+', Text),
            (r'(function\s+)(' + identifier + r')(\s*)(\()',
             bygroups(Keyword.Declaration, Name.Function, Text, Operator),
             'funcparams'),
            (r'(var|const)(\s+)(' + identifier + r')(\s*)(:)(\s*)(' +
             typeidentifier + r')',
             bygroups(Keyword.Declaration, Text, Name, Text, Punctuation, Text,
                      Keyword.Type)),
            (r'(import|package)(\s+)((?:' + identifier + r'|\.)+)(\s*)',
             bygroups(Keyword, Text, Name.Namespace, Text)),
            (r'(new)(\s+)(' + typeidentifier + r')(\s*)(\()',
             bygroups(Keyword, Text, Keyword.Type, Text, Operator)),
            (r'//.*?\n', Comment.Single),
            (r'/\*.*?\*/', Comment.Multiline),
            (r'/(\\\\|\\/|[^\n])*/[gisx]*', String.Regex),
            (r'(\.)(' + identifier + r')', bygroups(Operator, Name.Attribute)),
            (r'(case|default|for|each|in|while|do|break|return|continue|if|else|'
             r'throw|try|catch|with|new|typeof|arguments|instanceof|this|'
             r'switch|import|include|as|is)\b',
             Keyword),
            (r'(class|public|final|internal|native|override|private|protected|'
             r'static|import|extends|implements|interface|intrinsic|return|super|'
             r'dynamic|function|const|get|namespace|package|set)\b',
             Keyword.Declaration),
            (r'(true|false|null|NaN|Infinity|-Infinity|undefined|void)\b',
             Keyword.Constant),
            (r'(decodeURI|decodeURIComponent|encodeURI|escape|eval|isFinite|isNaN|'
             r'isXMLName|clearInterval|fscommand|getTimer|getURL|getVersion|'
             r'isFinite|parseFloat|parseInt|setInterval|trace|updateAfterEvent|'
             r'unescape)\b', Name.Function),
            (identifier, Name),
            (r'[0-9][0-9]*\.[0-9]+([eE][0-9]+)?[fd]?', Number.Float),
            (r'0x[0-9a-f]+', Number.Hex),
            (r'[0-9]+', Number.Integer),
            (r'"(\\\\|\\"|[^"])*"', String.Double),
            (r"'(\\\\|\\'|[^'])*'", String.Single),
            (r'[~^*!%&<>|+=:;,/?\\{}\[\]().-]+', Operator),
        ],
        'funcparams': [
            (r'\s+', Text),
            (r'(\s*)(\.\.\.)?(' + identifier + r')(\s*)(:)(\s*)(' +
             typeidentifier + r'|\*)(\s*)',
             bygroups(Text, Punctuation, Name, Text, Operator, Text,
                      Keyword.Type, Text), 'defval'),
            (r'\)', Operator, 'type')
        ],
        'type': [
            (r'(\s*)(:)(\s*)(' + typeidentifier + r'|\*)',
             bygroups(Text, Operator, Text, Keyword.Type), '#pop:2'),
            (r'\s+', Text, '#pop:2'),
            default('#pop:2')
        ],
        'defval': [
            (r'(=)(\s*)([^(),]+)(\s*)(,?)',
             bygroups(Operator, Text, using(this), Text, Operator), '#pop'),
            (r',', Operator, '#pop'),
            default('#pop')
        ]
    }

    def analyse_text(text):
        # 'name: Type' style declarations are a strong AS3 hint.
        if re.match(r'\w+\s*:\s*\w', text):
            return 0.3
        return 0
class MxmlLexer(RegexLexer):
    """
    For MXML markup.
    Nested AS3 in <script> tags is highlighted by the appropriate lexer.
    .. versionadded:: 1.1
    """
    flags = re.MULTILINE | re.DOTALL
    name = 'MXML'
    aliases = ['mxml']
    filenames = ['*.mxml']
    # BUG FIX: this attribute was misspelled 'mimetimes', so RegexLexer's
    # MIME-type based lexer lookup could never find this lexer.
    mimetypes = ['text/xml', 'application/xml']
    tokens = {
        'root': [
            # Plain character data between tags/entities.
            ('[^<&]+', Text),
            (r'&\S*?;', Name.Entity),
            # CDATA sections contain ActionScript 3 and are sub-lexed.
            (r'(\<\!\[CDATA\[)(.*?)(\]\]\>)',
             bygroups(String, using(ActionScript3Lexer), String)),
            ('<!--', Comment, 'comment'),
            (r'<\?.*?\?>', Comment.Preproc),
            ('<![^>]*>', Comment.Preproc),
            (r'<\s*[\w:.-]+', Name.Tag, 'tag'),
            (r'<\s*/\s*[\w:.-]+\s*>', Name.Tag),
        ],
        'comment': [
            ('[^-]+', Comment),
            ('-->', Comment, '#pop'),
            ('-', Comment),
        ],
        'tag': [
            (r'\s+', Text),
            (r'[\w.:-]+\s*=', Name.Attribute, 'attr'),
            (r'/?\s*>', Name.Tag, '#pop'),
        ],
        'attr': [
            # FIX: use a raw string for the escape sequence (was '\s+',
            # an invalid escape in modern Python).
            (r'\s+', Text),
            ('".*?"', String, '#pop'),
            ("'.*?'", String, '#pop'),
            (r'[^\s>]+', String, '#pop'),
        ],
    }
| bsd-2-clause |
palerdot/calibre | src/calibre/ebooks/metadata/pdb.py | 24 | 1552 | # -*- coding: utf-8 -*-
'''
Read meta information from pdb files.
'''
__license__ = 'GPL v3'
__copyright__ = '2009, John Schember <john@nachtimwald.com>'
__docformat__ = 'restructuredtext en'
import re
from calibre.ebooks.metadata import MetaInformation
from calibre.ebooks.pdb.header import PdbHeaderReader
from calibre.ebooks.metadata.ereader import get_metadata as get_eReader
from calibre.ebooks.metadata.plucker import get_metadata as get_plucker
from calibre.ebooks.metadata.haodoo import get_metadata as get_Haodoo
# Map from the PDB header ident code (creator/type bytes in the PalmDB
# header) to the metadata *reader* for that sub-format.
MREADER = {
    'PNPdPPrs' : get_eReader,
    'PNRdPPrs' : get_eReader,
    'DataPlkr' : get_plucker,
    'BOOKMTIT' : get_Haodoo,
    'BOOKMTIU' : get_Haodoo,
}
from calibre.ebooks.metadata.ereader import set_metadata as set_eReader
# Map from PDB ident to the metadata *writer*. Only the eReader variants
# support writing metadata back into the container.
MWRITER = {
    'PNPdPPrs' : set_eReader,
    'PNRdPPrs' : set_eReader,
}
def get_metadata(stream, extract_cover=True):
    """
    Return metadata as a L{MetaInfo} object.

    Dispatches on the PDB header ident to a format-specific reader; when
    no reader is registered, falls back to the title stored in the header.
    """
    header = PdbHeaderReader(stream)
    reader = MREADER.get(header.ident)
    if reader is not None:
        return reader(stream, extract_cover)
    # Unknown PDB sub-format: the header title is all we can recover.
    return MetaInformation(header.title, [_('Unknown')])
def set_metadata(stream, mi):
    """Write metadata from ``mi`` into the PDB container ``stream``.

    Delegates to the format-specific writer when one is registered, then
    rewrites the 31-byte NUL-padded title field at the start of the file.
    """
    stream.seek(0)
    header = PdbHeaderReader(stream)
    writer = MWRITER.get(header.ident, None)
    if writer:
        writer(stream, mi)
    stream.seek(0)
    # Sanitise the title to the PDB-safe character set, pad/truncate to
    # exactly 31 bytes, and terminate with a NUL.
    safe_title = re.sub('[^-A-Za-z0-9 ]+', '_', mi.title)
    field = safe_title.ljust(31, '\x00')[:31].encode('ascii', 'replace')
    stream.write('%s\x00' % field)
| gpl-3.0 |
Blazemeter/taurus | tests/resources/selenium/generated_from_requests_appium_browser.py | 1 | 2336 | # coding=utf-8
import logging
import random
import string
import sys
import unittest
from time import time, sleep
import apiritif
import os
import re
from appium import webdriver
from selenium.common.exceptions import NoSuchElementException, TimeoutException
from selenium.webdriver.common.by import By
from selenium.webdriver.common.action_chains import ActionChains
from selenium.webdriver.support.ui import Select
from selenium.webdriver.support import expected_conditions as econd
from selenium.webdriver.support.wait import WebDriverWait
from selenium.webdriver.common.keys import Keys
from bzt.resources.selenium_extras import dialogs_replace, get_locator, wait_for, waiter
class TestLocScAppium(unittest.TestCase):
    """Generated Taurus/apiritif scenario 'loc_sc_appium': drives Chrome on
    Android through a local Appium server and checks blazedemo.com."""
    def setUp(self):
        # Per-scenario variable store (empty for this scenario).
        self.vars = {}
        timeout = 3.5
        self.driver = None
        options = webdriver.ChromeOptions()
        options.add_argument('--no-sandbox')
        options.add_argument('--disable-dev-shm-usage')
        # Ignore unexpected alerts instead of failing the session.
        options.set_capability('unhandledPromptBehavior', 'ignore')
        # Remote session against the default local Appium endpoint.
        self.driver = webdriver.Remote(command_executor='http://localhost:4723/wd/hub',
                                       desired_capabilities={'browserName': 'chrome', 'deviceName': '',
                                                             'platformName': 'android'},
                                       options=options)
        self.driver.implicitly_wait(timeout)
        # Share driver/timeout with apiritif helpers (wait_for, dialogs_replace).
        apiritif.put_into_thread_store(timeout=timeout, func_mode=False, driver=self.driver, windows={},
                                       scenario_name='loc_sc_appium')
    def _1_(self):
        # Transaction '/': open the landing page and assert title/body.
        with apiritif.smart_transaction('/'):
            self.driver.get('http://blazedemo.com/')
            dialogs_replace()
            wait_for('present', [{'xpath': "//input[@type='submit']"}], 3.5)
            self.assertEqual(self.driver.title, 'BlazeDemo')
            body = self.driver.page_source
            # Negative assertion: 'contained_text' must NOT appear in the body.
            re_pattern = re.compile('contained_text')
            self.assertEqual(0, len(re.findall(re_pattern, body)), "Assertion: 'contained_text' found in BODY")
    def _2_empty(self):
        # Placeholder transaction with no actions.
        with apiritif.smart_transaction('empty'):
            pass
    def test_locscappium(self):
        # Run the scenario's transactions in declared order.
        self._1_()
        self._2_empty()
    def tearDown(self):
        # Always release the Appium session, even after failures.
        if self.driver:
            self.driver.quit()
| apache-2.0 |
webmull/phantomjs | src/qt/qtwebkit/Tools/Scripts/webkitpy/common/net/web_mock.py | 184 | 2241 | # Copyright (C) 2011 Google Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import StringIO
class MockWeb(object):
    """In-memory stand-in for Web: serves canned url -> payload mappings
    and records every url requested in ``urls_fetched``."""
    def __init__(self, urls=None):
        self.urls = urls if urls else {}
        self.urls_fetched = []
    def get_binary(self, url, convert_404_to_None=False):
        self.urls_fetched.append(url)
        try:
            return self.urls[url]
        except KeyError:
            # Unknown url: hand back a recognisable mock payload.
            return "MOCK Web result, convert 404 to None=%s" % convert_404_to_None
# FIXME: Classes which are using Browser probably want to use Web instead.
class MockBrowser(object):
    """Minimal stand-in for a mechanize-style Browser.

    open/select_form are no-ops; form fields set via item assignment are
    recorded in ``params``; submit() returns an empty response stream.
    """
    def __init__(self):
        # BUG FIX: 'params' used to be a class-level dict, so form values
        # leaked between every MockBrowser instance (and between tests).
        # Each instance now gets its own dict.
        self.params = {}
    def open(self, url):
        pass
    def select_form(self, name):
        pass
    def __setitem__(self, key, value):
        self.params[key] = value
    def submit(self):
        return StringIO.StringIO()
| bsd-3-clause |
brandond/ansible | lib/ansible/module_utils/storage/emc/emc_vnx.py | 79 | 1915 | # This code is part of Ansible, but is an independent component.
# This particular file snippet, and this file snippet only, is BSD licensed.
# Modules you write using this snippet, which is embedded dynamically by Ansible
# still belong to the author of the module, and may assign their own license
# to the complete work.
#
# (c) 2018 Luca 'remix_tj' Lorenzetto
#
# Redistribution and use in source and binary forms, with or without modification,
# are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
# IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
# USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Shared Ansible argument spec for EMC VNX modules: storage-processor
# address plus credentials (defaults sysadmin/sysadmin; the password is
# excluded from logs via no_log).
emc_vnx_argument_spec = {
    'sp_address': {'type': 'str', 'required': True},
    'sp_user': {'type': 'str', 'required': False, 'default': 'sysadmin'},
    'sp_password': {'type': 'str', 'required': False, 'default': 'sysadmin',
                    'no_log': True},
}
| gpl-3.0 |
simonwydooghe/ansible | lib/ansible/modules/storage/purestorage/purefa_hg.py | 21 | 7794 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2018, Simon Dodsley (simon@purestorage.com)
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = r'''
---
module: purefa_hg
version_added: '2.4'
short_description: Manage hostgroups on Pure Storage FlashArrays
description:
- Create, delete or modify hostgroups on Pure Storage FlashArrays.
author:
- Pure Storage ansible Team (@sdodsley) <pure-ansible-team@purestorage.com>
options:
hostgroup:
description:
- The name of the hostgroup.
type: str
required: true
state:
description:
- Define whether the hostgroup should exist or not.
type: str
default: present
choices: [ absent, present ]
host:
type: list
description:
- List of existing hosts to add to hostgroup.
volume:
type: list
description:
- List of existing volumes to add to hostgroup.
extends_documentation_fragment:
- purestorage.fa
'''
EXAMPLES = r'''
- name: Create empty hostgroup
purefa_hg:
hostgroup: foo
fa_url: 10.10.10.2
api_token: e31060a7-21fc-e277-6240-25983c6c4592
- name: Add hosts and volumes to existing or new hostgroup
purefa_hg:
hostgroup: foo
host:
- host1
- host2
volume:
- vol1
- vol2
fa_url: 10.10.10.2
api_token: e31060a7-21fc-e277-6240-25983c6c4592
- name: Delete hosts and volumes from hostgroup
purefa_hg:
hostgroup: foo
host:
- host1
- host2
volume:
- vol1
- vol2
fa_url: 10.10.10.2
api_token: e31060a7-21fc-e277-6240-25983c6c4592
state: absent
# This will disconnect all hosts and volumes in the hostgroup
- name: Delete hostgroup
purefa_hg:
hostgroup: foo
fa_url: 10.10.10.2
api_token: e31060a7-21fc-e277-6240-25983c6c4592
state: absent
- name: Create host group with hosts and volumes
purefa_hg:
hostgroup: bar
host:
- host1
- host2
volume:
- vol1
- vol2
fa_url: 10.10.10.2
api_token: e31060a7-21fc-e277-6240-25983c6c4592
'''
RETURN = r'''
'''
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.pure import get_system, purefa_argument_spec
try:
from purestorage import purestorage
HAS_PURESTORAGE = True
except ImportError:
HAS_PURESTORAGE = False
def get_hostgroup(module, array):
    """Return the hostgroup dict whose name matches the 'hostgroup' module
    parameter, or None when no such hostgroup exists on the array."""
    wanted = module.params['hostgroup']
    for candidate in array.list_hgroups():
        if candidate["name"] == wanted:
            return candidate
    return None
def make_hostgroup(module, array):
    """Create the hostgroup, attach any requested hosts/volumes, and exit
    the module with the resulting 'changed' flag."""
    name = module.params['hostgroup']
    try:
        array.create_hgroup(name)
        changed = True
    except Exception:
        # Creation failed (e.g. it already exists): report unchanged, but
        # still attempt to attach hosts/volumes, matching the module flow.
        changed = False
    hosts = module.params['host']
    if hosts:
        array.set_hgroup(name, hostlist=hosts)
    volumes = module.params['volume']
    if volumes:
        for volume in volumes:
            array.connect_hgroup(name, volume)
    module.exit_json(changed=changed)
def update_hostgroup(module, array):
    """Add or remove hosts/volumes on an existing hostgroup.

    state=present attaches the requested hosts/volumes that are not yet in
    the group; state=absent detaches the requested hosts/volumes that are
    currently in it. Exits the module with the aggregate 'changed' flag.
    """
    changed = False
    hgroup = get_hostgroup(module, array)
    volumes = array.list_hgroup_connections(module.params['hostgroup'])
    if module.params['state'] == "present":
        if module.params['host']:
            new_hosts = list(set(module.params['host']).difference(hgroup['hosts']))
            if new_hosts:
                try:
                    array.set_hgroup(module.params['hostgroup'], addhostlist=new_hosts)
                    changed = True
                except Exception:
                    # BUG FIX: was 'module.fail_josn' (typo), which raised
                    # AttributeError instead of reporting the failure.
                    module.fail_json(msg='Failed to add host(s) to hostgroup')
        if module.params['volume']:
            if volumes:
                # Connections are keyed by 'vol'; only connect volumes that
                # are not already attached.
                current_vols = [vol['vol'] for vol in volumes]
                new_volumes = list(set(module.params['volume']).difference(set(current_vols)))
            else:
                new_volumes = module.params['volume']
            for cvol in new_volumes:
                try:
                    array.connect_hgroup(module.params['hostgroup'], cvol)
                    changed = True
                except Exception:
                    # NOTE(review): preserved best-effort behavior -- a late
                    # failure resets 'changed' even after earlier successes.
                    changed = False
    else:
        if module.params['host']:
            old_hosts = list(set(module.params['host']).intersection(hgroup['hosts']))
            if old_hosts:
                try:
                    array.set_hgroup(module.params['hostgroup'], remhostlist=old_hosts)
                    changed = True
                except Exception:
                    changed = False
        if module.params['volume']:
            # BUG FIX: connections are keyed 'vol' (as in the 'present'
            # branch and delete_hostgroup), not 'name'; and the volumes to
            # disconnect are those both requested AND currently connected
            # (intersection) -- the old 'difference' selected volumes that
            # were NOT connected, so every disconnect failed.
            connected = set(vol['vol'] for vol in volumes)
            old_volumes = list(set(module.params['volume']).intersection(connected))
            for cvol in old_volumes:
                try:
                    array.disconnect_hgroup(module.params['hostgroup'], cvol)
                    changed = True
                except Exception:
                    changed = False
    module.exit_json(changed=changed)
def delete_hostgroup(module, array):
    """Disconnect every volume, remove every host, then delete the
    hostgroup; exits the module reporting whether all steps succeeded."""
    name = module.params['hostgroup']
    changed = True
    try:
        # Detach all connected volumes first; keep going on failure but
        # flag the run as unchanged.
        for connection in array.list_hgroup_connections(name):
            try:
                array.disconnect_hgroup(name, connection["vol"])
            except Exception:
                changed = False
        group = array.get_hgroup(name)
        try:
            array.set_hgroup(name, remhostlist=group['hosts'])
            try:
                array.delete_hgroup(name)
            except Exception:
                changed = False
        except Exception:
            # Host removal failed: deletion is skipped entirely.
            changed = False
    except Exception:
        # Listing connections / fetching the group failed outright.
        changed = False
    module.exit_json(changed=changed)
def main():
    """Entry point: parse module arguments, verify referenced objects exist,
    and dispatch to the create/update/delete handler for the hostgroup."""
    argument_spec = purefa_argument_spec()
    argument_spec.update(dict(
        hostgroup=dict(type='str', required=True),
        state=dict(type='str', default='present', choices=['absent', 'present']),
        host=dict(type='list'),
        volume=dict(type='list'),
    ))
    module = AnsibleModule(argument_spec, supports_check_mode=False)
    if not HAS_PURESTORAGE:
        module.fail_json(msg='purestorage sdk is required for this module in host')
    state = module.params['state']
    array = get_system(module)
    hostgroup = get_hostgroup(module, array)
    # Fail fast if any referenced host/volume is missing on the array.
    # NOTE(review): assumes get_host/get_volume raise for missing objects --
    # confirm against the purestorage SDK.
    if module.params['host']:
        try:
            for hst in module.params['host']:
                array.get_host(hst)
        except Exception:
            module.fail_json(msg='Host {0} not found'.format(hst))
    if module.params['volume']:
        try:
            for vol in module.params['volume']:
                array.get_volume(vol)
        except Exception:
            module.fail_json(msg='Volume {0} not found'.format(vol))
    # Dispatch; every handler ends the run itself via module.exit_json.
    if hostgroup and state == 'present':
        update_hostgroup(module, array)
    elif hostgroup and module.params['volume'] and state == 'absent':
        update_hostgroup(module, array)
    elif hostgroup and module.params['host'] and state == 'absent':
        update_hostgroup(module, array)
    elif hostgroup and state == 'absent':
        delete_hostgroup(module, array)
    elif hostgroup is None and state == 'absent':
        # Nothing to delete: idempotent no-op.
        module.exit_json(changed=False)
    else:
        make_hostgroup(module, array)
if __name__ == '__main__':
    main()
| gpl-3.0 |
PGower/PyCanvas | pycanvas/apis/quiz_questions.py | 1 | 25992 | """QuizQuestions API Version 1.0.
This API client was generated using a template. Make sure this code is valid before using it.
"""
import logging
from datetime import date, datetime
from base import BaseCanvasAPI
from base import BaseModel
class QuizQuestionsAPI(BaseCanvasAPI):
    """QuizQuestions API Version 1.0.

    Wraps the Canvas LMS quiz-question REST endpoints; every method
    delegates the HTTP call to BaseCanvasAPI.generic_request.
    """
    # Valid values for the question[question_type] form field; shared by
    # create_single_quiz_question and update_existing_quiz_question.
    _QUESTION_TYPES = [
        "calculated_question", "essay_question", "file_upload_question",
        "fill_in_multiple_blanks_question", "matching_question",
        "multiple_answers_question", "multiple_choice_question",
        "multiple_dropdowns_question", "numerical_question",
        "short_answer_question", "text_only_question", "true_false_question",
    ]
    # (keyword-argument name, Canvas form field) pairs for the optional
    # question attributes accepted by create and update, in submit order.
    _QUESTION_FIELDS = [
        ("question_question_name", "question[question_name]"),
        ("question_question_text", "question[question_text]"),
        ("question_quiz_group_id", "question[quiz_group_id]"),
        ("question_question_type", "question[question_type]"),
        ("question_position", "question[position]"),
        ("question_points_possible", "question[points_possible]"),
        ("question_correct_comments", "question[correct_comments]"),
        ("question_incorrect_comments", "question[incorrect_comments]"),
        ("question_neutral_comments", "question[neutral_comments]"),
        ("question_text_after_answers", "question[text_after_answers]"),
        ("question_answers", "question[answers]"),
    ]
    def __init__(self, *args, **kwargs):
        """Init method for QuizQuestionsAPI."""
        super(QuizQuestionsAPI, self).__init__(*args, **kwargs)
        self.logger = logging.getLogger("pycanvas.QuizQuestionsAPI")
    def _question_form_data(self, values):
        """Build the question[...] form-data dict shared by create/update.

        REFACTOR: replaces ~50 lines of identical marshalling that was
        duplicated in create_single_quiz_question and
        update_existing_quiz_question.

        values -- mapping of keyword-argument name to supplied value
        (typically the caller's locals()); None entries are omitted and
        question_question_type is validated against _QUESTION_TYPES,
        exactly as the per-method checks it replaces did.
        """
        data = {}
        for param, field in self._QUESTION_FIELDS:
            value = values.get(param)
            if value is None:
                continue
            if param == "question_question_type":
                self._validate_enum(value, self._QUESTION_TYPES)
            data[field] = value
        return data
    def list_questions_in_quiz_or_submission(self, quiz_id, course_id, quiz_submission_attempt=None, quiz_submission_id=None):
        """
        List questions in a quiz or a submission.

        Returns the list of QuizQuestions in this quiz. When
        quiz_submission_id is given (you must then also pass
        quiz_submission_attempt) the questions are those that were
        presented for that submission, which may differ from the latest
        quiz version's set of questions.
        """
        path = {"course_id": course_id, "quiz_id": quiz_id}
        data = {}
        params = {}
        if quiz_submission_id is not None:
            params["quiz_submission_id"] = quiz_submission_id
        if quiz_submission_attempt is not None:
            params["quiz_submission_attempt"] = quiz_submission_attempt
        self.logger.debug("GET /api/v1/courses/{course_id}/quizzes/{quiz_id}/questions with query params: {params} and form data: {data}".format(params=params, data=data, **path))
        return self.generic_request("GET", "/api/v1/courses/{course_id}/quizzes/{quiz_id}/questions".format(**path), data=data, params=params, all_pages=True)
    def get_single_quiz_question(self, id, quiz_id, course_id):
        """
        Get a single quiz question.

        Returns the quiz question with the given id.
        """
        path = {"course_id": course_id, "quiz_id": quiz_id, "id": id}
        data = {}
        params = {}
        self.logger.debug("GET /api/v1/courses/{course_id}/quizzes/{quiz_id}/questions/{id} with query params: {params} and form data: {data}".format(params=params, data=data, **path))
        return self.generic_request("GET", "/api/v1/courses/{course_id}/quizzes/{quiz_id}/questions/{id}".format(**path), data=data, params=params, single_item=True)
    def create_single_quiz_question(self, quiz_id, course_id, question_answers=None, question_correct_comments=None, question_incorrect_comments=None, question_neutral_comments=None, question_points_possible=None, question_position=None, question_question_name=None, question_question_text=None, question_question_type=None, question_quiz_group_id=None, question_text_after_answers=None):
        """
        Create a single quiz question.

        Create a new quiz question for this quiz. All question_* keyword
        arguments are optional; see _QUESTION_FIELDS for the form fields
        they map to.
        """
        # locals() here holds exactly the keyword arguments just received;
        # the helper picks out only the question_* names it knows about.
        data = self._question_form_data(locals())
        path = {"course_id": course_id, "quiz_id": quiz_id}
        params = {}
        self.logger.debug("POST /api/v1/courses/{course_id}/quizzes/{quiz_id}/questions with query params: {params} and form data: {data}".format(params=params, data=data, **path))
        return self.generic_request("POST", "/api/v1/courses/{course_id}/quizzes/{quiz_id}/questions".format(**path), data=data, params=params, single_item=True)
    def update_existing_quiz_question(self, id, quiz_id, course_id, question_answers=None, question_correct_comments=None, question_incorrect_comments=None, question_neutral_comments=None, question_points_possible=None, question_position=None, question_question_name=None, question_question_text=None, question_question_type=None, question_quiz_group_id=None, question_text_after_answers=None):
        """
        Update an existing quiz question.

        Updates an existing quiz question for this quiz. All question_*
        keyword arguments are optional; see _QUESTION_FIELDS for the form
        fields they map to.
        """
        data = self._question_form_data(locals())
        path = {"course_id": course_id, "quiz_id": quiz_id, "id": id}
        params = {}
        self.logger.debug("PUT /api/v1/courses/{course_id}/quizzes/{quiz_id}/questions/{id} with query params: {params} and form data: {data}".format(params=params, data=data, **path))
        return self.generic_request("PUT", "/api/v1/courses/{course_id}/quizzes/{quiz_id}/questions/{id}".format(**path), data=data, params=params, single_item=True)
    def delete_quiz_question(self, id, quiz_id, course_id):
        """
        Delete a quiz question.

        <b>204 No Content</b> response code is returned if the deletion was successful.
        """
        path = {"course_id": course_id, "quiz_id": quiz_id, "id": id}
        data = {}
        params = {}
        self.logger.debug("DELETE /api/v1/courses/{course_id}/quizzes/{quiz_id}/questions/{id} with query params: {params} and form data: {data}".format(params=params, data=data, **path))
        return self.generic_request("DELETE", "/api/v1/courses/{course_id}/quizzes/{quiz_id}/questions/{id}".format(**path), data=data, params=params, no_data=True)
class Answer(BaseModel):
"""Answer Model."""
def __init__(self, answer_text, answer_weight, text_after_answers=None, answer_match_left=None, answer_comments=None, margin=None, matching_answer_incorrect_matches=None, approximate=None, start=None, answer_match_right=None, precision=None, numerical_answer_type=None, end=None, blank_id=None, exact=None, id=None):
"""Init method for Answer class."""
self._text_after_answers = text_after_answers
self._answer_match_left = answer_match_left
self._answer_comments = answer_comments
self._margin = margin
self._matching_answer_incorrect_matches = matching_answer_incorrect_matches
self._approximate = approximate
self._start = start
self._answer_text = answer_text
self._answer_weight = answer_weight
self._answer_match_right = answer_match_right
self._precision = precision
self._numerical_answer_type = numerical_answer_type
self._end = end
self._blank_id = blank_id
self._exact = exact
self._id = id
self.logger = logging.getLogger('pycanvas.Answer')
@property
def text_after_answers(self):
"""Used in missing word questions. The text to follow the missing word."""
return self._text_after_answers
@text_after_answers.setter
def text_after_answers(self, value):
"""Setter for text_after_answers property."""
self.logger.warn("Setting values on text_after_answers will NOT update the remote Canvas instance.")
self._text_after_answers = value
@property
def answer_match_left(self):
"""Used in matching questions. The static value of the answer that will be displayed on the left for students to match for."""
return self._answer_match_left
@answer_match_left.setter
def answer_match_left(self, value):
"""Setter for answer_match_left property."""
self.logger.warn("Setting values on answer_match_left will NOT update the remote Canvas instance.")
self._answer_match_left = value
@property
def answer_comments(self):
"""Specific contextual comments for a particular answer."""
return self._answer_comments
@answer_comments.setter
def answer_comments(self, value):
"""Setter for answer_comments property."""
self.logger.warn("Setting values on answer_comments will NOT update the remote Canvas instance.")
self._answer_comments = value
@property
def margin(self):
"""Used in numerical questions of type 'exact_answer'. The margin of error allowed for the student's answer."""
return self._margin
@margin.setter
def margin(self, value):
"""Setter for margin property."""
self.logger.warn("Setting values on margin will NOT update the remote Canvas instance.")
self._margin = value
@property
def matching_answer_incorrect_matches(self):
"""Used in matching questions. A list of distractors, delimited by new lines (
) that will be seeded with all the answer_match_right values."""
return self._matching_answer_incorrect_matches
@matching_answer_incorrect_matches.setter
def matching_answer_incorrect_matches(self, value):
"""Setter for matching_answer_incorrect_matches property."""
self.logger.warn("Setting values on matching_answer_incorrect_matches will NOT update the remote Canvas instance.")
self._matching_answer_incorrect_matches = value
@property
def approximate(self):
"""Used in numerical questions of type 'precision_answer'. The value the answer should equal."""
return self._approximate
@approximate.setter
def approximate(self, value):
"""Setter for approximate property."""
self.logger.warn("Setting values on approximate will NOT update the remote Canvas instance.")
self._approximate = value
@property
def start(self):
"""Used in numerical questions of type 'range_answer'. The start of the allowed range (inclusive)."""
return self._start
@start.setter
def start(self, value):
"""Setter for start property."""
self.logger.warn("Setting values on start will NOT update the remote Canvas instance.")
self._start = value
@property
def answer_text(self):
"""The text of the answer."""
return self._answer_text
@answer_text.setter
def answer_text(self, value):
"""Setter for answer_text property."""
self.logger.warn("Setting values on answer_text will NOT update the remote Canvas instance.")
self._answer_text = value
@property
def answer_weight(self):
"""An integer to determine correctness of the answer. Incorrect answers should be 0, correct answers should be non-negative."""
return self._answer_weight
@answer_weight.setter
def answer_weight(self, value):
"""Setter for answer_weight property."""
self.logger.warn("Setting values on answer_weight will NOT update the remote Canvas instance.")
self._answer_weight = value
@property
def answer_match_right(self):
"""Used in matching questions. The correct match for the value given in answer_match_left. Will be displayed in a dropdown with the other answer_match_right values.."""
return self._answer_match_right
@answer_match_right.setter
def answer_match_right(self, value):
"""Setter for answer_match_right property."""
self.logger.warn("Setting values on answer_match_right will NOT update the remote Canvas instance.")
self._answer_match_right = value
@property
def precision(self):
"""Used in numerical questions of type 'precision_answer'. The numerical precision that will be used when comparing the student's answer."""
return self._precision
@precision.setter
def precision(self, value):
"""Setter for precision property."""
self.logger.warn("Setting values on precision will NOT update the remote Canvas instance.")
self._precision = value
@property
def numerical_answer_type(self):
"""Used in numerical questions. Values can be 'exact_answer', 'range_answer', or 'precision_answer'."""
return self._numerical_answer_type
@numerical_answer_type.setter
def numerical_answer_type(self, value):
"""Setter for numerical_answer_type property."""
self.logger.warn("Setting values on numerical_answer_type will NOT update the remote Canvas instance.")
self._numerical_answer_type = value
@property
def end(self):
"""Used in numerical questions of type 'range_answer'. The end of the allowed range (inclusive)."""
return self._end
@end.setter
def end(self, value):
"""Setter for end property."""
self.logger.warn("Setting values on end will NOT update the remote Canvas instance.")
self._end = value
@property
def blank_id(self):
"""Used in fill in multiple blank and multiple dropdowns questions."""
return self._blank_id
@blank_id.setter
def blank_id(self, value):
"""Setter for blank_id property."""
self.logger.warn("Setting values on blank_id will NOT update the remote Canvas instance.")
self._blank_id = value
@property
def exact(self):
    """Used in numerical questions of type 'exact_answer'. The value the answer should equal."""
    return self._exact

@exact.setter
def exact(self, value):
    """Setter for exact property (local only; remote Canvas is not updated)."""
    # Logger.warn is deprecated; Logger.warning is the supported method.
    self.logger.warning("Setting values on exact will NOT update the remote Canvas instance.")
    self._exact = value
@property
def id(self):
    """The unique identifier for the answer. Do not supply if this answer is part of a new question."""
    return self._id

@id.setter
def id(self, value):
    """Setter for id property (local only; remote Canvas is not updated)."""
    # Logger.warn is deprecated; Logger.warning is the supported method.
    self.logger.warning("Setting values on id will NOT update the remote Canvas instance.")
    self._id = value
class Quizquestion(BaseModel):
    """Quizquestion Model.

    Read-only snapshot of a Canvas quiz question. The property setters
    mutate only this local object; they never write back to Canvas
    (each setter logs a warning to make that explicit).
    """

    def __init__(self, id, quiz_id, question_text=None, neutral_comments=None, points_possible=None, question_name=None, answers=None, question_type=None, correct_comments=None, incorrect_comments=None, position=None):
        """Init method for Quizquestion class."""
        self._question_text = question_text
        self._neutral_comments = neutral_comments
        self._points_possible = points_possible
        self._question_name = question_name
        self._answers = answers
        self._question_type = question_type
        self._correct_comments = correct_comments
        self._incorrect_comments = incorrect_comments
        self._position = position
        self._quiz_id = quiz_id
        self._id = id
        self.logger = logging.getLogger('pycanvas.Quizquestion')

    @property
    def question_text(self):
        """The text of the question."""
        return self._question_text

    @question_text.setter
    def question_text(self, value):
        """Setter for question_text property."""
        # Logger.warn is deprecated throughout; Logger.warning is the
        # supported method (same applies to every setter below).
        self.logger.warning("Setting values on question_text will NOT update the remote Canvas instance.")
        self._question_text = value

    @property
    def neutral_comments(self):
        """The comments to display regardless of how the student answered."""
        return self._neutral_comments

    @neutral_comments.setter
    def neutral_comments(self, value):
        """Setter for neutral_comments property."""
        self.logger.warning("Setting values on neutral_comments will NOT update the remote Canvas instance.")
        self._neutral_comments = value

    @property
    def points_possible(self):
        """The maximum amount of points possible received for getting this question correct."""
        return self._points_possible

    @points_possible.setter
    def points_possible(self, value):
        """Setter for points_possible property."""
        self.logger.warning("Setting values on points_possible will NOT update the remote Canvas instance.")
        self._points_possible = value

    @property
    def question_name(self):
        """The name of the question."""
        return self._question_name

    @question_name.setter
    def question_name(self, value):
        """Setter for question_name property."""
        self.logger.warning("Setting values on question_name will NOT update the remote Canvas instance.")
        self._question_name = value

    @property
    def answers(self):
        """An array of available answers to display to the student."""
        return self._answers

    @answers.setter
    def answers(self, value):
        """Setter for answers property."""
        self.logger.warning("Setting values on answers will NOT update the remote Canvas instance.")
        self._answers = value

    @property
    def question_type(self):
        """The type of the question."""
        return self._question_type

    @question_type.setter
    def question_type(self, value):
        """Setter for question_type property."""
        self.logger.warning("Setting values on question_type will NOT update the remote Canvas instance.")
        self._question_type = value

    @property
    def correct_comments(self):
        """The comments to display if the student answers the question correctly."""
        return self._correct_comments

    @correct_comments.setter
    def correct_comments(self, value):
        """Setter for correct_comments property."""
        self.logger.warning("Setting values on correct_comments will NOT update the remote Canvas instance.")
        self._correct_comments = value

    @property
    def incorrect_comments(self):
        """The comments to display if the student answers incorrectly."""
        return self._incorrect_comments

    @incorrect_comments.setter
    def incorrect_comments(self, value):
        """Setter for incorrect_comments property."""
        self.logger.warning("Setting values on incorrect_comments will NOT update the remote Canvas instance.")
        self._incorrect_comments = value

    @property
    def position(self):
        """The order in which the question will be retrieved and displayed."""
        return self._position

    @position.setter
    def position(self, value):
        """Setter for position property."""
        self.logger.warning("Setting values on position will NOT update the remote Canvas instance.")
        self._position = value

    @property
    def quiz_id(self):
        """The ID of the Quiz the question belongs to."""
        return self._quiz_id

    @quiz_id.setter
    def quiz_id(self, value):
        """Setter for quiz_id property."""
        self.logger.warning("Setting values on quiz_id will NOT update the remote Canvas instance.")
        self._quiz_id = value

    @property
    def id(self):
        """The ID of the quiz question."""
        return self._id

    @id.setter
    def id(self, value):
        """Setter for id property."""
        self.logger.warning("Setting values on id will NOT update the remote Canvas instance.")
        self._id = value
| mit |
75651/kbengine_cloud | kbe/src/lib/python/Lib/test/test_ipaddress.py | 72 | 74848 | # Copyright 2007 Google Inc.
# Licensed to PSF under a Contributor Agreement.
"""Unittest for ipaddress module."""
import unittest
import re
import contextlib
import operator
import ipaddress
class BaseTestCase(unittest.TestCase):
    # One big change in ipaddress over the original ipaddr module is
    # error reporting that tries to assume users *don't know the rules*
    # for what constitutes an RFC compliant IP address
    # Ensuring these errors are emitted correctly in all relevant cases
    # meant moving to a more systematic test structure that allows the
    # test structure to map more directly to the module structure
    # Note that if the constructors are refactored so that addresses with
    # multiple problems get classified differently, that's OK - just
    # move the affected examples to the newly appropriate test case.
    # There is some duplication between the original relatively ad hoc
    # test suite and the new systematic tests. While some redundancy in
    # testing is considered preferable to accidentally deleting a valid
    # test, the original test suite will likely be reduced over time as
    # redundant tests are identified.

    @property
    def factory(self):
        # Subclasses override this with the ipaddress class under test.
        raise NotImplementedError

    @contextlib.contextmanager
    def assertCleanError(self, exc_type, details, *args):
        """
        Ensure exception does not display a context by default

        Wraps unittest.TestCase.assertRaisesRegex
        """
        if args:
            details = details % args
        cm = self.assertRaisesRegex(exc_type, details)
        with cm as exc:
            yield exc
        # Ensure we produce clean tracebacks on failure
        if exc.exception.__context__ is not None:
            self.assertTrue(exc.exception.__suppress_context__)

    def assertAddressError(self, details, *args):
        """Ensure a clean AddressValueError"""
        return self.assertCleanError(ipaddress.AddressValueError,
                                     details, *args)

    def assertNetmaskError(self, details, *args):
        """Ensure a clean NetmaskValueError"""
        return self.assertCleanError(ipaddress.NetmaskValueError,
                                     details, *args)

    def assertInstancesEqual(self, lhs, rhs):
        """Check constructor arguments produce equivalent instances"""
        self.assertEqual(self.factory(lhs), self.factory(rhs))
class CommonTestMixin:
    # Version-independent checks; relies on self.factory and the
    # assert helpers supplied by the concrete test case class.

    def test_empty_address(self):
        with self.assertAddressError("Address cannot be empty"):
            self.factory("")

    def test_floats_rejected(self):
        with self.assertAddressError(re.escape(repr("1.0"))):
            self.factory(1.0)

    def test_not_an_index_issue15559(self):
        # Implementing __index__ makes for a very nasty interaction with the
        # bytes constructor. Thus, we disallow implicit use as an integer
        self.assertRaises(TypeError, operator.index, self.factory(1))
        self.assertRaises(TypeError, hex, self.factory(1))
        self.assertRaises(TypeError, bytes, self.factory(1))
class CommonTestMixin_v4(CommonTestMixin):
    """Shared constructor checks for every IPv4 factory."""

    def test_leading_zeros(self):
        # Zero-padded octets must parse the same as plain decimal.
        for padded, plain in (("000.000.000.000", "0.0.0.0"),
                              ("192.168.000.001", "192.168.0.1")):
            self.assertInstancesEqual(padded, plain)

    def test_int(self):
        for number, text in ((0, "0.0.0.0"),
                             (3232235521, "192.168.0.1")):
            self.assertInstancesEqual(number, text)

    def test_packed(self):
        for raw, text in ((bytes.fromhex("00000000"), "0.0.0.0"),
                          (bytes.fromhex("c0a80001"), "192.168.0.1")):
            self.assertInstancesEqual(raw, text)

    def test_negative_ints_rejected(self):
        msg = "-1 (< 0) is not permitted as an IPv4 address"
        with self.assertAddressError(re.escape(msg)):
            self.factory(-1)

    def test_large_ints_rejected(self):
        msg = "%d (>= 2**32) is not permitted as an IPv4 address"
        with self.assertAddressError(re.escape(msg % 2**32)):
            self.factory(2**32)

    def test_bad_packed_length(self):
        def check_bad_length(length):
            addr = bytes(length)
            msg = "%r (len %d != 4) is not permitted as an IPv4 address"
            with self.assertAddressError(re.escape(msg % (addr, length))):
                self.factory(addr)

        # One byte short and one byte long of the required four.
        check_bad_length(3)
        check_bad_length(5)
class CommonTestMixin_v6(CommonTestMixin):
    """Shared constructor checks for every IPv6 factory."""

    def test_leading_zeros(self):
        self.assertInstancesEqual("0000::0000", "::")
        self.assertInstancesEqual("000::c0a8:0001", "::c0a8:1")

    def test_int(self):
        self.assertInstancesEqual(0, "::")
        self.assertInstancesEqual(3232235521, "::c0a8:1")

    def test_packed(self):
        addr = bytes(12) + bytes.fromhex("00000000")
        self.assertInstancesEqual(addr, "::")
        addr = bytes(12) + bytes.fromhex("c0a80001")
        self.assertInstancesEqual(addr, "::c0a8:1")
        addr = bytes.fromhex("c0a80001") + bytes(12)
        self.assertInstancesEqual(addr, "c0a8:1::")

    def test_negative_ints_rejected(self):
        msg = "-1 (< 0) is not permitted as an IPv6 address"
        with self.assertAddressError(re.escape(msg)):
            self.factory(-1)

    def test_large_ints_rejected(self):
        msg = "%d (>= 2**128) is not permitted as an IPv6 address"
        with self.assertAddressError(re.escape(msg % 2**128)):
            self.factory(2**128)

    def test_bad_packed_length(self):
        def assertBadLength(length):
            addr = bytes(length)
            msg = "%r (len %d != 16) is not permitted as an IPv6 address"
            with self.assertAddressError(re.escape(msg % (addr, length))):
                # FIX: a duplicated self.factory(addr) call was removed here;
                # the second call was unreachable because the first raises.
                self.factory(addr)

        assertBadLength(15)
        assertBadLength(17)
class AddressTestCase_v4(BaseTestCase, CommonTestMixin_v4):
    # Parsing/validation errors specific to IPv4Address.
    factory = ipaddress.IPv4Address

    def test_network_passed_as_address(self):
        addr = "127.0.0.1/24"
        with self.assertAddressError("Unexpected '/' in %r", addr):
            ipaddress.IPv4Address(addr)

    def test_bad_address_split(self):
        # Anything that does not split into exactly four octets.
        def assertBadSplit(addr):
            with self.assertAddressError("Expected 4 octets in %r", addr):
                ipaddress.IPv4Address(addr)

        assertBadSplit("127.0.1")
        assertBadSplit("42.42.42.42.42")
        assertBadSplit("42.42.42")
        assertBadSplit("42.42")
        assertBadSplit("42")
        assertBadSplit("42..42.42.42")
        assertBadSplit("42.42.42.42.")
        assertBadSplit("42.42.42.42...")
        assertBadSplit(".42.42.42.42")
        assertBadSplit("...42.42.42.42")
        assertBadSplit("016.016.016")
        assertBadSplit("016.016")
        assertBadSplit("016")
        assertBadSplit("000")
        assertBadSplit("0x0a.0x0a.0x0a")
        assertBadSplit("0x0a.0x0a")
        assertBadSplit("0x0a")
        assertBadSplit(".")
        assertBadSplit("bogus")
        assertBadSplit("bogus.com")
        assertBadSplit("1000")
        assertBadSplit("1000000000000000")
        assertBadSplit("192.168.0.1.com")

    def test_empty_octet(self):
        def assertBadOctet(addr):
            with self.assertAddressError("Empty octet not permitted in %r",
                                         addr):
                ipaddress.IPv4Address(addr)

        assertBadOctet("42..42.42")
        assertBadOctet("...")

    def test_invalid_characters(self):
        def assertBadOctet(addr, octet):
            msg = "Only decimal digits permitted in %r in %r" % (octet, addr)
            with self.assertAddressError(re.escape(msg)):
                ipaddress.IPv4Address(addr)

        assertBadOctet("0x0a.0x0a.0x0a.0x0a", "0x0a")
        assertBadOctet("0xa.0x0a.0x0a.0x0a", "0xa")
        assertBadOctet("42.42.42.-0", "-0")
        assertBadOctet("42.42.42.+0", "+0")
        assertBadOctet("42.42.42.-42", "-42")
        assertBadOctet("+1.+2.+3.4", "+1")
        assertBadOctet("1.2.3.4e0", "4e0")
        assertBadOctet("1.2.3.4::", "4::")
        assertBadOctet("1.a.2.3", "a")

    def test_octal_decimal_ambiguity(self):
        # A leading zero could mean octal or zero-padded decimal, so reject.
        def assertBadOctet(addr, octet):
            msg = "Ambiguous (octal/decimal) value in %r not permitted in %r"
            with self.assertAddressError(re.escape(msg % (octet, addr))):
                ipaddress.IPv4Address(addr)

        assertBadOctet("016.016.016.016", "016")
        assertBadOctet("001.000.008.016", "008")

    def test_octet_length(self):
        def assertBadOctet(addr, octet):
            msg = "At most 3 characters permitted in %r in %r"
            with self.assertAddressError(re.escape(msg % (octet, addr))):
                ipaddress.IPv4Address(addr)

        assertBadOctet("0000.000.000.000", "0000")
        assertBadOctet("12345.67899.-54321.-98765", "12345")

    def test_octet_limit(self):
        def assertBadOctet(addr, octet):
            msg = "Octet %d (> 255) not permitted in %r" % (octet, addr)
            with self.assertAddressError(re.escape(msg)):
                ipaddress.IPv4Address(addr)

        assertBadOctet("257.0.0.0", 257)
        assertBadOctet("192.168.0.999", 999)
class AddressTestCase_v6(BaseTestCase, CommonTestMixin_v6):
    # Parsing/validation errors specific to IPv6Address.
    factory = ipaddress.IPv6Address

    def test_network_passed_as_address(self):
        addr = "::1/24"
        with self.assertAddressError("Unexpected '/' in %r", addr):
            ipaddress.IPv6Address(addr)

    def test_bad_address_split_v6_not_enough_parts(self):
        def assertBadSplit(addr):
            msg = "At least 3 parts expected in %r"
            with self.assertAddressError(msg, addr):
                ipaddress.IPv6Address(addr)

        assertBadSplit(":")
        assertBadSplit(":1")
        assertBadSplit("FEDC:9878")

    def test_bad_address_split_v6_too_many_colons(self):
        def assertBadSplit(addr):
            msg = "At most 8 colons permitted in %r"
            with self.assertAddressError(msg, addr):
                ipaddress.IPv6Address(addr)

        assertBadSplit("9:8:7:6:5:4:3::2:1")
        assertBadSplit("10:9:8:7:6:5:4:3:2:1")
        assertBadSplit("::8:7:6:5:4:3:2:1")
        assertBadSplit("8:7:6:5:4:3:2:1::")
        # A trailing IPv4 address is two parts
        assertBadSplit("10:9:8:7:6:5:4:3:42.42.42.42")

    def test_bad_address_split_v6_too_many_parts(self):
        def assertBadSplit(addr):
            msg = "Exactly 8 parts expected without '::' in %r"
            with self.assertAddressError(msg, addr):
                ipaddress.IPv6Address(addr)

        assertBadSplit("3ffe:0:0:0:0:0:0:0:1")
        assertBadSplit("9:8:7:6:5:4:3:2:1")
        assertBadSplit("7:6:5:4:3:2:1")
        # A trailing IPv4 address is two parts
        assertBadSplit("9:8:7:6:5:4:3:42.42.42.42")
        assertBadSplit("7:6:5:4:3:42.42.42.42")

    def test_bad_address_split_v6_too_many_parts_with_double_colon(self):
        def assertBadSplit(addr):
            msg = "Expected at most 7 other parts with '::' in %r"
            with self.assertAddressError(msg, addr):
                ipaddress.IPv6Address(addr)

        assertBadSplit("1:2:3:4::5:6:7:8")

    def test_bad_address_split_v6_repeated_double_colon(self):
        def assertBadSplit(addr):
            msg = "At most one '::' permitted in %r"
            with self.assertAddressError(msg, addr):
                ipaddress.IPv6Address(addr)

        assertBadSplit("3ffe::1::1")
        assertBadSplit("1::2::3::4:5")
        assertBadSplit("2001::db:::1")
        assertBadSplit("3ffe::1::")
        assertBadSplit("::3ffe::1")
        assertBadSplit(":3ffe::1::1")
        assertBadSplit("3ffe::1::1:")
        assertBadSplit(":3ffe::1::1:")
        assertBadSplit(":::")
        assertBadSplit('2001:db8:::1')

    def test_bad_address_split_v6_leading_colon(self):
        def assertBadSplit(addr):
            msg = "Leading ':' only permitted as part of '::' in %r"
            with self.assertAddressError(msg, addr):
                ipaddress.IPv6Address(addr)

        assertBadSplit(":2001:db8::1")
        assertBadSplit(":1:2:3:4:5:6:7")
        assertBadSplit(":1:2:3:4:5:6:")
        assertBadSplit(":6:5:4:3:2:1::")

    def test_bad_address_split_v6_trailing_colon(self):
        def assertBadSplit(addr):
            msg = "Trailing ':' only permitted as part of '::' in %r"
            with self.assertAddressError(msg, addr):
                ipaddress.IPv6Address(addr)

        assertBadSplit("2001:db8::1:")
        assertBadSplit("1:2:3:4:5:6:7:")
        assertBadSplit("::1.2.3.4:")
        assertBadSplit("::7:6:5:4:3:2:")

    def test_bad_v4_part_in(self):
        # Embedded IPv4 suffixes are validated with the IPv4 error messages.
        def assertBadAddressPart(addr, v4_error):
            with self.assertAddressError("%s in %r", v4_error, addr):
                ipaddress.IPv6Address(addr)

        assertBadAddressPart("3ffe::1.net", "Expected 4 octets in '1.net'")
        assertBadAddressPart("3ffe::127.0.1",
                             "Expected 4 octets in '127.0.1'")
        assertBadAddressPart("::1.2.3",
                             "Expected 4 octets in '1.2.3'")
        assertBadAddressPart("::1.2.3.4.5",
                             "Expected 4 octets in '1.2.3.4.5'")
        assertBadAddressPart("3ffe::1.1.1.net",
                             "Only decimal digits permitted in 'net' "
                             "in '1.1.1.net'")

    def test_invalid_characters(self):
        def assertBadPart(addr, part):
            msg = "Only hex digits permitted in %r in %r" % (part, addr)
            with self.assertAddressError(re.escape(msg)):
                ipaddress.IPv6Address(addr)

        assertBadPart("3ffe::goog", "goog")
        assertBadPart("3ffe::-0", "-0")
        assertBadPart("3ffe::+0", "+0")
        assertBadPart("3ffe::-1", "-1")
        assertBadPart("1.2.3.4::", "1.2.3.4")
        assertBadPart('1234:axy::b', "axy")

    def test_part_length(self):
        def assertBadPart(addr, part):
            msg = "At most 4 characters permitted in %r in %r"
            with self.assertAddressError(msg, part, addr):
                ipaddress.IPv6Address(addr)

        assertBadPart("::00000", "00000")
        assertBadPart("3ffe::10000", "10000")
        assertBadPart("02001:db8::", "02001")
        assertBadPart('2001:888888::1', "888888")
class NetmaskTestMixin_v4(CommonTestMixin_v4):
    """Input validation on interfaces and networks is very similar"""

    def test_split_netmask(self):
        addr = "1.2.3.4/32/24"
        with self.assertAddressError("Only one '/' permitted in %r" % addr):
            self.factory(addr)

    def test_address_errors(self):
        def assertBadAddress(addr, details):
            with self.assertAddressError(details):
                self.factory(addr)

        assertBadAddress("/", "Address cannot be empty")
        assertBadAddress("/8", "Address cannot be empty")
        assertBadAddress("bogus", "Expected 4 octets")
        assertBadAddress("google.com", "Expected 4 octets")
        assertBadAddress("10/8", "Expected 4 octets")
        assertBadAddress("::1.2.3.4", "Only decimal digits")
        assertBadAddress("1.2.3.256", re.escape("256 (> 255)"))

    def test_valid_netmask(self):
        self.assertEqual(str(self.factory('192.0.2.0/255.255.255.0')),
                         '192.0.2.0/24')
        for i in range(0, 33):
            # Generate and re-parse the CIDR format (trivial).
            net_str = '0.0.0.0/%d' % i
            net = self.factory(net_str)
            self.assertEqual(str(net), net_str)
            # Generate and re-parse the expanded netmask.
            self.assertEqual(
                str(self.factory('0.0.0.0/%s' % net.netmask)), net_str)
            # Zero prefix is treated as decimal.
            self.assertEqual(str(self.factory('0.0.0.0/0%d' % i)), net_str)
            # Generate and re-parse the expanded hostmask. The ambiguous
            # cases (/0 and /32) are treated as netmasks.
            if i in (32, 0):
                net_str = '0.0.0.0/%d' % (32 - i)
            self.assertEqual(
                str(self.factory('0.0.0.0/%s' % net.hostmask)), net_str)

    def test_netmask_errors(self):
        def assertBadNetmask(addr, netmask):
            msg = "%r is not a valid netmask" % netmask
            with self.assertNetmaskError(re.escape(msg)):
                self.factory("%s/%s" % (addr, netmask))

        assertBadNetmask("1.2.3.4", "")
        assertBadNetmask("1.2.3.4", "-1")
        assertBadNetmask("1.2.3.4", "+1")
        assertBadNetmask("1.2.3.4", " 1 ")
        assertBadNetmask("1.2.3.4", "0x1")
        assertBadNetmask("1.2.3.4", "33")
        assertBadNetmask("1.2.3.4", "254.254.255.256")
        assertBadNetmask("1.2.3.4", "1.a.2.3")
        assertBadNetmask("1.1.1.1", "254.xyz.2.3")
        assertBadNetmask("1.1.1.1", "240.255.0.0")
        assertBadNetmask("1.1.1.1", "255.254.128.0")
        assertBadNetmask("1.1.1.1", "0.1.127.255")
        assertBadNetmask("1.1.1.1", "pudding")
        assertBadNetmask("1.1.1.1", "::")
class InterfaceTestCase_v4(BaseTestCase, NetmaskTestMixin_v4):
    # Runs the shared v4 address/netmask checks against IPv4Interface.
    factory = ipaddress.IPv4Interface
class NetworkTestCase_v4(BaseTestCase, NetmaskTestMixin_v4):
    # Runs the shared v4 address/netmask checks against IPv4Network.
    factory = ipaddress.IPv4Network
class NetmaskTestMixin_v6(CommonTestMixin_v6):
    """Input validation on interfaces and networks is very similar"""

    def test_split_netmask(self):
        addr = "cafe:cafe::/128/190"
        with self.assertAddressError("Only one '/' permitted in %r" % addr):
            self.factory(addr)

    def test_address_errors(self):
        def assertBadAddress(addr, details):
            with self.assertAddressError(details):
                self.factory(addr)

        assertBadAddress("/", "Address cannot be empty")
        assertBadAddress("/8", "Address cannot be empty")
        assertBadAddress("google.com", "At least 3 parts")
        assertBadAddress("1.2.3.4", "At least 3 parts")
        assertBadAddress("10/8", "At least 3 parts")
        assertBadAddress("1234:axy::b", "Only hex digits")

    def test_valid_netmask(self):
        # We only support CIDR for IPv6, because expanded netmasks are not
        # standard notation.
        self.assertEqual(str(self.factory('2001:db8::/32')), '2001:db8::/32')
        for i in range(0, 129):
            # Generate and re-parse the CIDR format (trivial).
            net_str = '::/%d' % i
            self.assertEqual(str(self.factory(net_str)), net_str)
            # Zero prefix is treated as decimal.
            self.assertEqual(str(self.factory('::/0%d' % i)), net_str)

    def test_netmask_errors(self):
        def assertBadNetmask(addr, netmask):
            msg = "%r is not a valid netmask" % netmask
            with self.assertNetmaskError(re.escape(msg)):
                self.factory("%s/%s" % (addr, netmask))

        assertBadNetmask("::1", "")
        assertBadNetmask("::1", "::1")
        assertBadNetmask("::1", "1::")
        assertBadNetmask("::1", "-1")
        assertBadNetmask("::1", "+1")
        assertBadNetmask("::1", " 1 ")
        assertBadNetmask("::1", "0x1")
        assertBadNetmask("::1", "129")
        assertBadNetmask("::1", "1.2.3.4")
        assertBadNetmask("::1", "pudding")
        assertBadNetmask("::", "::")
class InterfaceTestCase_v6(BaseTestCase, NetmaskTestMixin_v6):
    # Runs the shared v6 address/netmask checks against IPv6Interface.
    factory = ipaddress.IPv6Interface
class NetworkTestCase_v6(BaseTestCase, NetmaskTestMixin_v6):
    # Runs the shared v6 address/netmask checks against IPv6Network.
    factory = ipaddress.IPv6Network
class FactoryFunctionErrors(BaseTestCase):
    # The ip_address/ip_interface/ip_network convenience factories must
    # fail with a clean ValueError (no chained context) for bad input.

    def assertFactoryError(self, factory, kind):
        """Ensure a clean ValueError with the expected message"""
        addr = "camelot"
        msg = '%r does not appear to be an IPv4 or IPv6 %s'
        with self.assertCleanError(ValueError, msg, addr, kind):
            factory(addr)

    def test_ip_address(self):
        self.assertFactoryError(ipaddress.ip_address, "address")

    def test_ip_interface(self):
        self.assertFactoryError(ipaddress.ip_interface, "interface")

    def test_ip_network(self):
        self.assertFactoryError(ipaddress.ip_network, "network")
class ComparisonTests(unittest.TestCase):
    """Cross-type equality and ordering behaviour of ipaddress objects."""

    v4addr = ipaddress.IPv4Address(1)
    v4net = ipaddress.IPv4Network(1)
    v4intf = ipaddress.IPv4Interface(1)
    v6addr = ipaddress.IPv6Address(1)
    v6net = ipaddress.IPv6Network(1)
    v6intf = ipaddress.IPv6Interface(1)

    v4_addresses = [v4addr, v4intf]
    v4_objects = v4_addresses + [v4net]
    v6_addresses = [v6addr, v6intf]
    v6_objects = v6_addresses + [v6net]
    objects = v4_objects + v6_objects

    def test_foreign_type_equality(self):
        # __eq__ should never raise TypeError directly
        other = object()
        for obj in self.objects:
            self.assertNotEqual(obj, other)
            self.assertFalse(obj == other)
            self.assertEqual(obj.__eq__(other), NotImplemented)
            self.assertEqual(obj.__ne__(other), NotImplemented)

    def test_mixed_type_equality(self):
        # Ensure none of the internal objects accidentally
        # expose the right set of attributes to become "equal"
        for lhs in self.objects:
            for rhs in self.objects:
                if lhs is rhs:
                    continue
                self.assertNotEqual(lhs, rhs)

    def test_containment(self):
        for obj in self.v4_addresses:
            self.assertIn(obj, self.v4net)
        for obj in self.v6_addresses:
            self.assertIn(obj, self.v6net)
        for obj in self.v4_objects + [self.v6net]:
            self.assertNotIn(obj, self.v6net)
        for obj in self.v6_objects + [self.v4net]:
            self.assertNotIn(obj, self.v4net)

    def test_mixed_type_ordering(self):
        for lhs in self.objects:
            for rhs in self.objects:
                if isinstance(lhs, type(rhs)) or isinstance(rhs, type(lhs)):
                    continue
                self.assertRaises(TypeError, lambda: lhs < rhs)
                self.assertRaises(TypeError, lambda: lhs > rhs)
                self.assertRaises(TypeError, lambda: lhs <= rhs)
                self.assertRaises(TypeError, lambda: lhs >= rhs)

    def test_mixed_type_key(self):
        # with get_mixed_type_key, you can sort addresses and network.
        v4_ordered = [self.v4addr, self.v4net, self.v4intf]
        v6_ordered = [self.v6addr, self.v6net, self.v6intf]
        self.assertEqual(v4_ordered,
                         sorted(self.v4_objects,
                                key=ipaddress.get_mixed_type_key))
        self.assertEqual(v6_ordered,
                         sorted(self.v6_objects,
                                key=ipaddress.get_mixed_type_key))
        self.assertEqual(v4_ordered + v6_ordered,
                         sorted(self.objects,
                                key=ipaddress.get_mixed_type_key))
        self.assertEqual(NotImplemented, ipaddress.get_mixed_type_key(object))

    def test_incompatible_versions(self):
        # These should always raise TypeError
        v4addr = ipaddress.ip_address('1.1.1.1')
        v4net = ipaddress.ip_network('1.1.1.1')
        v6addr = ipaddress.ip_address('::1')
        # FIX: v6net was previously built with ip_address('::1'), so the
        # network-vs-network comparisons below were actually comparing an
        # address object instead of a network.
        v6net = ipaddress.ip_network('::1')
        self.assertRaises(TypeError, v4addr.__lt__, v6addr)
        self.assertRaises(TypeError, v4addr.__gt__, v6addr)
        self.assertRaises(TypeError, v4net.__lt__, v6net)
        self.assertRaises(TypeError, v4net.__gt__, v6net)
        self.assertRaises(TypeError, v6addr.__lt__, v4addr)
        self.assertRaises(TypeError, v6addr.__gt__, v4addr)
        self.assertRaises(TypeError, v6net.__lt__, v4net)
        self.assertRaises(TypeError, v6net.__gt__, v4net)
class IpaddrUnitTest(unittest.TestCase):
def setUp(self):
self.ipv4_address = ipaddress.IPv4Address('1.2.3.4')
self.ipv4_interface = ipaddress.IPv4Interface('1.2.3.4/24')
self.ipv4_network = ipaddress.IPv4Network('1.2.3.0/24')
#self.ipv4_hostmask = ipaddress.IPv4Interface('10.0.0.1/0.255.255.255')
self.ipv6_address = ipaddress.IPv6Interface(
'2001:658:22a:cafe:200:0:0:1')
self.ipv6_interface = ipaddress.IPv6Interface(
'2001:658:22a:cafe:200:0:0:1/64')
self.ipv6_network = ipaddress.IPv6Network('2001:658:22a:cafe::/64')
def testRepr(self):
self.assertEqual("IPv4Interface('1.2.3.4/32')",
repr(ipaddress.IPv4Interface('1.2.3.4')))
self.assertEqual("IPv6Interface('::1/128')",
repr(ipaddress.IPv6Interface('::1')))
# issue57
def testAddressIntMath(self):
self.assertEqual(ipaddress.IPv4Address('1.1.1.1') + 255,
ipaddress.IPv4Address('1.1.2.0'))
self.assertEqual(ipaddress.IPv4Address('1.1.1.1') - 256,
ipaddress.IPv4Address('1.1.0.1'))
self.assertEqual(ipaddress.IPv6Address('::1') + (2**16 - 2),
ipaddress.IPv6Address('::ffff'))
self.assertEqual(ipaddress.IPv6Address('::ffff') - (2**16 - 2),
ipaddress.IPv6Address('::1'))
def testInvalidIntToBytes(self):
self.assertRaises(ValueError, ipaddress.v4_int_to_packed, -1)
self.assertRaises(ValueError, ipaddress.v4_int_to_packed,
2 ** ipaddress.IPV4LENGTH)
self.assertRaises(ValueError, ipaddress.v6_int_to_packed, -1)
self.assertRaises(ValueError, ipaddress.v6_int_to_packed,
2 ** ipaddress.IPV6LENGTH)
def testInternals(self):
first, last = ipaddress._find_address_range([
ipaddress.IPv4Address('10.10.10.10'),
ipaddress.IPv4Address('10.10.10.12')])
self.assertEqual(first, last)
self.assertEqual(128, ipaddress._count_righthand_zero_bits(0, 128))
self.assertEqual("IPv4Network('1.2.3.0/24')", repr(self.ipv4_network))
def testMissingAddressVersion(self):
class Broken(ipaddress._BaseAddress):
pass
broken = Broken('127.0.0.1')
with self.assertRaisesRegex(NotImplementedError, "Broken.*version"):
broken.version
def testMissingNetworkVersion(self):
class Broken(ipaddress._BaseNetwork):
pass
broken = Broken('127.0.0.1')
with self.assertRaisesRegex(NotImplementedError, "Broken.*version"):
broken.version
def testMissingAddressClass(self):
class Broken(ipaddress._BaseNetwork):
pass
broken = Broken('127.0.0.1')
with self.assertRaisesRegex(NotImplementedError, "Broken.*address"):
broken._address_class
def testGetNetwork(self):
self.assertEqual(int(self.ipv4_network.network_address), 16909056)
self.assertEqual(str(self.ipv4_network.network_address), '1.2.3.0')
self.assertEqual(int(self.ipv6_network.network_address),
42540616829182469433403647294022090752)
self.assertEqual(str(self.ipv6_network.network_address),
'2001:658:22a:cafe::')
self.assertEqual(str(self.ipv6_network.hostmask),
'::ffff:ffff:ffff:ffff')
def testIpFromInt(self):
self.assertEqual(self.ipv4_interface._ip,
ipaddress.IPv4Interface(16909060)._ip)
ipv4 = ipaddress.ip_network('1.2.3.4')
ipv6 = ipaddress.ip_network('2001:658:22a:cafe:200:0:0:1')
self.assertEqual(ipv4, ipaddress.ip_network(int(ipv4.network_address)))
self.assertEqual(ipv6, ipaddress.ip_network(int(ipv6.network_address)))
v6_int = 42540616829182469433547762482097946625
self.assertEqual(self.ipv6_interface._ip,
ipaddress.IPv6Interface(v6_int)._ip)
self.assertEqual(ipaddress.ip_network(self.ipv4_address._ip).version,
4)
self.assertEqual(ipaddress.ip_network(self.ipv6_address._ip).version,
6)
def testIpFromPacked(self):
address = ipaddress.ip_address
self.assertEqual(self.ipv4_interface._ip,
ipaddress.ip_interface(b'\x01\x02\x03\x04')._ip)
self.assertEqual(address('255.254.253.252'),
address(b'\xff\xfe\xfd\xfc'))
self.assertEqual(self.ipv6_interface.ip,
ipaddress.ip_interface(
b'\x20\x01\x06\x58\x02\x2a\xca\xfe'
b'\x02\x00\x00\x00\x00\x00\x00\x01').ip)
self.assertEqual(address('ffff:2:3:4:ffff::'),
address(b'\xff\xff\x00\x02\x00\x03\x00\x04' +
b'\xff\xff' + b'\x00' * 6))
self.assertEqual(address('::'),
address(b'\x00' * 16))
def testGetIp(self):
self.assertEqual(int(self.ipv4_interface.ip), 16909060)
self.assertEqual(str(self.ipv4_interface.ip), '1.2.3.4')
self.assertEqual(int(self.ipv6_interface.ip),
42540616829182469433547762482097946625)
self.assertEqual(str(self.ipv6_interface.ip),
'2001:658:22a:cafe:200::1')
def testGetNetmask(self):
self.assertEqual(int(self.ipv4_network.netmask), 4294967040)
self.assertEqual(str(self.ipv4_network.netmask), '255.255.255.0')
self.assertEqual(int(self.ipv6_network.netmask),
340282366920938463444927863358058659840)
self.assertEqual(self.ipv6_network.prefixlen, 64)
def testZeroNetmask(self):
ipv4_zero_netmask = ipaddress.IPv4Interface('1.2.3.4/0')
self.assertEqual(int(ipv4_zero_netmask.network.netmask), 0)
self.assertEqual(ipv4_zero_netmask._prefix_from_prefix_string('0'), 0)
self.assertTrue(ipv4_zero_netmask._is_valid_netmask('0'))
self.assertTrue(ipv4_zero_netmask._is_valid_netmask('0.0.0.0'))
self.assertFalse(ipv4_zero_netmask._is_valid_netmask('invalid'))
ipv6_zero_netmask = ipaddress.IPv6Interface('::1/0')
self.assertEqual(int(ipv6_zero_netmask.network.netmask), 0)
self.assertEqual(ipv6_zero_netmask._prefix_from_prefix_string('0'), 0)
def testIPv4NetAndHostmasks(self):
net = self.ipv4_network
self.assertFalse(net._is_valid_netmask('invalid'))
self.assertTrue(net._is_valid_netmask('128.128.128.128'))
self.assertFalse(net._is_valid_netmask('128.128.128.127'))
self.assertFalse(net._is_valid_netmask('128.128.128.255'))
self.assertTrue(net._is_valid_netmask('255.128.128.128'))
self.assertFalse(net._is_hostmask('invalid'))
self.assertTrue(net._is_hostmask('128.255.255.255'))
self.assertFalse(net._is_hostmask('255.255.255.255'))
self.assertFalse(net._is_hostmask('1.2.3.4'))
net = ipaddress.IPv4Network('127.0.0.0/0.0.0.255')
self.assertEqual(net.prefixlen, 24)
def testGetBroadcast(self):
self.assertEqual(int(self.ipv4_network.broadcast_address), 16909311)
self.assertEqual(str(self.ipv4_network.broadcast_address), '1.2.3.255')
self.assertEqual(int(self.ipv6_network.broadcast_address),
42540616829182469451850391367731642367)
self.assertEqual(str(self.ipv6_network.broadcast_address),
'2001:658:22a:cafe:ffff:ffff:ffff:ffff')
def testGetPrefixlen(self):
self.assertEqual(self.ipv4_interface.network.prefixlen, 24)
self.assertEqual(self.ipv6_interface.network.prefixlen, 64)
def testGetSupernet(self):
self.assertEqual(self.ipv4_network.supernet().prefixlen, 23)
self.assertEqual(str(self.ipv4_network.supernet().network_address),
'1.2.2.0')
self.assertEqual(
ipaddress.IPv4Interface('0.0.0.0/0').network.supernet(),
ipaddress.IPv4Network('0.0.0.0/0'))
self.assertEqual(self.ipv6_network.supernet().prefixlen, 63)
self.assertEqual(str(self.ipv6_network.supernet().network_address),
'2001:658:22a:cafe::')
self.assertEqual(ipaddress.IPv6Interface('::0/0').network.supernet(),
ipaddress.IPv6Network('::0/0'))
def testGetSupernet3(self):
self.assertEqual(self.ipv4_network.supernet(3).prefixlen, 21)
self.assertEqual(str(self.ipv4_network.supernet(3).network_address),
'1.2.0.0')
self.assertEqual(self.ipv6_network.supernet(3).prefixlen, 61)
self.assertEqual(str(self.ipv6_network.supernet(3).network_address),
'2001:658:22a:caf8::')
def testGetSupernet4(self):
self.assertRaises(ValueError, self.ipv4_network.supernet,
prefixlen_diff=2, new_prefix=1)
self.assertRaises(ValueError, self.ipv4_network.supernet,
new_prefix=25)
self.assertEqual(self.ipv4_network.supernet(prefixlen_diff=2),
self.ipv4_network.supernet(new_prefix=22))
self.assertRaises(ValueError, self.ipv6_network.supernet,
prefixlen_diff=2, new_prefix=1)
self.assertRaises(ValueError, self.ipv6_network.supernet,
new_prefix=65)
self.assertEqual(self.ipv6_network.supernet(prefixlen_diff=2),
self.ipv6_network.supernet(new_prefix=62))
def testHosts(self):
hosts = list(self.ipv4_network.hosts())
self.assertEqual(254, len(hosts))
self.assertEqual(ipaddress.IPv4Address('1.2.3.1'), hosts[0])
self.assertEqual(ipaddress.IPv4Address('1.2.3.254'), hosts[-1])
# special case where only 1 bit is left for address
self.assertEqual([ipaddress.IPv4Address('2.0.0.0'),
ipaddress.IPv4Address('2.0.0.1')],
list(ipaddress.ip_network('2.0.0.0/31').hosts()))
def testFancySubnetting(self):
    """subnets() accepts new_prefix as an alternative to prefixlen_diff,
    rejecting inconsistent or shorter-than-current prefixes."""
    self.assertEqual(sorted(self.ipv4_network.subnets(prefixlen_diff=3)),
                     sorted(self.ipv4_network.subnets(new_prefix=27)))
    self.assertRaises(ValueError, list,
                      self.ipv4_network.subnets(new_prefix=23))
    self.assertRaises(ValueError, list,
                      self.ipv4_network.subnets(prefixlen_diff=3,
                                                new_prefix=27))
    self.assertEqual(sorted(self.ipv6_network.subnets(prefixlen_diff=4)),
                     sorted(self.ipv6_network.subnets(new_prefix=68)))
    self.assertRaises(ValueError, list,
                      self.ipv6_network.subnets(new_prefix=63))
    self.assertRaises(ValueError, list,
                      self.ipv6_network.subnets(prefixlen_diff=4,
                                                new_prefix=68))

def testGetSubnets(self):
    """subnets() with no arguments splits a network into its two halves."""
    self.assertEqual(list(self.ipv4_network.subnets())[0].prefixlen, 25)
    self.assertEqual(str(list(
        self.ipv4_network.subnets())[0].network_address),
        '1.2.3.0')
    self.assertEqual(str(list(
        self.ipv4_network.subnets())[1].network_address),
        '1.2.3.128')
    self.assertEqual(list(self.ipv6_network.subnets())[0].prefixlen, 65)
def testGetSubnetForSingle32(self):
    """A /32 host network is its own (only) subnet."""
    ip = ipaddress.IPv4Network('1.2.3.4/32')
    subnets1 = [str(x) for x in ip.subnets()]
    subnets2 = [str(x) for x in ip.subnets(2)]
    self.assertEqual(subnets1, ['1.2.3.4/32'])
    self.assertEqual(subnets1, subnets2)

def testGetSubnetForSingle128(self):
    """A /128 host network is its own (only) subnet."""
    ip = ipaddress.IPv6Network('::1/128')
    subnets1 = [str(x) for x in ip.subnets()]
    subnets2 = [str(x) for x in ip.subnets(2)]
    self.assertEqual(subnets1, ['::1/128'])
    self.assertEqual(subnets1, subnets2)

def testSubnet2(self):
    """subnets(2) yields the four quarter-size subnets, in address order."""
    ips = [str(x) for x in self.ipv4_network.subnets(2)]
    self.assertEqual(
        ips,
        ['1.2.3.0/26', '1.2.3.64/26', '1.2.3.128/26', '1.2.3.192/26'])
    ipsv6 = [str(x) for x in self.ipv6_network.subnets(2)]
    self.assertEqual(
        ipsv6,
        ['2001:658:22a:cafe::/66',
         '2001:658:22a:cafe:4000::/66',
         '2001:658:22a:cafe:8000::/66',
         '2001:658:22a:cafe:c000::/66'])
def testSubnetFailsForLargeCidrDiff(self):
    """Asking for subnets beyond the version's maximum prefix length fails."""
    self.assertRaises(ValueError, list,
                      self.ipv4_interface.network.subnets(9))
    self.assertRaises(ValueError, list,
                      self.ipv4_network.subnets(9))
    self.assertRaises(ValueError, list,
                      self.ipv6_interface.network.subnets(65))
    self.assertRaises(ValueError, list,
                      self.ipv6_network.subnets(65))

def testSupernetFailsForLargeCidrDiff(self):
    """Asking for a supernet wider than /0 fails."""
    self.assertRaises(ValueError,
                      self.ipv4_interface.network.supernet, 25)
    self.assertRaises(ValueError,
                      self.ipv6_interface.network.supernet, 65)

def testSubnetFailsForNegativeCidrDiff(self):
    """A negative prefixlen_diff is rejected."""
    self.assertRaises(ValueError, list,
                      self.ipv4_interface.network.subnets(-1))
    self.assertRaises(ValueError, list,
                      self.ipv4_network.subnets(-1))
    self.assertRaises(ValueError, list,
                      self.ipv6_interface.network.subnets(-1))
    self.assertRaises(ValueError, list,
                      self.ipv6_network.subnets(-1))
def testGetNum_Addresses(self):
    """num_addresses equals 2 ** (max_prefixlen - prefixlen)."""
    self.assertEqual(self.ipv4_network.num_addresses, 256)
    self.assertEqual(list(self.ipv4_network.subnets())[0].num_addresses,
                     128)
    self.assertEqual(self.ipv4_network.supernet().num_addresses, 512)
    self.assertEqual(self.ipv6_network.num_addresses, 18446744073709551616)
    self.assertEqual(list(self.ipv6_network.subnets())[0].num_addresses,
                     9223372036854775808)
    self.assertEqual(self.ipv6_network.supernet().num_addresses,
                     36893488147419103232)
def testContains(self):
    """Membership tests interfaces and addresses against a network."""
    self.assertIn(ipaddress.IPv4Interface('1.2.3.128/25'),
                  self.ipv4_network)
    self.assertNotIn(ipaddress.IPv4Interface('1.2.4.1/24'),
                     self.ipv4_network)
    # We can test addresses and string as well.
    addr1 = ipaddress.IPv4Address('1.2.3.37')
    self.assertIn(addr1, self.ipv4_network)
    # issue 61, bad network comparison on like-ip'd network objects
    # with identical broadcast addresses.
    self.assertFalse(ipaddress.IPv4Network('1.1.0.0/16').__contains__(
        ipaddress.IPv4Network('1.0.0.0/15')))

def testNth(self):
    """Indexing a network yields the nth address; out of range raises IndexError."""
    self.assertEqual(str(self.ipv4_network[5]), '1.2.3.5')
    self.assertRaises(IndexError, self.ipv4_network.__getitem__, 256)
    self.assertEqual(str(self.ipv6_network[5]),
                     '2001:658:22a:cafe::5')

def testGetitem(self):
    """Positive and negative indexing agree with full iteration."""
    # http://code.google.com/p/ipaddr-py/issues/detail?id=15
    addr = ipaddress.IPv4Network('172.31.255.128/255.255.255.240')
    self.assertEqual(28, addr.prefixlen)
    addr_list = list(addr)
    self.assertEqual('172.31.255.128', str(addr_list[0]))
    self.assertEqual('172.31.255.128', str(addr[0]))
    self.assertEqual('172.31.255.143', str(addr_list[-1]))
    self.assertEqual('172.31.255.143', str(addr[-1]))
    self.assertEqual(addr_list[-1], addr[-1])
def testEqual(self):
    """== is True only for same-version interfaces with identical prefix."""
    self.assertTrue(self.ipv4_interface ==
                    ipaddress.IPv4Interface('1.2.3.4/24'))
    self.assertFalse(self.ipv4_interface ==
                     ipaddress.IPv4Interface('1.2.3.4/23'))
    self.assertFalse(self.ipv4_interface ==
                     ipaddress.IPv6Interface('::1.2.3.4/24'))
    self.assertFalse(self.ipv4_interface == '')
    self.assertFalse(self.ipv4_interface == [])
    self.assertFalse(self.ipv4_interface == 2)
    self.assertTrue(self.ipv6_interface ==
                    ipaddress.IPv6Interface('2001:658:22a:cafe:200::1/64'))
    self.assertFalse(self.ipv6_interface ==
                     ipaddress.IPv6Interface('2001:658:22a:cafe:200::1/63'))
    self.assertFalse(self.ipv6_interface ==
                     ipaddress.IPv4Interface('1.2.3.4/23'))
    self.assertFalse(self.ipv6_interface == '')
    self.assertFalse(self.ipv6_interface == [])
    self.assertFalse(self.ipv6_interface == 2)

def testNotEqual(self):
    """!= is the exact complement of == across versions, masks and foreign types."""
    self.assertFalse(self.ipv4_interface !=
                     ipaddress.IPv4Interface('1.2.3.4/24'))
    self.assertTrue(self.ipv4_interface !=
                    ipaddress.IPv4Interface('1.2.3.4/23'))
    self.assertTrue(self.ipv4_interface !=
                    ipaddress.IPv6Interface('::1.2.3.4/24'))
    self.assertTrue(self.ipv4_interface != '')
    self.assertTrue(self.ipv4_interface != [])
    self.assertTrue(self.ipv4_interface != 2)
    self.assertTrue(self.ipv4_address !=
                    ipaddress.IPv4Address('1.2.3.5'))
    self.assertTrue(self.ipv4_address != '')
    self.assertTrue(self.ipv4_address != [])
    self.assertTrue(self.ipv4_address != 2)
    self.assertFalse(self.ipv6_interface !=
                     ipaddress.IPv6Interface('2001:658:22a:cafe:200::1/64'))
    self.assertTrue(self.ipv6_interface !=
                    ipaddress.IPv6Interface('2001:658:22a:cafe:200::1/63'))
    self.assertTrue(self.ipv6_interface !=
                    ipaddress.IPv4Interface('1.2.3.4/23'))
    self.assertTrue(self.ipv6_interface != '')
    self.assertTrue(self.ipv6_interface != [])
    self.assertTrue(self.ipv6_interface != 2)
    self.assertTrue(self.ipv6_address !=
                    ipaddress.IPv4Address('1.2.3.4'))
    self.assertTrue(self.ipv6_address != '')
    self.assertTrue(self.ipv6_address != [])
    self.assertTrue(self.ipv6_address != 2)
def testSlash32Constructor(self):
    """A dotted-quad all-ones netmask collapses to /32 in str()."""
    self.assertEqual(str(ipaddress.IPv4Interface(
        '1.2.3.4/255.255.255.255')), '1.2.3.4/32')

def testSlash128Constructor(self):
    """An explicit /128 prefix is preserved in str()."""
    self.assertEqual(str(ipaddress.IPv6Interface('::1/128')),
                     '::1/128')

def testSlash0Constructor(self):
    """An all-zero netmask collapses to /0 in str()."""
    self.assertEqual(str(ipaddress.IPv4Interface('1.2.3.4/0.0.0.0')),
                     '1.2.3.4/0')
def testCollapsing(self):
    """collapse_addresses merges duplicates, adjacent addresses and
    overlapping networks into the minimal covering set of networks."""
    # test only IP addresses including some duplicates
    ip1 = ipaddress.IPv4Address('1.1.1.0')
    ip2 = ipaddress.IPv4Address('1.1.1.1')
    ip3 = ipaddress.IPv4Address('1.1.1.2')
    ip4 = ipaddress.IPv4Address('1.1.1.3')
    ip5 = ipaddress.IPv4Address('1.1.1.4')
    ip6 = ipaddress.IPv4Address('1.1.1.0')
    # check that addresses are subsumed properly.
    collapsed = ipaddress.collapse_addresses(
        [ip1, ip2, ip3, ip4, ip5, ip6])
    self.assertEqual(list(collapsed),
                     [ipaddress.IPv4Network('1.1.1.0/30'),
                      ipaddress.IPv4Network('1.1.1.4/32')])
    # test a mix of IP addresses and networks including some duplicates
    ip1 = ipaddress.IPv4Address('1.1.1.0')
    ip2 = ipaddress.IPv4Address('1.1.1.1')
    ip3 = ipaddress.IPv4Address('1.1.1.2')
    ip4 = ipaddress.IPv4Address('1.1.1.3')
    #ip5 = ipaddress.IPv4Interface('1.1.1.4/30')
    #ip6 = ipaddress.IPv4Interface('1.1.1.4/30')
    # check that addresses are subsumed properly.
    collapsed = ipaddress.collapse_addresses([ip1, ip2, ip3, ip4])
    self.assertEqual(list(collapsed),
                     [ipaddress.IPv4Network('1.1.1.0/30')])
    # test only IP networks
    ip1 = ipaddress.IPv4Network('1.1.0.0/24')
    ip2 = ipaddress.IPv4Network('1.1.1.0/24')
    ip3 = ipaddress.IPv4Network('1.1.2.0/24')
    ip4 = ipaddress.IPv4Network('1.1.3.0/24')
    ip5 = ipaddress.IPv4Network('1.1.4.0/24')
    # stored in no particular order b/c we want CollapseAddr to call
    # [].sort
    ip6 = ipaddress.IPv4Network('1.1.0.0/22')
    # check that addresses are subsumed properly.
    collapsed = ipaddress.collapse_addresses([ip1, ip2, ip3, ip4, ip5,
                                              ip6])
    self.assertEqual(list(collapsed),
                     [ipaddress.IPv4Network('1.1.0.0/22'),
                      ipaddress.IPv4Network('1.1.4.0/24')])
    # test that two addresses are supernet'ed properly
    collapsed = ipaddress.collapse_addresses([ip1, ip2])
    self.assertEqual(list(collapsed),
                     [ipaddress.IPv4Network('1.1.0.0/23')])
    # test same IP networks
    ip_same1 = ip_same2 = ipaddress.IPv4Network('1.1.1.1/32')
    self.assertEqual(list(ipaddress.collapse_addresses(
        [ip_same1, ip_same2])),
        [ip_same1])
    # test same IP addresses
    ip_same1 = ip_same2 = ipaddress.IPv4Address('1.1.1.1')
    self.assertEqual(list(ipaddress.collapse_addresses(
        [ip_same1, ip_same2])),
        [ipaddress.ip_network('1.1.1.1/32')])
    ip1 = ipaddress.IPv6Network('2001::/100')
    ip2 = ipaddress.IPv6Network('2001::/120')
    ip3 = ipaddress.IPv6Network('2001::/96')
    # test that ipv6 addresses are subsumed properly.
    collapsed = ipaddress.collapse_addresses([ip1, ip2, ip3])
    self.assertEqual(list(collapsed), [ip3])
    # the toejam test: mixing IPv4 and IPv6 operands must raise TypeError
    addr_tuples = [
        (ipaddress.ip_address('1.1.1.1'),
         ipaddress.ip_address('::1')),
        (ipaddress.IPv4Network('1.1.0.0/24'),
         ipaddress.IPv6Network('2001::/120')),
        (ipaddress.IPv4Network('1.1.0.0/32'),
         ipaddress.IPv6Network('2001::/128')),
    ]
    for ip1, ip2 in addr_tuples:
        self.assertRaises(TypeError, ipaddress.collapse_addresses,
                          [ip1, ip2])
def testSummarizing(self):
#ip = ipaddress.ip_address
#ipnet = ipaddress.ip_network
summarize = ipaddress.summarize_address_range
ip1 = ipaddress.ip_address('1.1.1.0')
ip2 = ipaddress.ip_address('1.1.1.255')
# summarize works only for IPv4 & IPv6
class IPv7Address(ipaddress.IPv6Address):
@property
def version(self):
return 7
ip_invalid1 = IPv7Address('::1')
ip_invalid2 = IPv7Address('::1')
self.assertRaises(ValueError, list,
summarize(ip_invalid1, ip_invalid2))
# test that a summary over ip4 & ip6 fails
self.assertRaises(TypeError, list,
summarize(ip1, ipaddress.IPv6Address('::1')))
# test a /24 is summarized properly
self.assertEqual(list(summarize(ip1, ip2))[0],
ipaddress.ip_network('1.1.1.0/24'))
# test an IPv4 range that isn't on a network byte boundary
ip2 = ipaddress.ip_address('1.1.1.8')
self.assertEqual(list(summarize(ip1, ip2)),
[ipaddress.ip_network('1.1.1.0/29'),
ipaddress.ip_network('1.1.1.8')])
# all!
ip1 = ipaddress.IPv4Address(0)
ip2 = ipaddress.IPv4Address(ipaddress.IPv4Address._ALL_ONES)
self.assertEqual([ipaddress.IPv4Network('0.0.0.0/0')],
list(summarize(ip1, ip2)))
ip1 = ipaddress.ip_address('1::')
ip2 = ipaddress.ip_address('1:ffff:ffff:ffff:ffff:ffff:ffff:ffff')
# test a IPv6 is sumamrized properly
self.assertEqual(list(summarize(ip1, ip2))[0],
ipaddress.ip_network('1::/16'))
# test an IPv6 range that isn't on a network byte boundary
ip2 = ipaddress.ip_address('2::')
self.assertEqual(list(summarize(ip1, ip2)),
[ipaddress.ip_network('1::/16'),
ipaddress.ip_network('2::/128')])
# test exception raised when first is greater than last
self.assertRaises(ValueError, list,
summarize(ipaddress.ip_address('1.1.1.0'),
ipaddress.ip_address('1.1.0.0')))
# test exception raised when first and last aren't IP addresses
self.assertRaises(TypeError, list,
summarize(ipaddress.ip_network('1.1.1.0'),
ipaddress.ip_network('1.1.0.0')))
self.assertRaises(TypeError, list,
summarize(ipaddress.ip_network('1.1.1.0'),
ipaddress.ip_network('1.1.0.0')))
# test exception raised when first and last are not same version
self.assertRaises(TypeError, list,
summarize(ipaddress.ip_address('::'),
ipaddress.ip_network('1.1.0.0')))
def testAddressComparison(self):
    """<= is reflexive and follows numeric order for addresses."""
    self.assertTrue(ipaddress.ip_address('1.1.1.1') <=
                    ipaddress.ip_address('1.1.1.1'))
    self.assertTrue(ipaddress.ip_address('1.1.1.1') <=
                    ipaddress.ip_address('1.1.1.2'))
    self.assertTrue(ipaddress.ip_address('::1') <=
                    ipaddress.ip_address('::1'))
    self.assertTrue(ipaddress.ip_address('::1') <=
                    ipaddress.ip_address('::2'))

def testInterfaceComparison(self):
    """<= is reflexive and follows numeric order for interfaces."""
    self.assertTrue(ipaddress.ip_interface('1.1.1.1') <=
                    ipaddress.ip_interface('1.1.1.1'))
    self.assertTrue(ipaddress.ip_interface('1.1.1.1') <=
                    ipaddress.ip_interface('1.1.1.2'))
    self.assertTrue(ipaddress.ip_interface('::1') <=
                    ipaddress.ip_interface('::1'))
    self.assertTrue(ipaddress.ip_interface('::1') <=
                    ipaddress.ip_interface('::2'))
def testNetworkComparison(self):
# ip1 and ip2 have the same network address
ip1 = ipaddress.IPv4Network('1.1.1.0/24')
ip2 = ipaddress.IPv4Network('1.1.1.0/32')
ip3 = ipaddress.IPv4Network('1.1.2.0/24')
self.assertTrue(ip1 < ip3)
self.assertTrue(ip3 > ip2)
self.assertEqual(ip1.compare_networks(ip1), 0)
# if addresses are the same, sort by netmask
self.assertEqual(ip1.compare_networks(ip2), -1)
self.assertEqual(ip2.compare_networks(ip1), 1)
self.assertEqual(ip1.compare_networks(ip3), -1)
self.assertEqual(ip3.compare_networks(ip1), 1)
self.assertTrue(ip1._get_networks_key() < ip3._get_networks_key())
ip1 = ipaddress.IPv6Network('2001:2000::/96')
ip2 = ipaddress.IPv6Network('2001:2001::/96')
ip3 = ipaddress.IPv6Network('2001:ffff:2000::/96')
self.assertTrue(ip1 < ip3)
self.assertTrue(ip3 > ip2)
self.assertEqual(ip1.compare_networks(ip3), -1)
self.assertTrue(ip1._get_networks_key() < ip3._get_networks_key())
# Test comparing different protocols.
# Should always raise a TypeError.
self.assertRaises(TypeError,
self.ipv4_network.compare_networks,
self.ipv6_network)
ipv6 = ipaddress.IPv6Interface('::/0')
ipv4 = ipaddress.IPv4Interface('0.0.0.0/0')
self.assertRaises(TypeError, ipv4.__lt__, ipv6)
self.assertRaises(TypeError, ipv4.__gt__, ipv6)
self.assertRaises(TypeError, ipv6.__lt__, ipv4)
self.assertRaises(TypeError, ipv6.__gt__, ipv4)
# Regression test for issue 19.
ip1 = ipaddress.ip_network('10.1.2.128/25')
self.assertFalse(ip1 < ip1)
self.assertFalse(ip1 > ip1)
ip2 = ipaddress.ip_network('10.1.3.0/24')
self.assertTrue(ip1 < ip2)
self.assertFalse(ip2 < ip1)
self.assertFalse(ip1 > ip2)
self.assertTrue(ip2 > ip1)
ip3 = ipaddress.ip_network('10.1.3.0/25')
self.assertTrue(ip2 < ip3)
self.assertFalse(ip3 < ip2)
self.assertFalse(ip2 > ip3)
self.assertTrue(ip3 > ip2)
# Regression test for issue 28.
ip1 = ipaddress.ip_network('10.10.10.0/31')
ip2 = ipaddress.ip_network('10.10.10.0')
ip3 = ipaddress.ip_network('10.10.10.2/31')
ip4 = ipaddress.ip_network('10.10.10.2')
sorted = [ip1, ip2, ip3, ip4]
unsorted = [ip2, ip4, ip1, ip3]
unsorted.sort()
self.assertEqual(sorted, unsorted)
unsorted = [ip4, ip1, ip3, ip2]
unsorted.sort()
self.assertEqual(sorted, unsorted)
self.assertRaises(TypeError, ip1.__lt__,
ipaddress.ip_address('10.10.10.0'))
self.assertRaises(TypeError, ip2.__lt__,
ipaddress.ip_address('10.10.10.0'))
# <=, >=
self.assertTrue(ipaddress.ip_network('1.1.1.1') <=
ipaddress.ip_network('1.1.1.1'))
self.assertTrue(ipaddress.ip_network('1.1.1.1') <=
ipaddress.ip_network('1.1.1.2'))
self.assertFalse(ipaddress.ip_network('1.1.1.2') <=
ipaddress.ip_network('1.1.1.1'))
self.assertTrue(ipaddress.ip_network('::1') <=
ipaddress.ip_network('::1'))
self.assertTrue(ipaddress.ip_network('::1') <=
ipaddress.ip_network('::2'))
self.assertFalse(ipaddress.ip_network('::2') <=
ipaddress.ip_network('::1'))
def testStrictNetworks(self):
    """ip_network (strict by default) rejects set host bits below the mask."""
    self.assertRaises(ValueError, ipaddress.ip_network, '192.168.1.1/24')
    self.assertRaises(ValueError, ipaddress.ip_network, '::1/120')

def testOverlaps(self):
    """overlaps() is True iff the two networks share at least one address."""
    other = ipaddress.IPv4Network('1.2.3.0/30')
    other2 = ipaddress.IPv4Network('1.2.2.0/24')
    other3 = ipaddress.IPv4Network('1.2.2.64/26')
    self.assertTrue(self.ipv4_network.overlaps(other))
    self.assertFalse(self.ipv4_network.overlaps(other2))
    self.assertTrue(other2.overlaps(other3))
def testEmbeddedIpv4(self):
    """v4-compatible (::a.b.c.d) and v4-mapped (::ffff:a.b.c.d) forms
    embed a dotted quad; malformed embeddings raise AddressValueError."""
    ipv4_string = '192.168.0.1'
    ipv4 = ipaddress.IPv4Interface(ipv4_string)
    v4compat_ipv6 = ipaddress.IPv6Interface('::%s' % ipv4_string)
    self.assertEqual(int(v4compat_ipv6.ip), int(ipv4.ip))
    v4mapped_ipv6 = ipaddress.IPv6Interface('::ffff:%s' % ipv4_string)
    self.assertNotEqual(v4mapped_ipv6.ip, ipv4.ip)
    self.assertRaises(ipaddress.AddressValueError, ipaddress.IPv6Interface,
                      '2001:1.1.1.1:1.1.1.1')

def testIPv6AddressTooLarge(self):
    """Embedded dotted-quad forms parse to the same value as pure hex."""
    # Issue 67: IPv6 with embedded IPv4 address not recognized.
    # RFC4291 2.5.5.2
    self.assertEqual(ipaddress.ip_address('::FFFF:192.0.2.1'),
                     ipaddress.ip_address('::FFFF:c000:201'))
    # RFC4291 2.2 (part 3) x::d.d.d.d
    self.assertEqual(ipaddress.ip_address('FFFF::192.0.2.1'),
                     ipaddress.ip_address('FFFF::c000:201'))

def testIPVersion(self):
    """version is 4 for IPv4 objects and 6 for IPv6 objects."""
    self.assertEqual(self.ipv4_address.version, 4)
    self.assertEqual(self.ipv6_address.version, 6)
def testMaxPrefixLength(self):
    """max_prefixlen is 32 for IPv4 and 128 for IPv6."""
    self.assertEqual(self.ipv4_interface.max_prefixlen, 32)
    self.assertEqual(self.ipv6_interface.max_prefixlen, 128)

def testPacked(self):
    """packed yields the big-endian binary form of the address."""
    self.assertEqual(self.ipv4_address.packed,
                     b'\x01\x02\x03\x04')
    self.assertEqual(ipaddress.IPv4Interface('255.254.253.252').packed,
                     b'\xff\xfe\xfd\xfc')
    self.assertEqual(self.ipv6_address.packed,
                     b'\x20\x01\x06\x58\x02\x2a\xca\xfe'
                     b'\x02\x00\x00\x00\x00\x00\x00\x01')
    self.assertEqual(ipaddress.IPv6Interface('ffff:2:3:4:ffff::').packed,
                     b'\xff\xff\x00\x02\x00\x03\x00\x04\xff\xff'
                     + b'\x00' * 6)
    self.assertEqual(ipaddress.IPv6Interface('::1:0:0:0:0').packed,
                     b'\x00' * 6 + b'\x00\x01' + b'\x00' * 8)

def testIpType(self):
    """The ip_network/ip_address factories dispatch to the right class."""
    ipv4net = ipaddress.ip_network('1.2.3.4')
    ipv4addr = ipaddress.ip_address('1.2.3.4')
    ipv6net = ipaddress.ip_network('::1.2.3.4')
    ipv6addr = ipaddress.ip_address('::1.2.3.4')
    self.assertEqual(ipaddress.IPv4Network, type(ipv4net))
    self.assertEqual(ipaddress.IPv4Address, type(ipv4addr))
    self.assertEqual(ipaddress.IPv6Network, type(ipv6net))
    self.assertEqual(ipaddress.IPv6Address, type(ipv6addr))
def testReservedIpv4(self):
    """Spot-checks is_multicast/is_private/is_reserved/is_link_local/
    is_loopback/is_global boundaries on IPv4 networks and addresses."""
    # test networks
    self.assertEqual(True, ipaddress.ip_interface(
        '224.1.1.1/31').is_multicast)
    self.assertEqual(False, ipaddress.ip_network('240.0.0.0').is_multicast)
    self.assertEqual(True, ipaddress.ip_network('240.0.0.0').is_reserved)
    self.assertEqual(True, ipaddress.ip_interface(
        '192.168.1.1/17').is_private)
    self.assertEqual(False, ipaddress.ip_network('192.169.0.0').is_private)
    self.assertEqual(True, ipaddress.ip_network(
        '10.255.255.255').is_private)
    self.assertEqual(False, ipaddress.ip_network('11.0.0.0').is_private)
    self.assertEqual(False, ipaddress.ip_network('11.0.0.0').is_reserved)
    self.assertEqual(True, ipaddress.ip_network(
        '172.31.255.255').is_private)
    self.assertEqual(False, ipaddress.ip_network('172.32.0.0').is_private)
    self.assertEqual(True,
                     ipaddress.ip_network('169.254.1.0/24').is_link_local)
    self.assertEqual(True,
                     ipaddress.ip_interface(
                         '169.254.100.200/24').is_link_local)
    self.assertEqual(False,
                     ipaddress.ip_interface(
                         '169.255.100.200/24').is_link_local)
    self.assertEqual(True,
                     ipaddress.ip_network(
                         '127.100.200.254/32').is_loopback)
    self.assertEqual(True, ipaddress.ip_network(
        '127.42.0.0/16').is_loopback)
    self.assertEqual(False, ipaddress.ip_network('128.0.0.0').is_loopback)
    self.assertEqual(False,
                     ipaddress.ip_network('100.64.0.0/10').is_private)
    self.assertEqual(False, ipaddress.ip_network('100.64.0.0/10').is_global)
    self.assertEqual(True,
                     ipaddress.ip_network('192.0.2.128/25').is_private)
    self.assertEqual(True,
                     ipaddress.ip_network('192.0.3.0/24').is_global)
    # test addresses
    self.assertEqual(True, ipaddress.ip_address('0.0.0.0').is_unspecified)
    self.assertEqual(True, ipaddress.ip_address('224.1.1.1').is_multicast)
    self.assertEqual(False, ipaddress.ip_address('240.0.0.0').is_multicast)
    self.assertEqual(True, ipaddress.ip_address('240.0.0.1').is_reserved)
    self.assertEqual(False,
                     ipaddress.ip_address('239.255.255.255').is_reserved)
    self.assertEqual(True, ipaddress.ip_address('192.168.1.1').is_private)
    self.assertEqual(False, ipaddress.ip_address('192.169.0.0').is_private)
    self.assertEqual(True, ipaddress.ip_address(
        '10.255.255.255').is_private)
    self.assertEqual(False, ipaddress.ip_address('11.0.0.0').is_private)
    self.assertEqual(True, ipaddress.ip_address(
        '172.31.255.255').is_private)
    self.assertEqual(False, ipaddress.ip_address('172.32.0.0').is_private)
    self.assertEqual(True,
                     ipaddress.ip_address('169.254.100.200').is_link_local)
    self.assertEqual(False,
                     ipaddress.ip_address('169.255.100.200').is_link_local)
    self.assertEqual(True,
                     ipaddress.ip_address('127.100.200.254').is_loopback)
    self.assertEqual(True, ipaddress.ip_address('127.42.0.0').is_loopback)
    self.assertEqual(False, ipaddress.ip_address('128.0.0.0').is_loopback)
    self.assertEqual(True, ipaddress.ip_network('0.0.0.0').is_unspecified)
def testReservedIpv6(self):
    """Spot-checks is_multicast/is_site_local/is_private/is_link_local/
    is_loopback/is_unspecified/is_reserved boundaries on IPv6 values."""
    self.assertEqual(True, ipaddress.ip_network('ffff::').is_multicast)
    self.assertEqual(True, ipaddress.ip_network(2**128 - 1).is_multicast)
    self.assertEqual(True, ipaddress.ip_network('ff00::').is_multicast)
    self.assertEqual(False, ipaddress.ip_network('fdff::').is_multicast)
    self.assertEqual(True, ipaddress.ip_network('fecf::').is_site_local)
    self.assertEqual(True, ipaddress.ip_network(
        'feff:ffff:ffff:ffff::').is_site_local)
    self.assertEqual(False, ipaddress.ip_network(
        'fbf:ffff::').is_site_local)
    self.assertEqual(False, ipaddress.ip_network('ff00::').is_site_local)
    self.assertEqual(True, ipaddress.ip_network('fc00::').is_private)
    self.assertEqual(True, ipaddress.ip_network(
        'fc00:ffff:ffff:ffff::').is_private)
    self.assertEqual(False, ipaddress.ip_network('fbff:ffff::').is_private)
    self.assertEqual(False, ipaddress.ip_network('fe00::').is_private)
    self.assertEqual(True, ipaddress.ip_network('fea0::').is_link_local)
    self.assertEqual(True, ipaddress.ip_network(
        'febf:ffff::').is_link_local)
    self.assertEqual(False, ipaddress.ip_network(
        'fe7f:ffff::').is_link_local)
    self.assertEqual(False, ipaddress.ip_network('fec0::').is_link_local)
    self.assertEqual(True, ipaddress.ip_interface('0:0::0:01').is_loopback)
    self.assertEqual(False, ipaddress.ip_interface('::1/127').is_loopback)
    self.assertEqual(False, ipaddress.ip_network('::').is_loopback)
    self.assertEqual(False, ipaddress.ip_network('::2').is_loopback)
    self.assertEqual(True, ipaddress.ip_network('0::0').is_unspecified)
    self.assertEqual(False, ipaddress.ip_network('::1').is_unspecified)
    self.assertEqual(False, ipaddress.ip_network('::/127').is_unspecified)
    self.assertEqual(True,
                     ipaddress.ip_network('2001::1/128').is_private)
    self.assertEqual(True,
                     ipaddress.ip_network('200::1/128').is_global)
    # test addresses
    self.assertEqual(True, ipaddress.ip_address('ffff::').is_multicast)
    self.assertEqual(True, ipaddress.ip_address(2**128 - 1).is_multicast)
    self.assertEqual(True, ipaddress.ip_address('ff00::').is_multicast)
    self.assertEqual(False, ipaddress.ip_address('fdff::').is_multicast)
    self.assertEqual(True, ipaddress.ip_address('fecf::').is_site_local)
    self.assertEqual(True, ipaddress.ip_address(
        'feff:ffff:ffff:ffff::').is_site_local)
    self.assertEqual(False, ipaddress.ip_address(
        'fbf:ffff::').is_site_local)
    self.assertEqual(False, ipaddress.ip_address('ff00::').is_site_local)
    self.assertEqual(True, ipaddress.ip_address('fc00::').is_private)
    self.assertEqual(True, ipaddress.ip_address(
        'fc00:ffff:ffff:ffff::').is_private)
    self.assertEqual(False, ipaddress.ip_address('fbff:ffff::').is_private)
    self.assertEqual(False, ipaddress.ip_address('fe00::').is_private)
    self.assertEqual(True, ipaddress.ip_address('fea0::').is_link_local)
    self.assertEqual(True, ipaddress.ip_address(
        'febf:ffff::').is_link_local)
    self.assertEqual(False, ipaddress.ip_address(
        'fe7f:ffff::').is_link_local)
    self.assertEqual(False, ipaddress.ip_address('fec0::').is_link_local)
    self.assertEqual(True, ipaddress.ip_address('0:0::0:01').is_loopback)
    self.assertEqual(True, ipaddress.ip_address('::1').is_loopback)
    self.assertEqual(False, ipaddress.ip_address('::2').is_loopback)
    self.assertEqual(True, ipaddress.ip_address('0::0').is_unspecified)
    self.assertEqual(False, ipaddress.ip_address('::1').is_unspecified)
    # some generic IETF reserved addresses
    self.assertEqual(True, ipaddress.ip_address('100::').is_reserved)
    self.assertEqual(True, ipaddress.ip_network('4000::1/128').is_reserved)
def testIpv4Mapped(self):
    """ipv4_mapped is set only for ::ffff:a.b.c.d style addresses."""
    self.assertEqual(
        ipaddress.ip_address('::ffff:192.168.1.1').ipv4_mapped,
        ipaddress.ip_address('192.168.1.1'))
    self.assertEqual(ipaddress.ip_address('::c0a8:101').ipv4_mapped, None)
    self.assertEqual(ipaddress.ip_address('::ffff:c0a8:101').ipv4_mapped,
                     ipaddress.ip_address('192.168.1.1'))

def testAddrExclude(self):
    """address_exclude subtracts a subnet, yielding the remainder networks;
    non-subnets, bare addresses and version mixes raise."""
    addr1 = ipaddress.ip_network('10.1.1.0/24')
    addr2 = ipaddress.ip_network('10.1.1.0/26')
    addr3 = ipaddress.ip_network('10.2.1.0/24')
    addr4 = ipaddress.ip_address('10.1.1.0')
    addr5 = ipaddress.ip_network('2001:db8::0/32')
    self.assertEqual(sorted(list(addr1.address_exclude(addr2))),
                     [ipaddress.ip_network('10.1.1.64/26'),
                      ipaddress.ip_network('10.1.1.128/25')])
    self.assertRaises(ValueError, list, addr1.address_exclude(addr3))
    self.assertRaises(TypeError, list, addr1.address_exclude(addr4))
    self.assertRaises(TypeError, list, addr1.address_exclude(addr5))
    self.assertEqual(list(addr1.address_exclude(addr1)), [])
def testHash(self):
    """Equal objects hash equal and are usable as dict keys."""
    self.assertEqual(hash(ipaddress.ip_interface('10.1.1.0/24')),
                     hash(ipaddress.ip_interface('10.1.1.0/24')))
    self.assertEqual(hash(ipaddress.ip_network('10.1.1.0/24')),
                     hash(ipaddress.ip_network('10.1.1.0/24')))
    self.assertEqual(hash(ipaddress.ip_address('10.1.1.0')),
                     hash(ipaddress.ip_address('10.1.1.0')))
    # i70
    self.assertEqual(hash(ipaddress.ip_address('1.2.3.4')),
                     hash(ipaddress.ip_address(
                         int(ipaddress.ip_address('1.2.3.4')._ip))))
    ip1 = ipaddress.ip_address('10.1.1.0')
    ip2 = ipaddress.ip_address('1::')
    dummy = {}
    dummy[self.ipv4_address] = None
    dummy[self.ipv6_address] = None
    dummy[ip1] = None
    dummy[ip2] = None
    self.assertIn(self.ipv4_address, dummy)
    self.assertIn(ip2, dummy)
def testIPBases(self):
    """compressed form and _string_from_ip_int bounds checking."""
    net = self.ipv4_network
    self.assertEqual('1.2.3.0/24', net.compressed)
    net = self.ipv6_network
    self.assertRaises(ValueError, net._string_from_ip_int, 2**128 + 1)

def testIPv6NetworkHelpers(self):
    """with_prefixlen/with_netmask/with_hostmask on an IPv6 network."""
    net = self.ipv6_network
    self.assertEqual('2001:658:22a:cafe::/64', net.with_prefixlen)
    self.assertEqual('2001:658:22a:cafe::/ffff:ffff:ffff:ffff::',
                     net.with_netmask)
    self.assertEqual('2001:658:22a:cafe::/::ffff:ffff:ffff:ffff',
                     net.with_hostmask)
    self.assertEqual('2001:658:22a:cafe::/64', str(net))

def testIPv4NetworkHelpers(self):
    """with_prefixlen/with_netmask/with_hostmask on an IPv4 network."""
    net = self.ipv4_network
    self.assertEqual('1.2.3.0/24', net.with_prefixlen)
    self.assertEqual('1.2.3.0/255.255.255.0', net.with_netmask)
    self.assertEqual('1.2.3.0/0.0.0.255', net.with_hostmask)
    self.assertEqual('1.2.3.0/24', str(net))
def testCopyConstructor(self):
    """Factory functions and classes accept an existing object unchanged."""
    addr1 = ipaddress.ip_network('10.1.1.0/24')
    addr2 = ipaddress.ip_network(addr1)
    addr3 = ipaddress.ip_interface('2001:658:22a:cafe:200::1/64')
    addr4 = ipaddress.ip_interface(addr3)
    addr5 = ipaddress.IPv4Address('1.1.1.1')
    addr6 = ipaddress.IPv6Address('2001:658:22a:cafe:200::1')
    self.assertEqual(addr1, addr2)
    self.assertEqual(addr3, addr4)
    self.assertEqual(addr5, ipaddress.IPv4Address(addr5))
    self.assertEqual(addr6, ipaddress.IPv6Address(addr6))
def testCompressIPv6Address(self):
test_addresses = {
'1:2:3:4:5:6:7:8': '1:2:3:4:5:6:7:8/128',
'2001:0:0:4:0:0:0:8': '2001:0:0:4::8/128',
'2001:0:0:4:5:6:7:8': '2001::4:5:6:7:8/128',
'2001:0:3:4:5:6:7:8': '2001:0:3:4:5:6:7:8/128',
'2001:0:3:4:5:6:7:8': '2001:0:3:4:5:6:7:8/128',
'0:0:3:0:0:0:0:ffff': '0:0:3::ffff/128',
'0:0:0:4:0:0:0:ffff': '::4:0:0:0:ffff/128',
'0:0:0:0:5:0:0:ffff': '::5:0:0:ffff/128',
'1:0:0:4:0:0:7:8': '1::4:0:0:7:8/128',
'0:0:0:0:0:0:0:0': '::/128',
'0:0:0:0:0:0:0:0/0': '::/0',
'0:0:0:0:0:0:0:1': '::1/128',
'2001:0658:022a:cafe:0000:0000:0000:0000/66':
'2001:658:22a:cafe::/66',
'::1.2.3.4': '::102:304/128',
'1:2:3:4:5:ffff:1.2.3.4': '1:2:3:4:5:ffff:102:304/128',
'::7:6:5:4:3:2:1': '0:7:6:5:4:3:2:1/128',
'::7:6:5:4:3:2:0': '0:7:6:5:4:3:2:0/128',
'7:6:5:4:3:2:1::': '7:6:5:4:3:2:1:0/128',
'0:6:5:4:3:2:1::': '0:6:5:4:3:2:1:0/128',
}
for uncompressed, compressed in list(test_addresses.items()):
self.assertEqual(compressed, str(ipaddress.IPv6Interface(
uncompressed)))
def testExplodeShortHandIpStr(self):
    """exploded writes every hextet with full zero padding."""
    addr1 = ipaddress.IPv6Interface('2001::1')
    addr2 = ipaddress.IPv6Address('2001:0:5ef5:79fd:0:59d:a0e5:ba1')
    addr3 = ipaddress.IPv6Network('2001::/96')
    addr4 = ipaddress.IPv4Address('192.168.178.1')
    self.assertEqual('2001:0000:0000:0000:0000:0000:0000:0001/128',
                     addr1.exploded)
    self.assertEqual('0000:0000:0000:0000:0000:0000:0000:0001/128',
                     ipaddress.IPv6Interface('::1/128').exploded)
    # issue 77
    self.assertEqual('2001:0000:5ef5:79fd:0000:059d:a0e5:0ba1',
                     addr2.exploded)
    self.assertEqual('2001:0000:0000:0000:0000:0000:0000:0000/96',
                     addr3.exploded)
    self.assertEqual('192.168.178.1', addr4.exploded)

def testIntRepresentation(self):
    """int() yields the packed numeric value of the address."""
    self.assertEqual(16909060, int(self.ipv4_address))
    self.assertEqual(42540616829182469433547762482097946625,
                     int(self.ipv6_address))

def testForceVersion(self):
    """Integer input defaults to IPv4 unless a v6 class is used explicitly."""
    self.assertEqual(ipaddress.ip_network(1).version, 4)
    self.assertEqual(ipaddress.IPv6Network(1).version, 6)
def testWithStar(self):
    """with_prefixlen/with_netmask/with_hostmask string forms on interfaces."""
    self.assertEqual(self.ipv4_interface.with_prefixlen, "1.2.3.4/24")
    self.assertEqual(self.ipv4_interface.with_netmask,
                     "1.2.3.4/255.255.255.0")
    self.assertEqual(self.ipv4_interface.with_hostmask,
                     "1.2.3.4/0.0.0.255")
    self.assertEqual(self.ipv6_interface.with_prefixlen,
                     '2001:658:22a:cafe:200::1/64')
    self.assertEqual(self.ipv6_interface.with_netmask,
                     '2001:658:22a:cafe:200::1/ffff:ffff:ffff:ffff::')
    # this probably doesn't make much sense, but it's included for
    # compatibility with ipv4
    self.assertEqual(self.ipv6_interface.with_hostmask,
                     '2001:658:22a:cafe:200::1/::ffff:ffff:ffff:ffff')
def testNetworkElementCaching(self):
    """Derived attributes are computed lazily and memoized in _cache."""
    # V4 - make sure we're empty
    self.assertNotIn('network_address', self.ipv4_network._cache)
    self.assertNotIn('broadcast_address', self.ipv4_network._cache)
    self.assertNotIn('hostmask', self.ipv4_network._cache)
    # V4 - populate and test
    self.assertEqual(self.ipv4_network.network_address,
                     ipaddress.IPv4Address('1.2.3.0'))
    self.assertEqual(self.ipv4_network.broadcast_address,
                     ipaddress.IPv4Address('1.2.3.255'))
    self.assertEqual(self.ipv4_network.hostmask,
                     ipaddress.IPv4Address('0.0.0.255'))
    # V4 - check we're cached
    self.assertIn('broadcast_address', self.ipv4_network._cache)
    self.assertIn('hostmask', self.ipv4_network._cache)
    # V6 - make sure we're empty
    self.assertNotIn('broadcast_address', self.ipv6_network._cache)
    self.assertNotIn('hostmask', self.ipv6_network._cache)
    # V6 - populate and test
    self.assertEqual(self.ipv6_network.network_address,
                     ipaddress.IPv6Address('2001:658:22a:cafe::'))
    self.assertEqual(self.ipv6_interface.network.network_address,
                     ipaddress.IPv6Address('2001:658:22a:cafe::'))
    self.assertEqual(
        self.ipv6_network.broadcast_address,
        ipaddress.IPv6Address('2001:658:22a:cafe:ffff:ffff:ffff:ffff'))
    self.assertEqual(self.ipv6_network.hostmask,
                     ipaddress.IPv6Address('::ffff:ffff:ffff:ffff'))
    self.assertEqual(
        self.ipv6_interface.network.broadcast_address,
        ipaddress.IPv6Address('2001:658:22a:cafe:ffff:ffff:ffff:ffff'))
    self.assertEqual(self.ipv6_interface.network.hostmask,
                     ipaddress.IPv6Address('::ffff:ffff:ffff:ffff'))
    # V6 - check we're cached
    self.assertIn('broadcast_address', self.ipv6_network._cache)
    self.assertIn('hostmask', self.ipv6_network._cache)
    self.assertIn('broadcast_address', self.ipv6_interface.network._cache)
    self.assertIn('hostmask', self.ipv6_interface.network._cache)
def testTeredo(self):
# stolen from wikipedia
server = ipaddress.IPv4Address('65.54.227.120')
client = ipaddress.IPv4Address('192.0.2.45')
teredo_addr = '2001:0000:4136:e378:8000:63bf:3fff:fdd2'
self.assertEqual((server, client),
ipaddress.ip_address(teredo_addr).teredo)
bad_addr = '2000::4136:e378:8000:63bf:3fff:fdd2'
self.assertFalse(ipaddress.ip_address(bad_addr).teredo)
bad_addr = '2001:0001:4136:e378:8000:63bf:3fff:fdd2'
self.assertFalse(ipaddress.ip_address(bad_addr).teredo)
# i77
teredo_addr = ipaddress.IPv6Address('2001:0:5ef5:79fd:0:59d:a0e5:ba1')
self.assertEqual((ipaddress.IPv4Address('94.245.121.253'),
ipaddress.IPv4Address('95.26.244.94')),
teredo_addr.teredo)
    def testsixtofour(self):
        """Check IPv6Address.sixtofour extraction of the embedded IPv4 address."""
        # 2002::/16 is the 6to4 prefix; the next 32 bits embed the IPv4 address
        sixtofouraddr = ipaddress.ip_address('2002:ac1d:2d64::1')
        # Not under 2002::/16 -> .sixtofour is falsy
        bad_addr = ipaddress.ip_address('2000:ac1d:2d64::1')
        self.assertEqual(ipaddress.IPv4Address('172.29.45.100'),
                         sixtofouraddr.sixtofour)
        self.assertFalse(bad_addr.sixtofour)
if __name__ == '__main__':
unittest.main()
| lgpl-3.0 |
liorvh/raspberry_pwn | src/pentest/metagoofil/hachoir_parser/audio/id3.py | 9 | 16092 | """
ID3 metadata parser, supported versions: 1.O, 2.2, 2.3 and 2.4
Informations: http://www.id3.org/
Author: Victor Stinner
"""
from hachoir_core.field import (FieldSet, MatchError, ParserError,
Enum, UInt8, UInt24, UInt32,
CString, String, RawBytes,
Bit, Bits, NullBytes, NullBits)
from hachoir_core.text_handler import textHandler
from hachoir_core.tools import humanDuration
from hachoir_core.endian import NETWORK_ENDIAN
class ID3v1(FieldSet):
    """ID3 version 1.x metadata: a fixed 128-byte record (signature "TAG",
    song, author, album, year, comment and optionally track number/genre)
    appended at the end of an MPEG audio file.
    """
    static_size = 128 * 8
    # Genre byte value -> genre name (0-79 from the ID3v1 spec, 80+ are
    # Winamp extensions).
    GENRE_NAME = {
        0: u"Blues",
        1: u"Classic Rock",
        2: u"Country",
        3: u"Dance",
        4: u"Disco",
        5: u"Funk",
        6: u"Grunge",
        7: u"Hip-Hop",
        8: u"Jazz",
        9: u"Metal",
        10: u"New Age",
        11: u"Oldies",
        12: u"Other",
        13: u"Pop",
        14: u"R&B",
        15: u"Rap",
        16: u"Reggae",
        17: u"Rock",
        18: u"Techno",
        19: u"Industrial",
        20: u"Alternative",
        21: u"Ska",
        22: u"Death Metal",
        23: u"Pranks",
        24: u"Soundtrack",
        25: u"Euro-Techno",
        26: u"Ambient",
        27: u"Trip-Hop",
        28: u"Vocal",
        29: u"Jazz+Funk",
        30: u"Fusion",
        31: u"Trance",
        32: u"Classical",
        33: u"Instrumental",
        34: u"Acid",
        35: u"House",
        36: u"Game",
        37: u"Sound Clip",
        38: u"Gospel",
        39: u"Noise",
        40: u"AlternRock",
        41: u"Bass",
        42: u"Soul",
        43: u"Punk",
        44: u"Space",
        45: u"Meditative",
        46: u"Instrumental Pop",
        47: u"Instrumental Rock",
        48: u"Ethnic",
        49: u"Gothic",
        50: u"Darkwave",
        51: u"Techno-Industrial",
        52: u"Electronic",
        53: u"Pop-Folk",
        54: u"Eurodance",
        55: u"Dream",
        56: u"Southern Rock",
        57: u"Comedy",
        58: u"Cult",
        59: u"Gangsta",
        60: u"Top 40",
        61: u"Christian Rap",
        62: u"Pop/Funk",
        63: u"Jungle",
        64: u"Native American",
        65: u"Cabaret",
        66: u"New Wave",
        67: u"Psychadelic",
        68: u"Rave",
        69: u"Showtunes",
        70: u"Trailer",
        71: u"Lo-Fi",
        72: u"Tribal",
        73: u"Acid Punk",
        74: u"Acid Jazz",
        75: u"Polka",
        76: u"Retro",
        77: u"Musical",
        78: u"Rock & Roll",
        79: u"Hard Rock",
        # Following are winamp extentions
        80: u"Folk",
        81: u"Folk-Rock",
        82: u"National Folk",
        83: u"Swing",
        84: u"Fast Fusion",
        85: u"Bebob",
        86: u"Latin",
        87: u"Revival",
        88: u"Celtic",
        89: u"Bluegrass",
        90: u"Avantgarde",
        91: u"Gothic Rock",
        92: u"Progressive Rock",
        93: u"Psychedelic Rock",
        94: u"Symphonic Rock",
        95: u"Slow Rock",
        96: u"Big Band",
        97: u"Chorus",
        98: u"Easy Listening",
        99: u"Acoustic",
        100: u"Humour",
        101: u"Speech",
        102: u"Chanson",
        103: u"Opera",
        104: u"Chamber Music",
        105: u"Sonata",
        106: u"Symphony",
        107: u"Booty Bass",
        108: u"Primus",
        109: u"Porn Groove",
        110: u"Satire",
        111: u"Slow Jam",
        112: u"Club",
        113: u"Tango",
        114: u"Samba",
        115: u"Folklore",
        116: u"Ballad",
        117: u"Power Ballad",
        118: u"Rhythmic Soul",
        119: u"Freestyle",
        120: u"Duet",
        121: u"Punk Rock",
        122: u"Drum Solo",
        123: u"A capella",
        124: u"Euro-House",
        125: u"Dance Hall",
        126: u"Goa",
        127: u"Drum & Bass",
        128: u"Club-House",
        129: u"Hardcore",
        130: u"Terror",
        131: u"Indie",
        132: u"Britpop",
        133: u"Negerpunk",
        134: u"Polsk Punk",
        135: u"Beat",
        136: u"Christian Gangsta Rap",
        137: u"Heavy Metal",
        138: u"Black Metal",
        139: u"Crossover",
        140: u"Contemporary Christian",
        141: u"Christian Rock ",
        142: u"Merengue",
        143: u"Salsa",
        144: u"Trash Metal",
        145: u"Anime",
        146: u"JPop",
        147: u"Synthpop"
    }

    def createFields(self):
        yield String(self, "signature", 3, "IDv1 signature (\"TAG\")", charset="ASCII")
        if self["signature"].value != "TAG":
            raise MatchError("Stream doesn't look like ID3v1 (wrong signature)!")

        # TODO: Charset of below strings?
        yield String(self, "song", 30, "Song title", strip=" \0", charset="ISO-8859-1")
        yield String(self, "author", 30, "Author", strip=" \0", charset="ISO-8859-1")
        yield String(self, "album", 30, "Album title", strip=" \0", charset="ISO-8859-1")
        yield String(self, "year", 4, "Year", strip=" \0", charset="ISO-8859-1")

        # The last 31 bytes are split differently depending on the flavor.
        # TODO: Write better algorithm to guess ID3v1 version
        version = self.getVersion()
        if version in ("v1.1", "v1.1b"):
            if version == "v1.1b":
                # ID3 v1.1b: 29-byte comment, then the track number
                yield String(self, "comment", 29, "Comment", strip=" \0", charset="ISO-8859-1")
                yield UInt8(self, "track_nb", "Track number")
            else:
                # ID3 v1.1: 30-byte comment, no track number
                yield String(self, "comment", 30, "Comment", strip=" \0", charset="ISO-8859-1")
            yield Enum(UInt8(self, "genre", "Genre"), self.GENRE_NAME)
        else:
            # ID3 v1.0: 31-byte comment fills the remainder, no genre byte
            yield String(self, "comment", 31, "Comment", strip=" \0", charset="ISO-8859-1")

    def getVersion(self):
        """Guess the ID3v1 flavor ("1.0", "v1.1" or "v1.1b") from the last
        two bytes (offsets 126-127) of the 128-byte tag.
        """
        addr = self.absolute_address + 126 * 8
        # NOTE: renamed from "bytes" to avoid shadowing the builtin.
        marker = self.stream.readBytes(addr, 2)
        # last byte (127) is not space?  Then a genre byte is present.
        if marker[1] != ' ':
            # Byte 126 nul -> 30-byte comment (v1.1); non-nul -> interpreted
            # as a track number (v1.1b).
            # Bugfix: the original compared the one-character string to the
            # integer 0x00, which is always False under Python 2, so "v1.1"
            # could never be detected and every tag parsed as v1.1b.
            if marker[0] == '\x00':
                return "v1.1"
            else:
                return "v1.1b"
        else:
            # NOTE(review): spelled "1.0" (no "v" prefix) in the original;
            # kept as-is since only the membership test in createFields() and
            # the description string consume this value.
            return "1.0"

    def createDescription(self):
        version = self.getVersion()
        return "ID 3%s: author=%s, song=%s" % (
            version, self["author"].value, self["song"].value)
def getCharset(field):
    """Map an ID3 charset byte field to the python codec name used to decode
    the following text; raise ParserError for an unknown charset id.
    """
    key = field.value
    charset = ID3_StringCharset.charset_name.get(key)
    if charset is None:
        raise ParserError("ID3v2: Invalid charset (%s)." % key)
    return charset
class ID3_String(FieldSet):
    """Frame content that is a plain ISO-8859-1 string filling the frame."""
    # Characters stripped from both ends of the decoded text.
    STRIP = " \0"

    def createFields(self):
        yield String(self, "text", self._size/8, "Text", charset="ISO-8859-1", strip=self.STRIP)
class ID3_StringCharset(ID3_String):
    """Text frame: one charset byte followed by text in that encoding."""
    STRIP = " \0"
    # Human-readable label for each charset byte value (for display).
    charset_desc = {
        0: "ISO-8859-1",
        1: "UTF-16 with BOM",
        2: "UTF-16 (big endian)",
        3: "UTF-8"
    }
    # Python codec name for each charset byte value (used to decode).
    charset_name = {
        0: "ISO-8859-1",
        1: "UTF-16",
        2: "UTF-16-BE",
        3: "UTF-8"
    }

    def createFields(self):
        yield Enum(UInt8(self, "charset"), self.charset_desc)
        # Bytes remaining in the frame after the charset byte
        size = (self.size - self.current_size)/8
        if not size:
            return
        charset = getCharset(self["charset"])
        yield String(self, "text", size, "Text", charset=charset, strip=self.STRIP)
class ID3_Comment(ID3_StringCharset):
    """COMM frame: charset byte, 3-letter language code, NUL-terminated
    title, then the comment text."""
    def createFields(self):
        yield Enum(UInt8(self, "charset"), self.charset_desc)
        yield String(self, "lang", 3, "Language", charset="ASCII")
        charset = getCharset(self["charset"])
        yield CString(self, "title", "Title", charset=charset, strip=self.STRIP)
        # Bytes left after the NUL-terminated title
        size = (self.size - self.current_size) // 8
        if not size:
            return
        yield String(self, "text", size, "Text", charset=charset, strip=self.STRIP)
class ID3_StringTitle(ID3_StringCharset):
    """User-defined frame (TXXX/WXXX): charset byte, NUL-terminated title,
    then the value text."""
    def createFields(self):
        yield Enum(UInt8(self, "charset"), self.charset_desc)
        # Frame may end right after the charset byte
        if self.current_size == self.size:
            return
        charset = getCharset(self["charset"])
        yield CString(self, "title", "Title", charset=charset, strip=self.STRIP)
        # Bytes left after the NUL-terminated title
        size = (self.size - self.current_size)/8
        if not size:
            return
        yield String(self, "text", size, "Text", charset=charset, strip=self.STRIP)
class ID3_Private(FieldSet):
    """PRIV frame: opaque owner data; special-cases the "PeakValue" marker
    emitted by some encoders."""
    def createFields(self):
        size = self._size/8
        # TODO: Strings charset?
        if self.stream.readBytes(self.absolute_address, 9) == "PeakValue":
            yield String(self, "text", 9, "Text")
            size -= 9
        yield String(self, "content", size, "Content")
class ID3_TrackLength(FieldSet):
    """TLEN frame: track length stored as a decimal count of milliseconds."""
    def createFields(self):
        yield NullBytes(self, "zero", 1)
        yield textHandler(String(self, "length", self._size/8 - 1,
                                 "Length in ms", charset="ASCII"), self.computeLength)

    def computeLength(self, field):
        """Render the millisecond count as a human-readable duration,
        falling back to the raw text if it is not a valid number.
        """
        try:
            ms = int(field.value)
            return humanDuration(ms)
        # Bugfix: was a bare "except:", which also swallowed
        # KeyboardInterrupt/SystemExit; only conversion errors are expected.
        except (TypeError, ValueError):
            return field.value
class ID3_Picture23(FieldSet):
    """PIC frame (ID3 v2.2): attached picture with a 3-character image
    format, picture type, description and raw image data."""
    # Picture type byte -> meaning (shared with ID3_Picture24).
    pict_type_name = {
        0x00: "Other",
        0x01: "32x32 pixels 'file icon' (PNG only)",
        0x02: "Other file icon",
        0x03: "Cover (front)",
        0x04: "Cover (back)",
        0x05: "Leaflet page",
        0x06: "Media (e.g. lable side of CD)",
        0x07: "Lead artist/lead performer/soloist",
        0x08: "Artist/performer",
        0x09: "Conductor",
        0x0A: "Band/Orchestra",
        0x0B: "Composer",
        0x0C: "Lyricist/text writer",
        0x0D: "Recording Location",
        0x0E: "During recording",
        0x0F: "During performance",
        0x10: "Movie/video screen capture",
        0x11: "A bright coloured fish",
        0x12: "Illustration",
        0x13: "Band/artist logotype",
        0x14: "Publisher/Studio logotype"
    }

    def createFields(self):
        yield Enum(UInt8(self, "charset"), ID3_StringCharset.charset_desc)
        charset = getCharset(self["charset"])
        # v2.2 stores a fixed 3-character image format (e.g. "PNG", "JPG")
        yield String(self, "img_fmt", 3, charset="ASCII")
        yield Enum(UInt8(self, "pict_type"), self.pict_type_name)
        yield CString(self, "text", "Text", charset=charset, strip=" \0")
        # Whatever remains is the image payload
        size = (self._size - self._current_size) / 8
        if size:
            yield RawBytes(self, "img_data", size)
class ID3_Picture24(FieldSet):
    """APIC frame (ID3 v2.3/2.4): attached picture with a NUL-terminated
    MIME type, picture type, description and raw image data."""
    def createFields(self):
        yield Enum(UInt8(self, "charset"), ID3_StringCharset.charset_desc)
        charset = getCharset(self["charset"])
        yield CString(self, "mime", "MIME type", charset=charset)
        # Picture type values are shared with the v2.2 PIC frame
        yield Enum(UInt8(self, "pict_type"), ID3_Picture23.pict_type_name)
        yield CString(self, "description", charset=charset)
        # Whatever remains is the image payload
        size = (self._size - self._current_size) / 8
        if size:
            yield RawBytes(self, "img_data", size)
class ID3_Chunk(FieldSet):
    """One ID3v2 frame: tag identifier, size, (v2.3+) flag bits, then the
    content, parsed by a tag-specific handler when one is registered.
    """
    endian = NETWORK_ENDIAN
    # 3-character tag names used by ID3 v2.2
    tag22_name = {
        "TT2": "Track title",
        "TP1": "Artist",
        "TRK": "Track number",
        "COM": "Comment",
        "TCM": "Composer",
        "TAL": "Album",
        "TYE": "Year",
        "TEN": "Encoder",
        "TCO": "Content type",
        "PIC": "Picture"
    }
    # 4-character tag names used by ID3 v2.3/v2.4
    tag23_name = {
        "COMM": "Comment",
        "PRIV": "Private",
        "TPE1": "Artist",
        "TCOP": "Copyright",
        "TALB": "Album",
        "TENC": "Encoder",
        "TYER": "Year",
        "TSSE": "Encoder settings",
        "TCOM": "Composer",
        "TRCK": "Track number",
        "PCNT": "Play counter",
        "TCON": "Content type",
        "TLEN": "Track length",
        "TIT2": "Track title",
        "WXXX": "User defined URL"
    }
    # Tag name -> FieldSet subclass used to parse the frame content
    handler = {
        "COMM": ID3_Comment,
        "COM": ID3_Comment,
        "PIC": ID3_Picture23,
        "APIC": ID3_Picture24,
        "PRIV": ID3_Private,
        "TXXX": ID3_StringTitle,
        "WOAR": ID3_String,
        "WXXX": ID3_StringTitle,
    }

    def __init__(self, *args):
        FieldSet.__init__(self, *args)
        if 3 <= self["../ver_major"].value:
            # v2.3/v2.4: 10-byte frame header (tag + size + 2 flag bytes)
            self._size = (10 + self["size"].value) * 8
        else:
            # v2.2: 6-byte frame header (3-byte tag + 3-byte size)
            self._size = (self["size"].value + 6) * 8

    def createFields(self):
        if 3 <= self["../ver_major"].value:
            # ID3 v2.3 and 2.4
            yield Enum(String(self, "tag", 4, "Tag", charset="ASCII", strip="\0"), ID3_Chunk.tag23_name)
            if 4 <= self["../ver_major"].value:
                yield ID3_Size(self, "size")   # ID3 v2.4 ("synchsafe" size)
            else:
                yield UInt32(self, "size")   # ID3 v2.3
            yield Bit(self, "tag_alter", "Tag alter preservation")
            # Bugfix: description was copy-pasted as "Tag alter preservation"
            yield Bit(self, "file_alter", "File alter preservation")
            yield Bit(self, "rd_only", "Read only?")
            yield NullBits(self, "padding[]", 5)
            yield Bit(self, "compressed", "Frame is compressed?")
            yield Bit(self, "encrypted", "Frame is encrypted?")
            yield Bit(self, "group", "Grouping identity")
            yield NullBits(self, "padding[]", 5)
            size = self["size"].value
            is_compressed = self["compressed"].value
        else:
            # ID3 v2.2
            yield Enum(String(self, "tag", 3, "Tag", charset="ASCII", strip="\0"), ID3_Chunk.tag22_name)
            yield UInt24(self, "size")
            size = self["size"].value - self.current_size/8 + 6
            is_compressed = False

        if size:
            cls = None
            # Compressed frames are kept as raw bytes (no handler)
            if not(is_compressed):
                tag = self["tag"].value
                if tag in ID3_Chunk.handler:
                    cls = ID3_Chunk.handler[tag]
                elif tag[0] == "T":
                    # Any other "T..." tag is a generic text frame
                    cls = ID3_StringCharset
            if cls:
                yield cls(self, "content", "Content", size=size*8)
            else:
                yield RawBytes(self, "content", size, "Raw data content")

    def createDescription(self):
        # A zero-sized frame marks the start of the padding area
        if self["size"].value != 0:
            return "ID3 Chunk: %s" % self["tag"].display
        else:
            return "ID3 Chunk: (terminator)"
class ID3_Size(Bits):
    """ID3v2 "synchsafe" 32-bit integer: four bytes contributing seven
    useful bits each (the high bit of every byte is meant to be zero)."""
    static_size = 32

    def __init__(self, parent, name, description=None):
        Bits.__init__(self, parent, name, 32, description)

    def createValue(self):
        # TODO: Check that bit #7 of each byte is nul: not(ord(data[i]) & 127)
        raw = self.parent.stream.readBytes(self.absolute_address, 4)
        value = 0
        for octet in raw:
            value = value * 128 + ord(octet)
        return value
class ID3v2(FieldSet):
    """ID3 version 2.x tag: a 10-byte header (signature, version, flags,
    size) followed by a sequence of frames (ID3_Chunk)."""
    endian = NETWORK_ENDIAN
    # Supported "major" versions: ID3 v2.2, v2.3 and v2.4
    VALID_MAJOR_VERSIONS = (2, 3, 4)

    def __init__(self, parent, name, size=None):
        FieldSet.__init__(self, parent, name, size=size)
        if not self._size:
            # Total size: 10-byte header plus the payload size announced
            # in the "size" field
            self._size = (self["size"].value + 10) * 8

    def createDescription(self):
        return "ID3 v2.%s.%s" % \
            (self["ver_major"].value, self["ver_minor"].value)

    def createFields(self):
        # Signature + version
        yield String(self, "header", 3, "Header (ID3)", charset="ASCII")
        yield UInt8(self, "ver_major", "Version (major)")
        yield UInt8(self, "ver_minor", "Version (minor)")

        # Check format
        if self["header"].value != "ID3":
            raise MatchError("Signature error, should be \"ID3\".")
        if self["ver_major"].value not in self.VALID_MAJOR_VERSIONS \
        or self["ver_minor"].value != 0:
            raise MatchError(
                "Unknown ID3 metadata version (2.%u.%u)"
                % (self["ver_major"].value, self["ver_minor"].value))

        # Flags
        yield Bit(self, "unsync", "Unsynchronisation is used?")
        yield Bit(self, "ext", "Extended header is used?")
        yield Bit(self, "exp", "Experimental indicator")
        yield NullBits(self, "padding[]", 5)

        # Size ("synchsafe" integer)
        yield ID3_Size(self, "size")

        # All tags; a zero-sized frame acts as a terminator
        while self.current_size < self._size:
            field = ID3_Chunk(self, "field[]")
            yield field
            if field["size"].value == 0:
                break

        # Search first byte of the MPEG file (skip tag padding)
        padding = self.seekBit(self._size)
        if padding:
            yield padding
| gpl-3.0 |
twitchyliquid64/misc-scripts | s3tool/boto/mashups/order.py | 153 | 7584 | # Copyright (c) 2006,2007 Mitch Garnaat http://garnaat.org/
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
"""
High-level abstraction of an EC2 order for servers
"""
import boto
import boto.ec2
from boto.mashups.server import Server, ServerSet
from boto.mashups.iobject import IObject
from boto.pyami.config import Config
from boto.sdb.persist import get_domain, set_domain
import time
from boto.compat import StringIO
InstanceTypes = ['m1.small', 'm1.large', 'm1.xlarge', 'c1.medium', 'c1.xlarge']
class Item(IObject):
    """One line item of an EC2 order.

    Collects everything needed to launch a set of identical instances
    (region, AMI, type, quantity, zone, security groups, keypair, config),
    either interactively via the IObject prompt helpers or from keyword
    parameters passed to enter().
    """

    def __init__(self):
        self.region = None
        self.name = None
        self.instance_type = None
        self.quantity = 0
        self.zone = None
        self.ami = None
        self.groups = []
        self.key = None
        self.ec2 = None
        self.config = None
        # Bugfix: set_userdata()/get_userdata() use self.userdata, which was
        # never initialized and raised AttributeError on first use.
        self.userdata = {}

    def set_userdata(self, key, value):
        self.userdata[key] = value

    def get_userdata(self, key):
        return self.userdata[key]

    def set_region(self, region=None):
        # Prompt with the list of known regions when none is supplied
        if region:
            self.region = region
        else:
            l = [(r, r.name, r.endpoint) for r in boto.ec2.regions()]
            self.region = self.choose_from_list(l, prompt='Choose Region')

    def set_name(self, name=None):
        if name:
            self.name = name
        else:
            self.name = self.get_string('Name')

    def set_instance_type(self, instance_type=None):
        if instance_type:
            self.instance_type = instance_type
        else:
            self.instance_type = self.choose_from_list(InstanceTypes, 'Instance Type')

    def set_quantity(self, n=0):
        if n > 0:
            self.quantity = n
        else:
            self.quantity = self.get_int('Quantity')

    def set_zone(self, zone=None):
        if zone:
            self.zone = zone
        else:
            l = [(z, z.name, z.state) for z in self.ec2.get_all_zones()]
            self.zone = self.choose_from_list(l, prompt='Choose Availability Zone')

    def set_ami(self, ami=None):
        if ami:
            self.ami = ami
        else:
            l = [(a, a.id, a.location) for a in self.ec2.get_all_images()]
            self.ami = self.choose_from_list(l, prompt='Choose AMI')

    def add_group(self, group=None):
        if group:
            self.groups.append(group)
        else:
            l = [(s, s.name, s.description) for s in self.ec2.get_all_security_groups()]
            self.groups.append(self.choose_from_list(l, prompt='Choose Security Group'))

    def set_key(self, key=None):
        if key:
            self.key = key
        else:
            l = [(k, k.name, '') for k in self.ec2.get_all_key_pairs()]
            self.key = self.choose_from_list(l, prompt='Choose Keypair')

    def update_config(self):
        """Inject credentials and Pyami bootstrap settings into the config
        that will be shipped to the instance as user data."""
        if not self.config.has_section('Credentials'):
            self.config.add_section('Credentials')
            self.config.set('Credentials', 'aws_access_key_id', self.ec2.aws_access_key_id)
            self.config.set('Credentials', 'aws_secret_access_key', self.ec2.aws_secret_access_key)
        if not self.config.has_section('Pyami'):
            self.config.add_section('Pyami')
        sdb_domain = get_domain()
        if sdb_domain:
            self.config.set('Pyami', 'server_sdb_domain', sdb_domain)
            self.config.set('Pyami', 'server_sdb_name', self.name)

    def set_config(self, config_path=None):
        if not config_path:
            config_path = self.get_filename('Specify Config file')
        self.config = Config(path=config_path)

    def get_userdata_string(self):
        """Serialize the config to the string passed as EC2 user data."""
        s = StringIO()
        self.config.write(s)
        return s.getvalue()

    def enter(self, **params):
        """Fill in every attribute, from **params when given, otherwise by
        prompting interactively."""
        self.region = params.get('region', self.region)
        if not self.region:
            self.set_region()
        self.ec2 = self.region.connect()
        self.name = params.get('name', self.name)
        if not self.name:
            self.set_name()
        self.instance_type = params.get('instance_type', self.instance_type)
        if not self.instance_type:
            self.set_instance_type()
        self.zone = params.get('zone', self.zone)
        if not self.zone:
            self.set_zone()
        self.quantity = params.get('quantity', self.quantity)
        if not self.quantity:
            self.set_quantity()
        self.ami = params.get('ami', self.ami)
        if not self.ami:
            self.set_ami()
        self.groups = params.get('groups', self.groups)
        if not self.groups:
            self.add_group()
        self.key = params.get('key', self.key)
        if not self.key:
            self.set_key()
        self.config = params.get('config', self.config)
        if not self.config:
            self.set_config()
        self.update_config()
class Order(IObject):
    """A collection of Items to be launched together on EC2."""

    def __init__(self):
        self.items = []
        self.reservation = None

    def add_item(self, **params):
        """Create a new Item, fill it in (interactively or from **params)
        and append it to the order."""
        item = Item()
        item.enter(**params)
        self.items.append(item)

    def display(self):
        """Print a tab-separated summary of the order."""
        print('This Order consists of the following items')
        print()
        # Bugfix: the header used '\n' between TYPE and AMI, splitting the
        # single header row of six tab-separated columns across two lines.
        print('QTY\tNAME\tTYPE\tAMI\t\tGroups\t\t\tKeyPair')
        for item in self.items:
            print('%s\t%s\t%s\t%s\t%s\t%s' % (item.quantity, item.name, item.instance_type,
                                              item.ami.id, item.groups, item.key.name))

    def place(self, block=True):
        """Launch every item's instances and persist a Server record for
        each.  When *block* is True, wait until all instances of an item
        report 'running' before recording them.

        Returns the single Server when exactly one instance was launched,
        otherwise the ServerSet.
        """
        if get_domain() is None:
            print('SDB Persistence Domain not set')
            domain_name = self.get_string('Specify SDB Domain')
            set_domain(domain_name)
        s = ServerSet()
        for item in self.items:
            r = item.ami.run(min_count=1, max_count=item.quantity,
                             key_name=item.key.name, user_data=item.get_userdata_string(),
                             security_groups=item.groups, instance_type=item.instance_type,
                             placement=item.zone.name)
            if block:
                states = [i.state for i in r.instances]
                # Bugfix: this was an "if", which slept a single 15-second
                # interval at most and then proceeded even when instances
                # were still pending; loop until every instance is running.
                while states.count('running') != len(states):
                    print(states)
                    time.sleep(15)
                    states = [i.update() for i in r.instances]
            for i in r.instances:
                server = Server()
                server.name = item.name
                server.instance_id = i.id
                server.reservation = r
                server.save()
                s.append(server)
        if len(s) == 1:
            return s[0]
        else:
            return s
| mit |
johankaito/fufuka | microblog/old-flask/lib/python2.7/site-packages/whoosh/query/positional.py | 52 | 9427 | # Copyright 2007 Matt Chaput. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY MATT CHAPUT ``AS IS'' AND ANY EXPRESS OR
# IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
# EVENT SHALL MATT CHAPUT OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA,
# OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
# EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# The views and conclusions contained in the software and documentation are
# those of the authors and should not be interpreted as representing official
# policies, either expressed or implied, of Matt Chaput.
from __future__ import division
import copy
from whoosh import matching
from whoosh.analysis import Token
from whoosh.compat import u
from whoosh.query import qcore, terms, compound
class Sequence(compound.CompoundQuery):
    """Matches documents containing a list of sub-queries in adjacent
    positions.

    This object has no sanity check to prevent you from using queries in
    different fields.
    """

    JOINT = " NEAR "
    intersect_merge = True

    def __init__(self, subqueries, slop=1, ordered=True, boost=1.0):
        """
        :param subqueries: a list of :class:`whoosh.query.Query` objects to
            match in sequence.
        :param slop: the maximum difference in position allowed between the
            subqueries.
        :param ordered: if True, the position differences between subqueries
            must be positive (that is, each subquery in the list must appear
            after the previous subquery in the document).
        :param boost: a boost factor to add to the score of documents matching
            this query.
        """

        compound.CompoundQuery.__init__(self, subqueries, boost=boost)
        self.slop = slop
        self.ordered = ordered

    def __eq__(self, other):
        # Bugfix: slop and ordered are part of the query's identity but were
        # previously ignored here, while __hash__ mixes slop in -- so two
        # queries could compare equal yet hash differently, breaking the
        # eq/hash contract for dicts and sets.
        return (other and type(self) is type(other)
                and self.subqueries == other.subqueries
                and self.boost == other.boost
                and self.slop == other.slop
                and self.ordered == other.ordered)

    def __repr__(self):
        return "%s(%r, slop=%d, boost=%f)" % (self.__class__.__name__,
                                              self.subqueries, self.slop,
                                              self.boost)

    def __hash__(self):
        h = hash(self.slop) ^ hash(self.boost)
        for q in self.subqueries:
            h ^= hash(q)
        return h

    def normalize(self):
        # Because the subqueries are in sequence, we can't do the fancy merging
        # that CompoundQuery does
        return self.__class__([q.normalize() for q in self.subqueries],
                              self.slop, self.ordered, self.boost)

    def _and_query(self):
        # Size estimates are based on the conjunction of the subqueries
        return compound.And(self.subqueries)

    def estimate_size(self, ixreader):
        return self._and_query().estimate_size(ixreader)

    def estimate_min_size(self, ixreader):
        return self._and_query().estimate_min_size(ixreader)

    def _matcher(self, subs, searcher, context):
        from whoosh.query.spans import SpanNear

        # Tell the sub-queries this matcher will need the current match to get
        # spans
        context = context.set(needs_current=True)
        m = self._tree_matcher(subs, SpanNear.SpanNearMatcher, searcher,
                               context, None, slop=self.slop,
                               ordered=self.ordered)
        return m
class Ordered(Sequence):
    """Matches documents containing a list of sub-queries in the given order.
    """

    JOINT = " BEFORE "

    def _matcher(self, subs, searcher, context):
        # SpanBefore only requires the sub-spans to appear in document order;
        # unlike Sequence it imposes no adjacency/slop constraint here.
        from whoosh.query.spans import SpanBefore

        return self._tree_matcher(subs, SpanBefore._Matcher, searcher,
                                  context, None)
class Phrase(qcore.Query):
    """Matches documents containing a given phrase."""

    def __init__(self, fieldname, words, slop=1, boost=1.0, char_ranges=None):
        """
        :param fieldname: the field to search.
        :param words: a list of words (unicode strings) in the phrase.
        :param slop: the number of words allowed between each "word" in the
            phrase; the default of 1 means the phrase must match exactly.
        :param boost: a boost factor that to apply to the raw score of
            documents matched by this query.
        :param char_ranges: if a Phrase object is created by the query parser,
            it will set this attribute to a list of (startchar, endchar) pairs
            corresponding to the words in the phrase
        """

        self.fieldname = fieldname
        self.words = words
        self.slop = slop
        self.boost = boost
        self.char_ranges = char_ranges

    def __eq__(self, other):
        return (other and self.__class__ is other.__class__
                and self.fieldname == other.fieldname
                and self.words == other.words
                and self.slop == other.slop
                and self.boost == other.boost)

    def __repr__(self):
        return "%s(%r, %r, slop=%s, boost=%f)" % (self.__class__.__name__,
                                                  self.fieldname, self.words,
                                                  self.slop, self.boost)

    def __unicode__(self):
        return u('%s:"%s"') % (self.fieldname, u(" ").join(self.words))

    __str__ = __unicode__

    def __hash__(self):
        h = hash(self.fieldname) ^ hash(self.slop) ^ hash(self.boost)
        for w in self.words:
            h ^= hash(w)
        return h

    def has_terms(self):
        return True

    def terms(self, phrases=False):
        # Only yields the phrase's words when the caller asks for terms
        # inside phrases.
        if phrases and self.field():
            for word in self.words:
                yield (self.field(), word)

    def tokens(self, boost=1.0):
        char_ranges = self.char_ranges
        startchar = endchar = None
        for i, word in enumerate(self.words):
            if char_ranges:
                startchar, endchar = char_ranges[i]

            yield Token(fieldname=self.fieldname, text=word,
                        boost=boost * self.boost, startchar=startchar,
                        endchar=endchar, chars=True)

    def normalize(self):
        if not self.words:
            return qcore.NullQuery
        if len(self.words) == 1:
            # A one-word phrase degenerates to a plain Term query
            t = terms.Term(self.fieldname, self.words[0])
            if self.char_ranges:
                t.startchar, t.endchar = self.char_ranges[0]
            return t

        words = [w for w in self.words if w is not None]
        return self.__class__(self.fieldname, words, slop=self.slop,
                              boost=self.boost, char_ranges=self.char_ranges)

    def replace(self, fieldname, oldtext, newtext):
        q = copy.copy(self)
        if q.fieldname == fieldname:
            for i, word in enumerate(q.words):
                if word == oldtext:
                    q.words[i] = newtext
        return q

    def _and_query(self):
        # Size estimates are based on the conjunction of the phrase's terms
        return compound.And([terms.Term(self.fieldname, word)
                             for word in self.words])

    def estimate_size(self, ixreader):
        return self._and_query().estimate_size(ixreader)

    def estimate_min_size(self, ixreader):
        return self._and_query().estimate_min_size(ixreader)

    def matcher(self, searcher, context=None):
        from whoosh.query import Term, SpanNear2

        fieldname = self.fieldname
        if fieldname not in searcher.schema:
            return matching.NullMatcher()
        field = searcher.schema[fieldname]
        if not field.format or not field.format.supports("positions"):
            raise qcore.QueryError("Phrase search: %r field has no positions"
                                   % self.fieldname)

        # Fix: this local was named "terms", shadowing the whoosh.query.terms
        # module imported at the top of the file; renamed for clarity.
        term_queries = []
        # Build a list of Term queries from the words in the phrase
        reader = searcher.reader()
        for word in self.words:
            try:
                word = field.to_bytes(word)
            except ValueError:
                return matching.NullMatcher()

            if (fieldname, word) not in reader:
                # Shortcut the query if one of the words doesn't exist.
                return matching.NullMatcher()
            term_queries.append(Term(fieldname, word))

        # Create the equivalent SpanNear2 query from the terms
        q = SpanNear2(term_queries, slop=self.slop, ordered=True, mindist=1)
        # Get the matcher
        m = q.matcher(searcher, context)
        if self.boost != 1.0:
            m = matching.WrappingMatcher(m, boost=self.boost)
        return m
| apache-2.0 |
PATRIC3/p3_solr | lucene/core/src/java/org/apache/lucene/util/automaton/createLevAutomata.py | 8 | 14571 | # Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Note, this file is known to work with rev 120 of the moman
# repository (http://bitbucket.org/jpbarrette/moman/overview)
#
# See also: http://sites.google.com/site/rrettesite/moman
import math
import os
import sys
#sys.path.insert(0, 'moman/finenight/python')
sys.path.insert(0, '../../../../../../../../build/core/moman/finenight/python')
try:
from possibleStates import genTransitions
except ImportError:
from finenight.possibleStates import genTransitions
# Code-generation mode: emit the Levenshtein transition tables as Java
# arrays ('array') or as switch statements ('switch').
MODE = 'array'
# presumably: pack several table entries per machine word instead of one
# entry per slot -- TODO confirm against the table-emitting code in main()
PACKED = True
# Bits per packed word and its log2 (for shift-based indexing)
WORD = 64
LOG2_WORD = int(math.log(WORD)/math.log(2))
#MODE = 'switch'
class LineOutput:
    """Accumulates generated source lines, tracking brace-based indentation.

    Each '{' on an emitted line increases the indent by two spaces for the
    following lines; each '}' decreases it before the line is emitted.
    Lines inside /* ... */ block comments keep their original leading
    whitespace instead of being re-indented.
    """

    def __init__(self, indent=''):
        self.l = []
        self._indent = self.startIndent = indent
        self.inComment = False

    def __call__(self, s, indent=0):
        # A closing brace anywhere on the line outdents before emitting.
        if '}' in s:
            assert self._indent != self.startIndent
            self._indent = self._indent[:-2]

        if indent != 0:
            # Explicit relative indent override (in 2-space levels)
            prefix = ' ' * (len(self._indent)/2 + indent)
        else:
            prefix = self._indent

        # Track whether we are inside a /* ... */ block comment.
        if '/*' in s:
            if '*/' not in s:
                self.inComment = True
        elif '*/' in s:
            self.inComment = True

        if self.inComment:
            # Comment lines keep their own leading whitespace
            self.l.append(prefix + s)
        else:
            self.l.append(prefix + s.lstrip())

        self.inComment = self.inComment and '*/' not in s

        # An opening brace indents the lines that follow.
        if '{' in s:
            self._indent += '  '

    def __str__(self):
        assert self._indent == self.startIndent, \
            'indent %d vs start indent %d' % (len(self._indent),
                                              len(self.startIndent))
        return '\n'.join(self.l)

    def indent(self):
        self._indent += '  '

    def outdent(self):
        assert self._indent != self.startIndent
        self._indent = self._indent[:-2]
def charVarNumber(charVar):
    """
    Maps binary number (eg [1, 0, 1]) to its decimal value (5).
    """
    # MSB-first Horner evaluation; digits may be ints or '0'/'1' strings.
    value = 0
    for digit in charVar:
        value = value * 2 + int(digit)
    return value
def main():
    """Generate a LevN[T]ParametricDescription.java file for Lucene.

    Reads N (edit distance) and a True/False transpositions flag from
    argv, asks moman/finenight for the parametric transition tables, and
    renders them as a Java class in the current working directory.

    NOTE: this is a Python 2 script (print statements, xrange, long).
    """
    if len(sys.argv) != 3:
        print
        print 'Usage: python -u %s N <True/False>' % sys.argv[0]
        print
        print 'NOTE: the resulting .java file is created in the current working dir!'
        print
        sys.exit(1)
    n = int(sys.argv[1])
    transpose = (sys.argv[2] == "True")
    # tables: one transition map per "position" branch of transition().
    tables = genTransitions(n, transpose)
    # stateMap: state-description string -> ordinal (assigned on first sight).
    stateMap = {}
    # init null state
    stateMap['[]'] = -1
    # init start state
    stateMap['[(0, 0)]'] = 0
    w = LineOutput()
    # Emit the package statement and Apache license header of the
    # generated Java file.
    w('package org.apache.lucene.util.automaton;')
    w('')
    w('/*')
    w(' * Licensed to the Apache Software Foundation (ASF) under one or more')
    w(' * contributor license agreements. See the NOTICE file distributed with')
    w(' * this work for additional information regarding copyright ownership.')
    w(' * The ASF licenses this file to You under the Apache License, Version 2.0')
    w(' * (the "License"); you may not use this file except in compliance with')
    w(' * the License. You may obtain a copy of the License at')
    w(' *')
    w(' * http://www.apache.org/licenses/LICENSE-2.0')
    w(' *')
    w(' * Unless required by applicable law or agreed to in writing, software')
    w(' * distributed under the License is distributed on an "AS IS" BASIS,')
    w(' * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.')
    w(' * See the License for the specific language governing permissions and')
    w(' * limitations under the License.')
    w(' */')
    w('')
    w('// The following code was generated with the moman/finenight pkg')
    w('// This package is available under the MIT License, see NOTICE.txt')
    w('// for more details.')
    w('')
    w('import org.apache.lucene.util.automaton.LevenshteinAutomata.ParametricDescription;')
    w('')
    if transpose:
        w('/** Parametric description for generating a Levenshtein automaton of degree %s, ' % n)
        w(' with transpositions as primitive edits */')
        className = 'Lev%dTParametricDescription' % n
    else:
        w('/** Parametric description for generating a Levenshtein automaton of degree %s */' % n)
        className = 'Lev%dParametricDescription' % n
    w('class %s extends ParametricDescription {' % className)
    w('')
    # transition(): decodes the absolute state, dispatches on the query
    # position, and re-encodes the resulting state.
    w('@Override')
    w('int transition(int absState, int position, int vector) {')
    w(' // null absState should never be passed in')
    w(' assert absState != -1;')
    w('')
    w(' // decode absState -> state, offset')
    w(' int state = absState/(w+1);')
    w(' int offset = absState%(w+1);')
    w(' assert offset >= 0;')
    w('')
    machines = []
    # One branch per position table.  NOTE: `map` shadows the builtin
    # (harmless in this Python 2 script).
    for i, map in enumerate(tables):
        if i == 0:
            w('if (position == w) {')
        elif i == len(tables)-1:
            w('} else {')
        else:
            w('} else if (position == w-%d) {' % i)
        if i != 0 and MODE == 'switch':
            w('switch(vector) {')
        l = map.items()
        l.sort()
        numCasesPerVector = None
        numVectors = len(l)
        if MODE == 'array':
            toStateArray = []
            toOffsetIncrArray = []
        for charVar, states in l:
            # somehow it's a string:
            charVar = eval(charVar)
            if i != 0 and MODE == 'switch':
                w('case %s: // <%s>' % (charVarNumber(charVar), ','.join([str(x) for x in charVar])))
                w.indent()
            # NOTE: rebinding `l` here is safe: the enclosing for-loop
            # already holds its own iterator over the original list.
            l = states.items()
            byFromState = {}
            # first pass to assign states
            byAction = {}
            for s, (toS, offset) in l:
                state = str(s)
                toState = str(toS)
                # Assign ordinals lazily; -1 keeps '[]' (null state) at -1.
                if state not in stateMap:
                    stateMap[state] = len(stateMap)-1
                if toState not in stateMap:
                    stateMap[toState] = len(stateMap)-1
                # +1 so that 0 can encode "null" in the packed tables.
                byFromState[stateMap[state]] = (1+stateMap[toState], offset)
                fromStateDesc = s[1:len(s)-1]
                toStateDesc = ', '.join([str(x) for x in toS])
                tup = (stateMap[toState], toStateDesc, offset)
                if tup not in byAction:
                    byAction[tup] = []
                byAction[tup].append((fromStateDesc, stateMap[state]))
            if numCasesPerVector is None:
                numCasesPerVector = len(l)
            else:
                # we require this to be uniform... empirically it seems to be!
                assert numCasesPerVector == len(l)
            if MODE == 'array':
                # Flatten the per-vector transitions into the arrays.
                for s in range(numCasesPerVector):
                    toState, offsetIncr = byFromState[s]
                    toStateArray.append(toState)
                    toOffsetIncrArray.append(offsetIncr)
            else:
                # render switches
                w('switch(state) { // %s cases' % len(l))
                for (toState, toStateDesc, offset), lx in byAction.items():
                    for fromStateDesc, fromState in lx:
                        w('case %s: // %s' % (fromState, fromStateDesc))
                    w.indent()
                    w(' state = %s; // %s' % (toState, toStateDesc))
                    if offset > 0:
                        w(' offset += %s;' % offset)
                    w('break;')
                    w.outdent()
                w('}')
            # NOTE(review): unlike the matching 'case'/indent() above, this
            # close is not guarded by MODE == 'switch'; in array mode the
            # outdent has no matching indent — confirm against the moman
            # rev 120 behavior this script is documented to work with.
            if i != 0:
                w('break;')
                w.outdent()
        if MODE == 'array':
            # strangely state can come in wildly out of bounds....
            w(' if (state < %d) {' % numCasesPerVector)
            w(' final int loc = vector * %d + state;' % numCasesPerVector)
            if PACKED:
                w(' offset += unpack(offsetIncrs%d, loc, NBITSOFFSET%d);' % (i, i))
                w(' state = unpack(toStates%d, loc, NBITSSTATES%d)-1;' % (i, i))
            else:
                w(' offset += offsetIncrs%d[loc];' % i)
                w(' state = toStates%d[loc]-1;' % i)
            w(' }')
        elif i != 0:
            w('}')
        # NOTE(review): in switch mode toStateArray/toOffsetIncrArray are
        # never assigned (NameError on first iteration); this only works
        # because MODE is hard-coded to 'array' above.
        machines.append((toStateArray, toOffsetIncrArray, numCasesPerVector, numVectors))
    # ends switch statement for machine
    w('}')
    w('')
    w(' if (state == -1) {')
    w(' // null state')
    w(' return -1;')
    w(' } else {')
    w(' // translate back to abs')
    w(' return state*(w+1)+offset;')
    w(' }')
    # ends transition method
    w('}')
    # subs: deferred (placeholder, value) substitutions for the NBITS*
    # constants, applied textually just before writing the file.
    subs = []
    if MODE == 'array':
        w.indent()
        for i, (toStateArray, toOffsetIncrsArray, numCasesPerVector, numVectors) in enumerate(machines):
            w('')
            w.outdent()
            w('// %d vectors; %d states per vector; array length = %d' % \
              (numVectors, numCasesPerVector, numVectors*numCasesPerVector))
            w.indent()
            if PACKED:
                # pack in python
                l, nbits = pack(toStateArray)
                subs.append(('NBITSSTATES%d' % i, str(nbits)))
                w(' private final static long[] toStates%d = new long[] /*%d bits per value */ %s;' % \
                  (i, nbits, renderList([hex(long(x)) for x in l])))
                l, nbits = pack(toOffsetIncrsArray)
                subs.append(('NBITSOFFSET%d' % i, str(nbits)))
                w(' private final static long[] offsetIncrs%d = new long[] /*%d bits per value */ %s;' % \
                  (i, nbits, renderList([hex(long(x)) for x in l])))
            else:
                # NOTE(review): the offsetIncrs line below renders
                # toStateArray instead of toOffsetIncrsArray — looks like a
                # copy-paste bug; dead code as long as PACKED is True.
                w(' private final static int[] toStates%d = new int[] %s;' % \
                  (i, renderList([str(x) for x in toStateArray])))
                w(' private final static int[] offsetIncrs%d = new int[] %s;' % \
                  (i, renderList([str(x) for x in toStateArray])))
        w.outdent()
    # Invert stateMap: ordinal -> state description string.
    stateMap2 = dict([[v,k] for k,v in stateMap.items()])
    w('')
    w('// state map')
    # NOTE(review): `sum` (shadows the builtin) is accumulated but never
    # read afterwards — dead bookkeeping.
    sum = 0
    minErrors = []
    for i in xrange(len(stateMap2)-1):
        w('// %s -> %s' % (i, stateMap2[i]))
        # we replace t-notation as it's not relevant here
        st = stateMap2[i].replace('t', '')
        v = eval(st)
        # NOTE: the comprehension reuses the name `i` (Python 2 leaks the
        # comprehension variable); harmless because `i` is not read again
        # before the next loop iteration rebinds it.
        minError = min([-i+e for i, e in v])
        c = len(v)
        sum += c
        minErrors.append(minError)
    w('')
    w.indent()
    #w('private final static int[] minErrors = new int[] {%s};' % ','.join([str(x) for x in minErrors]))
    w.outdent()
    w('')
    # Constructor: passes w, n and the per-state minErrors table up to
    # ParametricDescription.
    w(' public %s(int w) {' % className)
    w(' super(w, %d, new int[] {%s});' % (n, ','.join([str(x) for x in minErrors])), indent=1)
    w(' }')
    # Disabled: size()/getPosition()/isAccept() moved into the superclass.
    if 0:
        w('')
        w('@Override')
        w('public int size() { // this can now move up?')
        w(' return %d*(w+1);' % (len(stateMap2)-1))
        w('}')
        w('')
        w('@Override')
        w('public int getPosition(int absState) { // this can now move up?')
        w(' return absState % (w+1);')
        w('}')
        w('')
        w('@Override')
        w('public boolean isAccept(int absState) { // this can now move up?')
        w(' // decode absState -> state, offset')
        w(' int state = absState/(w+1);')
        w(' if (true || state < minErrors.length) {')
        w(' int offset = absState%(w+1);')
        w(' assert offset >= 0;')
        w(' return w - offset + minErrors[state] <= %d;' % n)
        w(' } else {')
        w(' return false;')
        w(' }')
        w('}')
    if MODE == 'array' and PACKED:
        # we moved into super class
        if False:
            w('')
            v = 2
            l = []
            for i in range(63):
                l.append(hex(v-1))
                v *= 2
            w('private final static long[] MASKS = new long[] {%s};' % ','.join(l), indent=1)
            w('')
        # unpack in java
        # (Java mirror of the Python unpack() below.)
        w('private int unpack(long[] data, int index, int bitsPerValue) {')
        w(' final long bitLoc = bitsPerValue * index;')
        w(' final int dataLoc = (int) (bitLoc >> %d);' % LOG2_WORD)
        w(' final int bitStart = (int) (bitLoc & %d);' % (WORD-1))
        w(' //System.out.println("index=" + index + " dataLoc=" + dataLoc + " bitStart=" + bitStart + " bitsPerV=" + bitsPerValue);')
        w(' if (bitStart + bitsPerValue <= %d) {' % WORD)
        w(' // not split')
        w(' return (int) ((data[dataLoc] >> bitStart) & MASKS[bitsPerValue-1]);')
        w(' } else {')
        w(' // split')
        w(' final int part = %d-bitStart;' % WORD)
        w(' return (int) (((data[dataLoc] >> bitStart) & MASKS[part-1]) +')
        w(' ((data[1+dataLoc] & MASKS[bitsPerValue-part-1]) << part));', indent=1)
        w(' }')
        w('}')
    # class
    w('}')
    w('')
    # Apply the deferred NBITS* substitutions and write the file out.
    fileOut = '%s.java' % className
    s = str(w)
    for sub, repl in subs:
        s = s.replace(sub, repl)
    open(fileOut, 'wb').write(s)
    print 'Wrote %s [%d lines; %.1f KB]' % \
      (fileOut, len(w.l), os.path.getsize(fileOut)/1024.)
def renderList(l):
    """Render a list of Java literal strings as a Java array initializer.

    Emits '{ ... }' with the elements comma-separated and wrapped onto a
    fresh indented line every 4 elements.

    Fix: replaced the Python-2-only ``xrange``/manual-index loop with
    ``enumerate`` (same output, works on both Python 2 and 3).
    """
    lx = ['    ']
    for i, item in enumerate(l):
        if i > 0:
            lx.append(',')
        if i % 4 == 0:
            # start a fresh indented line every 4 values (incl. the first)
            lx.append('\n    ')
        lx.append(item)
    return '{\n%s\n  }' % ''.join(lx)
# MASKS[i] == (1 << (i + 1)) - 1: a mask of the low (i + 1) bits, for
# widths 1..63.  Indexed as MASKS[width - 1] by pack()/unpack().
# Fix: replaced the Python-2-only xrange loop (which also leaked the
# temporaries `v` and `i` into the module namespace) with a comprehension.
MASKS = [(1 << (i + 1)) - 1 for i in range(63)]
# packs into longs; returns long[], numBits
def pack(l):
    """Bit-pack the non-negative ints in `l` into WORD-bit words.

    Returns (packed, bitsPerValue): `packed` is the list of words and
    `bitsPerValue` the uniform field width (enough bits for max(l),
    minimum 1).  Values are written little-endian within each word and
    may straddle a word boundary (low part in the current word, high
    part in the next).  Mirrors the Java unpack() emitted by main().
    """
    maxV = max(l)
    # Field width: ceil(log2(maxV + 1)), but at least 1 bit.
    bitsPerValue = max(1, int(math.ceil(math.log(maxV+1)/math.log(2.0))))
    bitsLeft = WORD          # free bits remaining in the word under construction
    pendingValue = 0         # word currently being filled
    packed = []
    for i in xrange(len(l)):
        v = l[i]
        if pendingValue > 0:
            # Sanity check: the bits accumulated so far must fit inside
            # the already-consumed portion of the current word.
            bitsUsed = math.ceil(math.log(pendingValue)/math.log(2.0))
            assert bitsUsed <= (WORD-bitsLeft), 'bitsLeft=%s (%s-%s=%s) bitsUsed=%s' % (bitsLeft, WORD, bitsLeft, WORD-bitsLeft, bitsUsed)
        if bitsLeft >= bitsPerValue:
            # Value fits entirely in the current word.
            pendingValue += v << (WORD-bitsLeft)
            bitsLeft -= bitsPerValue
            if bitsLeft == 0:
                packed.append(pendingValue)
                bitsLeft = WORD
                pendingValue = 0
        else:
            # split
            # bottom bitsLeft go in current word:
            pendingValue += (v & MASKS[bitsLeft-1]) << (WORD-bitsLeft)
            packed.append(pendingValue)
            # Remaining high bits start the next word.
            pendingValue = v >> bitsLeft
            bitsLeft = WORD - (bitsPerValue-bitsLeft)
    # Flush the final partially-filled word, if any.
    if bitsLeft < WORD:
        packed.append(pendingValue)
    # verify(l, packed, bitsPerValue)
    return packed, bitsPerValue
def verify(data, packedData, bitsPerValue):
    """Assert that every value round-trips through the packed representation."""
    for index, expected in enumerate(data):
        assert expected == unpack(packedData, index, bitsPerValue)
def unpack(data, index, bitsPerValue):
    """Read the index-th bitsPerValue-wide field out of the packed word list.

    Python mirror of the Java unpack() emitted by main(); fields may
    straddle a word boundary.
    """
    bitPos = bitsPerValue * index
    word = int(bitPos >> LOG2_WORD)    # word holding the field's low bits
    shift = int(bitPos & (WORD - 1))   # bit offset inside that word
    if shift + bitsPerValue <= WORD:
        # Field lies entirely within one word.
        return int((data[word] >> shift) & MASKS[bitsPerValue - 1])
    # Field straddles two words: low part from this word, high part from the next.
    lowWidth = WORD - shift
    low = (data[word] >> shift) & MASKS[lowWidth - 1]
    high = (data[word + 1] & MASKS[bitsPerValue - lowWidth - 1]) << lowWidth
    return int(low + high)
if __name__ == '__main__':
    # Generation relies on `assert` statements (indent balancing in
    # LineOutput, overflow checks in pack); running with -O strips them
    # and would silently skip that validation, so refuse to run.
    if not __debug__:
        print
        print 'ERROR: please run without -O'
        print
        sys.exit(1)
    main()
| apache-2.0 |
drglove/SickRage | lib/unidecode/x011.py | 252 | 4135 | data = (
'g', # 0x00
'gg', # 0x01
'n', # 0x02
'd', # 0x03
'dd', # 0x04
'r', # 0x05
'm', # 0x06
'b', # 0x07
'bb', # 0x08
's', # 0x09
'ss', # 0x0a
'', # 0x0b
'j', # 0x0c
'jj', # 0x0d
'c', # 0x0e
'k', # 0x0f
't', # 0x10
'p', # 0x11
'h', # 0x12
'ng', # 0x13
'nn', # 0x14
'nd', # 0x15
'nb', # 0x16
'dg', # 0x17
'rn', # 0x18
'rr', # 0x19
'rh', # 0x1a
'rN', # 0x1b
'mb', # 0x1c
'mN', # 0x1d
'bg', # 0x1e
'bn', # 0x1f
'', # 0x20
'bs', # 0x21
'bsg', # 0x22
'bst', # 0x23
'bsb', # 0x24
'bss', # 0x25
'bsj', # 0x26
'bj', # 0x27
'bc', # 0x28
'bt', # 0x29
'bp', # 0x2a
'bN', # 0x2b
'bbN', # 0x2c
'sg', # 0x2d
'sn', # 0x2e
'sd', # 0x2f
'sr', # 0x30
'sm', # 0x31
'sb', # 0x32
'sbg', # 0x33
'sss', # 0x34
's', # 0x35
'sj', # 0x36
'sc', # 0x37
'sk', # 0x38
'st', # 0x39
'sp', # 0x3a
'sh', # 0x3b
'', # 0x3c
'', # 0x3d
'', # 0x3e
'', # 0x3f
'Z', # 0x40
'g', # 0x41
'd', # 0x42
'm', # 0x43
'b', # 0x44
's', # 0x45
'Z', # 0x46
'', # 0x47
'j', # 0x48
'c', # 0x49
't', # 0x4a
'p', # 0x4b
'N', # 0x4c
'j', # 0x4d
'', # 0x4e
'', # 0x4f
'', # 0x50
'', # 0x51
'ck', # 0x52
'ch', # 0x53
'', # 0x54
'', # 0x55
'pb', # 0x56
'pN', # 0x57
'hh', # 0x58
'Q', # 0x59
'[?]', # 0x5a
'[?]', # 0x5b
'[?]', # 0x5c
'[?]', # 0x5d
'[?]', # 0x5e
'', # 0x5f
'', # 0x60
'a', # 0x61
'ae', # 0x62
'ya', # 0x63
'yae', # 0x64
'eo', # 0x65
'e', # 0x66
'yeo', # 0x67
'ye', # 0x68
'o', # 0x69
'wa', # 0x6a
'wae', # 0x6b
'oe', # 0x6c
'yo', # 0x6d
'u', # 0x6e
'weo', # 0x6f
'we', # 0x70
'wi', # 0x71
'yu', # 0x72
'eu', # 0x73
'yi', # 0x74
'i', # 0x75
'a-o', # 0x76
'a-u', # 0x77
'ya-o', # 0x78
'ya-yo', # 0x79
'eo-o', # 0x7a
'eo-u', # 0x7b
'eo-eu', # 0x7c
'yeo-o', # 0x7d
'yeo-u', # 0x7e
'o-eo', # 0x7f
'o-e', # 0x80
'o-ye', # 0x81
'o-o', # 0x82
'o-u', # 0x83
'yo-ya', # 0x84
'yo-yae', # 0x85
'yo-yeo', # 0x86
'yo-o', # 0x87
'yo-i', # 0x88
'u-a', # 0x89
'u-ae', # 0x8a
'u-eo-eu', # 0x8b
'u-ye', # 0x8c
'u-u', # 0x8d
'yu-a', # 0x8e
'yu-eo', # 0x8f
'yu-e', # 0x90
'yu-yeo', # 0x91
'yu-ye', # 0x92
'yu-u', # 0x93
'yu-i', # 0x94
'eu-u', # 0x95
'eu-eu', # 0x96
'yi-u', # 0x97
'i-a', # 0x98
'i-ya', # 0x99
'i-o', # 0x9a
'i-u', # 0x9b
'i-eu', # 0x9c
'i-U', # 0x9d
'U', # 0x9e
'U-eo', # 0x9f
'U-u', # 0xa0
'U-i', # 0xa1
'UU', # 0xa2
'[?]', # 0xa3
'[?]', # 0xa4
'[?]', # 0xa5
'[?]', # 0xa6
'[?]', # 0xa7
'g', # 0xa8
'gg', # 0xa9
'gs', # 0xaa
'n', # 0xab
'nj', # 0xac
'nh', # 0xad
'd', # 0xae
'l', # 0xaf
'lg', # 0xb0
'lm', # 0xb1
'lb', # 0xb2
'ls', # 0xb3
'lt', # 0xb4
'lp', # 0xb5
'lh', # 0xb6
'm', # 0xb7
'b', # 0xb8
'bs', # 0xb9
's', # 0xba
'ss', # 0xbb
'ng', # 0xbc
'j', # 0xbd
'c', # 0xbe
'k', # 0xbf
't', # 0xc0
'p', # 0xc1
'h', # 0xc2
'gl', # 0xc3
'gsg', # 0xc4
'ng', # 0xc5
'nd', # 0xc6
'ns', # 0xc7
'nZ', # 0xc8
'nt', # 0xc9
'dg', # 0xca
'tl', # 0xcb
'lgs', # 0xcc
'ln', # 0xcd
'ld', # 0xce
'lth', # 0xcf
'll', # 0xd0
'lmg', # 0xd1
'lms', # 0xd2
'lbs', # 0xd3
'lbh', # 0xd4
'rNp', # 0xd5
'lss', # 0xd6
'lZ', # 0xd7
'lk', # 0xd8
'lQ', # 0xd9
'mg', # 0xda
'ml', # 0xdb
'mb', # 0xdc
'ms', # 0xdd
'mss', # 0xde
'mZ', # 0xdf
'mc', # 0xe0
'mh', # 0xe1
'mN', # 0xe2
'bl', # 0xe3
'bp', # 0xe4
'ph', # 0xe5
'pN', # 0xe6
'sg', # 0xe7
'sd', # 0xe8
'sl', # 0xe9
'sb', # 0xea
'Z', # 0xeb
'g', # 0xec
'ss', # 0xed
'', # 0xee
'kh', # 0xef
'N', # 0xf0
'Ns', # 0xf1
'NZ', # 0xf2
'pb', # 0xf3
'pN', # 0xf4
'hn', # 0xf5
'hl', # 0xf6
'hm', # 0xf7
'hb', # 0xf8
'Q', # 0xf9
'[?]', # 0xfa
'[?]', # 0xfb
'[?]', # 0xfc
'[?]', # 0xfd
'[?]', # 0xfe
)
| gpl-3.0 |
realtsiry/rockbox4linux | utils/common/deploy-themeeditor.py | 1 | 2023 | #!/usr/bin/python
# __________ __ ___.
# Open \______ \ ____ ____ | | _\_ |__ _______ ___
# Source | _// _ \_/ ___\| |/ /| __ \ / _ \ \/ /
# Jukebox | | ( <_> ) \___| < | \_\ ( <_> > < <
# Firmware |____|_ /\____/ \___ >__|_ \|___ /\____/__/\_ \
# \/ \/ \/ \/ \/
# $Id: deploy-themeeditor.py 28153 2010-09-23 18:04:57Z bluebrother $
#
# Copyright (c) 2010 Dominik Riebeling
#
# All files in this archive are subject to the GNU General Public License.
# See the file COPYING in the source tree root for full license agreement.
#
# This software is distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY
# KIND, either express or implied.
#
import deploy
import sys
# Configure the shared `deploy` helper module for the Rockbox Theme
# Editor, then kick off the build/packaging run.
deploy.program = "rbthemeeditor"
deploy.project = "utils/themeeditor/themeeditor.pro"
deploy.svnserver = "svn://svn.rockbox.org/rockbox/"
# Repository paths checked out into the source archive.
deploy.svnpaths = \
    [ "utils/themeeditor/",
    "lib/skin_parser/",
    "docs/COPYING" ]
deploy.useupx = False
# Extra files copied into the OS X application bundle.
deploy.bundlecopy = {
    "resources/windowicon.icns" : "Contents/Resources/",
    "Info.plist" : "Contents/"
}
# Windows needs some special treatment: differentiate between program name
# and executable filename.
if sys.platform == "win32":
    deploy.progexe = "Release/" + deploy.program + ".exe"
    deploy.make = "mingw32-make"
elif sys.platform == "darwin":
    deploy.progexe = deploy.program + ".app"
    # OS X 10.6 defaults to gcc 4.2. Building universal binaries that are
    # compatible with 10.4 requires using gcc-4.0.
    if not "QMAKESPEC" in deploy.environment:
        deploy.environment["QMAKESPEC"] = "macx-g++40"
else:
    deploy.progexe = deploy.program
# all files of the program. Will get put into an archive after building
# (zip on w32, tar.bz2 on Linux). Does not apply on Mac which uses dmg.
deploy.programfiles = [ deploy.progexe ]
deploy.nsisscript = "utils/themeeditor/themeeditor.nsi"
deploy.deploy()
| gpl-2.0 |
aewallin/openvoronoi | python_examples/arc/arc_predicates.py | 1 | 6658 | # import ocl
import openvoronoi as ovd
import ovdvtk
# import time
import vtk
# import datetime
import math
import random
"""
This script does not use OpenVoronoi, it is used merely for drawing and
verifying solutions etc.
There are functions for:
- voronoi-bisectors and verifying the parametric equations for bisectors
- voronoi-vertex solvers (separator-solver)
- bi-tangent lines/points (for medial-axis pocketing)
"""
def drawVertex(myscreen, p, vertexColor, rad=1):
    """Add a sphere actor of radius `rad` at point p (z = 0) to the VTK screen."""
    sphere = ovdvtk.Sphere(center=(p.x, p.y, 0), radius=rad, color=vertexColor)
    myscreen.addActor(sphere)
def drawEdge(myscreen, e, edgeColor=ovdvtk.yellow):
    """Add a line actor for edge `e` (an indexable pair of endpoints, z = 0)."""
    start, end = e[0], e[1]
    actor = ovdvtk.Line(p1=(start.x, start.y, 0), p2=(end.x, end.y, 0), color=edgeColor)
    myscreen.addActor(actor)
def drawCircle(myscreen, c, r, circleColor=None):
    """Add a circle actor to the VTK screen.

    Supports both historical call forms:
      drawCircle(myscreen, circle, color)          -- `circle` has .c (center Point) and .r
      drawCircle(myscreen, center, radius, color)  -- explicit center Point and radius

    Fix: these used to be two same-named defs; the second definition
    silently shadowed the first, making the Circle-object form dead code.
    Merged into one backward-compatible function.
    """
    if circleColor is None:
        # Legacy 3-argument form: `c` is a Circle-like object, `r` is the color.
        circle, color = c, r
        myscreen.addActor(ovdvtk.Circle(center=(circle.c.x, circle.c.y, circle.c.z),
                                        radius=circle.r, color=color))
    else:
        myscreen.addActor(ovdvtk.Circle(center=(c.x, c.y, 0), radius=r, color=circleColor))
def drawLine(myscreen, p1, p2, lineColor=ovdvtk.yellow):
    """Add a straight line actor from p1 to p2 (z = 0) to the VTK screen."""
    actor = ovdvtk.Line(p1=(p1.x, p1.y, 0), p2=(p2.x, p2.y, 0), color=lineColor)
    myscreen.addActor(actor)
"""
def drawArc(myscreen, p1, p2, c, cw, arcColor):
r = (p1-c).norm()
pass
"""
# draw line a x + b y + c = 0
# draws lines roughly in a 100x100 box (?)
"""
def drawLine(myscreen, l, lineColor):
# x = -by-c / a
#if l.a != 0:
if (abs(l.a) > abs(l.b)):
y=100
p1 = ovd.Point( float(-l.b*y-l.c)/l.a , y )
y=-100
p2 = ovd.Point( float(-l.b*y-l.c)/l.a , y )
myscreen.addActor( ovdvtk.Line( p1=( p1.x,p1.y,0), p2=(p2.x,p2.y,0), color=lineColor ) )
else:
x=100
p1 = ovd.Point( x, float(-l.a*x-l.c)/l.b )
x=-100
p2 = ovd.Point( x, float(-l.a*x-l.c)/l.b )
myscreen.addActor( ovdvtk.Line( p1=( p1.x,p1.y,0), p2=(p2.x,p2.y,0), color=lineColor ) )
"""
# CIRCLE definition
# circle offset is (x(t) - xc)^2 + (y(t)-yc)^2 = (r+k*t)^2
# POINT is circle with r=1 and k=1
class Circle:
    """Circle site for offset computations.

    Offset equation: (x(t) - xc)^2 + (y(t) - yc)^2 = (r + k*t)^2.
    """
    def __init__(self, c=None, r=1, cw=1, k=1):
        # Fix: the default center used to be the mutable default argument
        # c=ovd.Point(0, 0), i.e. one shared Point object aliased across
        # every default-constructed Circle.  Create a fresh Point instead.
        self.c = ovd.Point(0, 0) if c is None else c
        self.r = r
        self.cw = cw  # CW=1, CCW = -1
        self.k = k  # offset direction
# LINE def
# line offset is a1 x + b1 y + c1 + k1 t = 0 and a*a + b*b = 1
class Line:
    """Line site: a*x + b*y + c = 0, with (a, b) expected to be unit length.

    The offset line at distance t is a*x + b*y + c + k*t = 0; `k` selects
    the offset side (left/right of the line).
    """
    def __init__(self, a, b, c, k):
        self.a = float(a)
        self.b = float(b)
        # Fix: removed the dead local `det = a*a + b*b` — it was computed
        # and discarded (presumably a leftover normalization check; the
        # a*a + b*b == 1 requirement is documented above instead).
        self.c = float(c)
        self.k = float(k)  # offset to left or right of line
def arc_in_region(p1, p2, c, cw, p):
    """Return True when p lies in the angular sector swept by the arc p1 -> p2.

    For a clockwise arc, p must be right of the c->p1 ray and not right of
    the c->p2 ray; mirrored for counter-clockwise.  (Short-circuit order of
    the is_right() predicate calls is preserved.)
    """
    if cw:
        return p.is_right(c, p1) and not p.is_right(c, p2)
    return not p.is_right(c, p1) and p.is_right(c, p2)
def drawArcPredicate():
    """Visual check of arc_in_region(): scatter random points and color them
    green/pink depending on whether they fall inside the arc's sector.

    Opens an interactive VTK window; blocks until it is closed.
    """
    myscreen = ovdvtk.VTKScreen()
    myscreen.camera.SetPosition(0.01, 0, 100)
    myscreen.camera.SetFocalPoint(0, 0, 0)
    myscreen.camera.SetClippingRange(-100, 3000)
    # Arc: center c1, radius r1, endpoints at 23 and 123 degrees.
    c1 = ovd.Point(0, 0)
    r1 = 20
    alfa1 = (float(23) / float(360)) * 2 * math.pi
    alfa2 = (float(123) / float(360)) * 2 * math.pi
    alfa2, alfa1 = alfa1, alfa2  # swap so the arc runs 123 -> 23 degrees
    alfa1_dir = ovd.Point(math.cos(alfa1), math.sin(alfa1))
    alfa2_dir = ovd.Point(math.cos(alfa2), math.sin(alfa2))
    p1 = c1 + r1 * alfa1_dir
    p2 = c1 + r1 * alfa2_dir
    cw = True
    # Labelled markers for the center and the two arc endpoints.
    drawVertex(myscreen, c1, ovdvtk.orange, rad=1)
    fa1 = ovdvtk.FollowerText(text="c", color=ovdvtk.orange, center=(c1.x + 1, c1.y, 0), scale=1)
    myscreen.addActor(fa1)
    drawVertex(myscreen, p1, ovdvtk.green, rad=1)
    fa2 = ovdvtk.FollowerText(text="p1", color=ovdvtk.green, center=(p1.x + 1, p1.y, 0), scale=1)
    myscreen.addActor(fa2)
    drawVertex(myscreen, p2, ovdvtk.red, rad=1)
    fa3 = ovdvtk.FollowerText(text="p2", color=ovdvtk.red, center=(p2.x + 1, p2.y, 0), scale=1)
    myscreen.addActor(fa3)
    # drawArc(myscreen, p1, p2, c1, True, ovdvtk.yellow)
    # ovdvtk.drawArc(myscreen, p1, p2, (p1-c1).norm(), c1, True, ovdvtk.yellow, da=0.1)
    ovdvtk.drawArc(myscreen, p1, p2, (p1 - c1).norm(), c1, cw, ovdvtk.orange, da=0.1)
    # Monte-Carlo sampling of the predicate over a 100x100 box.
    Nmax = 5000
    for n in range(Nmax):
        p = 100 * ovd.Point(random.random() - 0.5, random.random() - 0.5)
        if arc_in_region(p1, p2, c1, cw, p):
            drawVertex(myscreen, p, ovdvtk.lgreen, rad=0.1)
        else:
            drawVertex(myscreen, p, ovdvtk.pink, rad=0.1)
    myscreen.render()
    myscreen.iren.Start()
def closer_endpoint(p1, p2, p):
    """Return whichever of p1/p2 lies strictly nearer to p (ties go to p2)."""
    dist1 = (p1 - p).norm()
    dist2 = (p2 - p).norm()
    return p1 if dist1 < dist2 else p2
def projection_point(p1, p2, c1, cw, p):
    """Project p radially onto the circle through p1 centered at c1.

    Degenerate case: when p coincides with the center there is no radial
    direction, so p1 itself is returned.  (p2 and cw are unused here; the
    signature matches the other arc helpers.)
    """
    if p == c1:
        return p1
    direction = p - c1
    direction.normalize()
    radius = (p1 - c1).norm()
    return c1 + radius * direction
def apex_point(p1, p2, c1, cw, p):
    """Closest point of the arc p1 -> p2 (on circle c1, orientation cw) to p.

    Uses the radial projection when p falls inside the arc's angular
    sector, otherwise the nearer arc endpoint.
    """
    inside = arc_in_region(p1, p2, c1, cw, p)
    return projection_point(p1, p2, c1, cw, p) if inside else closer_endpoint(p1, p2, p)
def drawArcPredicate2():
    """Visual check of apex_point(): draw a line from each random sample to
    its closest point on the arc, colored by arc_in_region().

    Same setup as drawArcPredicate(), but with a counter-clockwise arc and
    no endpoint swap.  Opens an interactive VTK window; blocks until closed.
    """
    myscreen = ovdvtk.VTKScreen()
    myscreen.camera.SetPosition(0.01, 0, 100)
    myscreen.camera.SetFocalPoint(0, 0, 0)
    myscreen.camera.SetClippingRange(-100, 3000)
    # Arc: center c1, radius r1, endpoints at 23 and 123 degrees.
    c1 = ovd.Point(0, 0)
    r1 = 20
    alfa1 = (float(23) / float(360)) * 2 * math.pi
    alfa2 = (float(123) / float(360)) * 2 * math.pi
    # alfa2, alfa1 = alfa1, alfa2 # swap
    alfa1_dir = ovd.Point(math.cos(alfa1), math.sin(alfa1))
    alfa2_dir = ovd.Point(math.cos(alfa2), math.sin(alfa2))
    p1 = c1 + r1 * alfa1_dir
    p2 = c1 + r1 * alfa2_dir
    cw = False
    # Labelled markers for the center and the two arc endpoints.
    drawVertex(myscreen, c1, ovdvtk.orange, rad=1)
    fa1 = ovdvtk.FollowerText(text="c", color=ovdvtk.orange, center=(c1.x + 1, c1.y, 0), scale=1)
    myscreen.addActor(fa1)
    drawVertex(myscreen, p1, ovdvtk.green, rad=1)
    fa2 = ovdvtk.FollowerText(text="p1", color=ovdvtk.green, center=(p1.x + 1, p1.y, 0), scale=1)
    myscreen.addActor(fa2)
    drawVertex(myscreen, p2, ovdvtk.red, rad=1)
    fa3 = ovdvtk.FollowerText(text="p2", color=ovdvtk.red, center=(p2.x + 1, p2.y, 0), scale=1)
    myscreen.addActor(fa3)
    # drawArc(myscreen, p1, p2, c1, True, ovdvtk.yellow)
    # ovdvtk.drawArc(myscreen, p1, p2, (p1-c1).norm(), c1, True, ovdvtk.yellow, da=0.1)
    ovdvtk.drawArc(myscreen, p1, p2, (p1 - c1).norm(), c1, cw, ovdvtk.orange, da=0.1)
    # Monte-Carlo: for each sample, draw the segment to its apex point.
    Nmax = 5000
    for n in range(Nmax):
        p = 100 * ovd.Point(random.random() - 0.5, random.random() - 0.5)
        apex = apex_point(p1, p2, c1, cw, p)
        linecolor = ovdvtk.pink
        if arc_in_region(p1, p2, c1, cw, p):
            linecolor = ovdvtk.lgreen
        drawLine(myscreen, p, apex, linecolor)
    myscreen.render()
    myscreen.iren.Start()
if __name__ == "__main__":
# drawArcPredicate()
drawArcPredicate2()
| lgpl-2.1 |
ilo10/scikit-learn | sklearn/externals/joblib/hashing.py | 194 | 7504 | """
Fast cryptographic hash of Python objects, with a special case for fast
hashing of numpy arrays.
"""
# Author: Gael Varoquaux <gael dot varoquaux at normalesup dot org>
# Copyright (c) 2009 Gael Varoquaux
# License: BSD Style, 3 clauses.
import warnings
import pickle
import hashlib
import sys
import types
import struct
import io
# Pick a subclassable pickler implementation: on Python 3, pickle.Pickler
# is the C implementation whose save()/dispatch machinery cannot be
# overridden, so use the pure-Python pickle._Pickler there instead.
if sys.version_info[0] < 3:
    Pickler = pickle.Pickler
else:
    Pickler = pickle._Pickler
class _ConsistentSet(object):
    """ Class used to ensure the hash of Sets is preserved
        whatever the order of its items.
    """
    def __init__(self, set_sequence):
        # Pickling the sorted sequence makes the hash independent of set
        # iteration order.
        # NOTE(review): sorted() raises TypeError on Python 3 for sets of
        # mutually unorderable types (e.g. {1, 'a'}) — confirm whether such
        # sets can reach this path in this vendored version.
        self._sequence = sorted(set_sequence)
class _MyHash(object):
    """ Class used to hash objects that won't normally pickle """
    def __init__(self, *args):
        # The identifying components (e.g. function name, instance, class)
        # are stored as-is; the instance itself is then pickled/hashed.
        # The attribute name `args` is part of the pickled state and hence
        # of the resulting hash — do not rename.
        self.args = args
class Hasher(Pickler):
    """ A subclass of pickler, to do cryptographic hashing, rather than
        pickling.

        Objects are serialized through the pickle machinery into an
        in-memory stream, and the resulting bytes are fed to a hashlib
        digest.  Several save hooks are overridden so that the produced
        bytes (and therefore the hash) are deterministic.
    """
    def __init__(self, hash_name='md5'):
        self.stream = io.BytesIO()
        # Protocol 2 keeps the byte stream identical across Python
        # versions that share it, so hashes stay comparable.
        Pickler.__init__(self, self.stream, protocol=2)
        # Initialise the hash obj
        self._hash = hashlib.new(hash_name)
    def hash(self, obj, return_digest=True):
        # Pickle obj into the stream, fold the bytes into the digest and
        # (by default) return the hex digest.  Unpicklable objects only
        # produce a warning: whatever bytes were emitted before the
        # failure still contribute to the hash.
        try:
            self.dump(obj)
        except pickle.PicklingError as e:
            warnings.warn('PicklingError while hashing %r: %r' % (obj, e))
        dumps = self.stream.getvalue()
        self._hash.update(dumps)
        if return_digest:
            return self._hash.hexdigest()
    def save(self, obj):
        # type({}.pop) is the built-in bound-method type.
        if isinstance(obj, (types.MethodType, type({}.pop))):
            # the Pickler cannot pickle instance methods; here we decompose
            # them into components that make them uniquely identifiable
            if hasattr(obj, '__func__'):
                func_name = obj.__func__.__name__
            else:
                func_name = obj.__name__
            inst = obj.__self__
            if type(inst) == type(pickle):
                # Bound to a module: identify by module name.
                obj = _MyHash(func_name, inst.__name__)
            elif inst is None:
                # type(None) or type(module) do not pickle
                obj = _MyHash(func_name, inst)
            else:
                cls = obj.__self__.__class__
                obj = _MyHash(func_name, inst, cls)
        Pickler.save(self, obj)
    # The dispatch table of the pickler is not accessible in Python
    # 3, as these lines are only bugware for IPython, we skip them.
    def save_global(self, obj, name=None, pack=struct.pack):
        # We have to override this method in order to deal with objects
        # defined interactively in IPython that are not injected in
        # __main__
        kwargs = dict(name=name, pack=pack)
        if sys.version_info >= (3, 4):
            # The `pack` argument was dropped from save_global in 3.4.
            del kwargs['pack']
        try:
            Pickler.save_global(self, obj, **kwargs)
        except pickle.PicklingError:
            # Retry after (below) injecting the object into __main__ so
            # the base pickler can find it by name.
            Pickler.save_global(self, obj, **kwargs)
            module = getattr(obj, "__module__", None)
            if module == '__main__':
                my_name = name
                if my_name is None:
                    my_name = obj.__name__
                mod = sys.modules[module]
                if not hasattr(mod, my_name):
                    # IPython doesn't inject the variables define
                    # interactively in __main__
                    setattr(mod, my_name, obj)
    # Route builtins, types, old-style classes and functions through
    # save_global so they hash by identity/name rather than by value.
    dispatch = Pickler.dispatch.copy()
    # builtin
    dispatch[type(len)] = save_global
    # type
    dispatch[type(object)] = save_global
    # classobj
    dispatch[type(Pickler)] = save_global
    # function
    dispatch[type(pickle.dump)] = save_global
    def _batch_setitems(self, items):
        # forces order of keys in dict to ensure consistent hash
        Pickler._batch_setitems(self, iter(sorted(items)))
    def save_set(self, set_items):
        # forces order of items in Set to ensure consistent hash
        Pickler.save(self, _ConsistentSet(set_items))
    dispatch[type(set())] = save_set
class NumpyHasher(Hasher):
    """ Special case the hasher for when numpy is loaded.

        Numeric ndarrays are hashed from their raw memory buffer instead
        of being pickled element by element; object-dtype arrays still go
        through the regular pickling path.
    """
    def __init__(self, hash_name='md5', coerce_mmap=False):
        """
        Parameters
        ----------
        hash_name: string
            The hash algorithm to be used
        coerce_mmap: boolean
            Make no difference between np.memmap and np.ndarray
            objects.
        """
        self.coerce_mmap = coerce_mmap
        Hasher.__init__(self, hash_name=hash_name)
        # delayed import of numpy, to avoid tight coupling
        import numpy as np
        self.np = np
        # np.getbuffer existed on old numpy/Python 2; fall back to the
        # builtin memoryview otherwise.
        if hasattr(np, 'getbuffer'):
            self._getbuffer = np.getbuffer
        else:
            self._getbuffer = memoryview
    def save(self, obj):
        """ Subclass the save method, to hash ndarray subclass, rather
            than pickling them. Of course, this is a total abuse of
            the Pickler class.
        """
        if isinstance(obj, self.np.ndarray) and not obj.dtype.hasobject:
            # Compute a hash of the object:
            try:
                # memoryview is not supported for some dtypes,
                # e.g. datetime64, see
                # https://github.com/numpy/numpy/issues/4983. The
                # workaround is to view the array as bytes before
                # taking the memoryview
                obj_bytes_view = obj.view(self.np.uint8)
                self._hash.update(self._getbuffer(obj_bytes_view))
            # ValueError is raised by .view when the array is not contiguous
            # BufferError is raised by Python 3 in the hash update if
            # the array is Fortran rather than C contiguous
            except (ValueError, BufferError):
                # Cater for non-single-segment arrays: this creates a
                # copy, and thus alleviates this issue.
                # XXX: There might be a more efficient way of doing this
                obj_bytes_view = obj.flatten().view(self.np.uint8)
                self._hash.update(self._getbuffer(obj_bytes_view))
            # We store the class, to be able to distinguish between
            # Objects with the same binary content, but different
            # classes.
            if self.coerce_mmap and isinstance(obj, self.np.memmap):
                # We don't make the difference between memmap and
                # normal ndarrays, to be able to reload previously
                # computed results with memmap.
                klass = self.np.ndarray
            else:
                klass = obj.__class__
            # We also return the dtype and the shape, to distinguish
            # different views on the same data with different dtypes.
            # The object will be pickled by the pickler hashed at the end.
            obj = (klass, ('HASHED', obj.dtype, obj.shape, obj.strides))
        Hasher.save(self, obj)
def hash(obj, hash_name='md5', coerce_mmap=False):
    """ Quick calculation of a hash to identify uniquely Python objects
        containing numpy arrays.

        Note: intentionally shadows the builtin ``hash`` (public joblib API).

        Parameters
        -----------
        hash_name: 'md5' or 'sha1'
            Hashing algorithm used. sha1 is supposedly safer, but md5 is
            faster.
        coerce_mmap: boolean
            Make no difference between np.memmap and np.ndarray
    """
    # Only use the numpy-aware hasher when numpy has already been imported
    # somewhere, so this module never forces the import itself.
    numpy_loaded = 'numpy' in sys.modules
    hasher = (NumpyHasher(hash_name=hash_name, coerce_mmap=coerce_mmap)
              if numpy_loaded else Hasher(hash_name=hash_name))
    return hasher.hash(obj)
| bsd-3-clause |
ilmich/KISS | utils/generate-mobile-phone-regex.py | 8 | 5066 | #!/usr/bin/env python3
"""Generate Mobile Phone Regex
Usage:
generate-mobile-phone-regex.py -d [-i INPUT]
generate-mobile-phone-regex.py [-i INPUT [-o OUTPUT] [-O OUTPUT2]]
Options:
-i --input INPUT Input XML file path with phone information for all regions (from `libphonenumber`)
[default: ./libphonenumber/resources/PhoneNumberMetadata.xml]
-o --output-textable OUTPUT Output file path for regular expression that matches textable phone numbers
[default: ../app/src/main/res/raw/phone_number_textable.re]
-O --output-prefixes OUTPUT2 Output file path for file mapping country ISO codes to their national and
international phone number prefixes (for normalizing numbers)
[default: ../app/src/main/res/raw/phone_number_prefixes.csv]
-d --debug Debug output (instead of concatenating the final regular expression)
-h --help Show this screen
Remarks:
    This script uses data from Google's libphonenumber. Please download
PhoneNumberMetadata.xml and pass its path to the --input parameter.
See: https://github.com/googlei18n/libphonenumber
"""
import csv
import os.path
import sys
import xml.sax
from docopt import docopt
class PhoneNumberContentHandler(xml.sax.handler.ContentHandler):
    """SAX handler that collects, from libphonenumber's metadata XML, the
    mobile national-number patterns per dial code and the dialing prefixes
    per territory."""

    def __init__(self):
        super(PhoneNumberContentHandler, self).__init__()
        self._regexps = {}      # dial code -> set of national-number regexps
        self._normdata = {}     # territory id -> (dial code, intl prefix, natl prefix)
        self._path = []         # stack of currently-open element names
        self._country = None    # dial code of the territory being parsed
        self._next_regexp = ""  # accumulator for the current pattern's text

    def startElement(self, name, attrs):
        self._path.append(name)
        # Only elements at or below <territories>/<territory> matter.
        in_territory = (
            len(self._path) >= 3
            and self._path[1] == 'territories'
            and self._path[2] == 'territory'
        )
        if not in_territory:
            return
        if len(self._path) == 3:
            # Opening a <territory>: remember its (possibly ambiguous) dial code.
            dial_code = attrs['countryCode']
            self._country = dial_code
            self._regexps.setdefault(dial_code, set())
            # Prefix data for normalization — only two-character dial codes
            # are recorded here (quirk kept as-is from the original).
            if len(dial_code) == 2:
                self._normdata[attrs['id']] = (
                    dial_code,
                    attrs.get('internationalPrefix', ""),
                    attrs.get('nationalPrefix', ""),
                )

    def characters(self, content):
        # Accumulate text of <territory>/<mobile>/<nationalNumberPattern>;
        # SAX may deliver it in several chunks.
        path = self._path
        if len(path) == 5 and path[3] == 'mobile' and path[4] == 'nationalNumberPattern':
            self._next_regexp += content.strip(' \t\n')

    def endElement(self, name):
        self._path.pop()
        # Leaving a depth-5 element with accumulated text: store the pattern.
        if len(self._path) == 4 and self._next_regexp:
            self._regexps[self._country].add(self._next_regexp)
            self._next_regexp = ""

    def get_regexps(self):
        """Sorted list of (dial code, set-of-pattern-strings) pairs."""
        return sorted(self._regexps.items())

    def get_normdata(self):
        """Sorted list of (territory id, (dial code, intl prefix, natl prefix))."""
        return sorted(self._normdata.items())
def main(debug, input, textable, normdata):
    """Parse the libphonenumber metadata and write the output files.

    Parameters: `debug` prints the parsed data instead of writing files;
    `input`/`textable`/`normdata` are file paths (relative paths are
    resolved against this script's directory).  Returns 0 in debug mode,
    otherwise None (both map to a zero exit status via sys.exit).
    NOTE: the parameters `input` and the `with ... as file` handles below
    shadow Python builtins.
    """
    base_path = os.path.dirname(__file__)
    filepath_input = input if os.path.isabs(input) else os.path.join(base_path, input)
    filepath_textable = textable if os.path.isabs(textable) else os.path.join(base_path, textable)
    filepath_normdata = normdata if os.path.isabs(normdata) else os.path.join(base_path, normdata)
    # Run the SAX parser over the metadata XML, collecting into `handler`.
    handler = PhoneNumberContentHandler()
    parser = xml.sax.make_parser()
    parser.setContentHandler(handler)
    parser.parse(filepath_input)
    if debug:
        from pprint import pprint
        print(" • Textable phone number regular expression:")
        pprint(handler.get_regexps())
        print()
        print(" • Country number prefixes:")
        pprint(handler.get_normdata())
        print()
        return 0
    # Emit one big alternation: \+(?:CODE1(?:pat|pat)|CODE2(?:...)|...)
    # NOTE(review): the per-code patterns come from a set, so their order
    # within an alternation is nondeterministic across runs — confirm that
    # downstream consumers do not diff this file.
    with open(filepath_textable, 'w') as file:
        file.write('\\+(?:')
        for idx, (country, regexps) in enumerate(handler.get_regexps()):
            if idx > 0:
                file.write('|')
            # Group regexp patterns by country dial code
            file.write(country)
            file.write('(?:')
            # NOTE: inner loop reuses the name `idx` from the outer loop.
            for idx, regexp in enumerate(regexps):
                if idx > 0:
                    file.write('|')
                file.write(regexp)
            file.write(')')
        file.write(')')
    # CSV: territory id, dial code, international prefix, national prefix.
    with open(filepath_normdata, 'w') as file:
        writer = csv.writer(file)
        for country, data in handler.get_normdata():
            writer.writerow((country,) + data)
if __name__ == '__main__':
    # docopt parses the module docstring as the CLI spec and supplies the
    # documented defaults for any omitted option.
    arguments = docopt(__doc__)
    sys.exit(main(arguments['--debug'],
                  arguments['--input'],
                  arguments['--output-textable'],
                  arguments['--output-prefixes']))
| gpl-3.0 |
EmreAtes/spack | var/spack/repos/builtin/packages/pigz/package.py | 1 | 1817 | ##############################################################################
# Copyright (c) 2013-2018, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory.
#
# This file is part of Spack.
# Created by Todd Gamblin, tgamblin@llnl.gov, All rights reserved.
# LLNL-CODE-647188
#
# For details, see https://github.com/spack/spack
# Please also see the NOTICE and LICENSE files for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License (as
# published by the Free Software Foundation) version 2.1, February 1999.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
# conditions of the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
from spack import *
class Pigz(MakefilePackage):
    """A parallel implementation of gzip for modern multi-processor,
    multi-core machines."""
    # Project homepage and canonical source tarball location.
    homepage = "http://zlib.net/pigz/"
    url = "http://zlib.net/pigz/pigz-2.3.4.tar.gz"
    # Known release with its md5 checksum.
    version('2.3.4', '08e6b2e682bbf65ccf12c8966d633fc6')
    # pigz links against zlib for the DEFLATE implementation.
    depends_on('zlib')
    def build(self, spec, prefix):
        # The stock Makefile needs no configure step.
        make()
    def install(self, spec, prefix):
        # The Makefile has no install target, so copy the binary and the
        # man page into the prefix by hand.
        mkdirp(prefix.bin)
        mkdirp(prefix.man.man1)
        install('pigz', "%s/pigz" % prefix.bin)
        install('pigz.1', "%s/pigz.1" % prefix.man.man1)
| lgpl-2.1 |
vantinh1991/F240L-JB | tools/perf/util/setup.py | 4998 | 1330 | #!/usr/bin/python2
from distutils.core import setup, Extension
from os import getenv
from distutils.command.build_ext import build_ext as _build_ext
from distutils.command.install_lib import install_lib as _install_lib
class build_ext(_build_ext):
    # Redirect distutils build output into the directories chosen by the
    # perf Makefile (module-level build_lib / build_tmp, read from the
    # PYTHON_EXTBUILD_* environment variables).
    def finalize_options(self):
        _build_ext.finalize_options(self)
        self.build_lib = build_lib
        self.build_temp = build_tmp
class install_lib(_install_lib):
    # Install straight from the Makefile-provided build directory instead
    # of the distutils default.
    def finalize_options(self):
        _install_lib.finalize_options(self)
        self.build_dir = build_lib
# Base CFLAGS for the extension build; extended with the caller's CFLAGS.
cflags = ['-fno-strict-aliasing', '-Wno-write-strings']
cflags += getenv('CFLAGS', '').split()
# Build output locations are handed in by the perf Makefile.
build_lib = getenv('PYTHON_EXTBUILD_LIB')
build_tmp = getenv('PYTHON_EXTBUILD_TMP')
# NOTE: file() is the Python 2 builtin (this script targets python2 per
# the shebang); skip blank lines and '#' comments in the source list.
ext_sources = [f.strip() for f in file('util/python-ext-sources')
                if len(f.strip()) > 0 and f[0] != '#']
perf = Extension('perf',
                  sources = ext_sources,
                  include_dirs = ['util/include'],
                  extra_compile_args = cflags,
                 )
setup(name='perf',
      version='0.1',
      description='Interface with the Linux profiling infrastructure',
      author='Arnaldo Carvalho de Melo',
      author_email='acme@redhat.com',
      license='GPLv2',
      url='http://perf.wiki.kernel.org',
      ext_modules=[perf],
      cmdclass={'build_ext': build_ext, 'install_lib': install_lib})
| gpl-2.0 |
mbroadst/debian-qpid-python | mllib/__init__.py | 1 | 2433 | #
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
"""
This module provides document parsing and transformation utilities for
both SGML and XML.
"""
import os, dom, transforms, parsers, sys
import xml.sax, types
from xml.sax.handler import ErrorHandler
from xml.sax.xmlreader import InputSource
from cStringIO import StringIO
def transform(node, *args):
    """Apply each transform in *args* to *node* in sequence.

    Each argument may be a transform instance or an old-style class
    (Python 2 ``types.ClassType``), in which case it is instantiated
    with no arguments first.  Returns the result of the final dispatch.
    """
    result = node
    for t in args:
        if isinstance(t, types.ClassType):
            t = t()
        result = result.dispatch(t)
    return result
def sgml_parse(source):
    """Parse SGML from a string or an open file into a document tree.

    ``source`` may be a (byte) string or a file-like object of lines.
    Line numbers are recorded for error reporting against "<string>",
    the file's ``name`` attribute, or "<unknown>" as a fallback.
    """
    # FIX: fname was previously unbound (NameError at p.parser.line) when
    # source was a file-like object without a .name attribute.
    fname = "<unknown>"
    if isinstance(source, basestring):
        source = StringIO(source)
        fname = "<string>"
    elif hasattr(source, "name"):
        fname = source.name
    p = parsers.SGMLParser()
    num = 1
    for line in source:
        p.feed(line)
        p.parser.line(fname, num, None)
        num += 1
    p.close()
    return p.parser.tree
class Resolver:
    """SAX entity resolver that searches a list of directories for
    external entities (e.g. DTDs) referenced by systemId."""

    def __init__(self, path):
        # path: sequence of directories to search, in order.
        self.path = path

    def resolveEntity(self, publicId, systemId):
        # Return a byte stream for the first matching file on the search
        # path; otherwise fall back to default URL-based resolution.
        for p in self.path:
            fname = os.path.join(p, systemId)
            if os.path.exists(fname):
                source = InputSource(systemId)
                source.setByteStream(open(fname))
                return source
        return InputSource(systemId)
def xml_parse(filename, path=()):
    """Parse the XML file *filename* into a document tree.

    *path* is a sequence of directories searched for external entities
    (see Resolver).
    """
    if sys.version_info[0:2] == (2,3):
        # XXX: this is for older versions of python
        from urllib import pathname2url
        source = "file:%s" % pathname2url( os.path.abspath( filename ) )
    else:
        source = filename
    h = parsers.XMLParser()
    p = xml.sax.make_parser()
    p.setContentHandler(h)
    p.setErrorHandler(ErrorHandler())
    p.setEntityResolver(Resolver(path))
    p.parse(source)
    return h.parser.tree
def sexp(node):
    """Render *node* as an s-expression string via the Sexp transform."""
    s = transforms.Sexp()
    node.dispatch(s)
    return s.out
| apache-2.0 |
openstack/glance | glance/db/sqlalchemy/migrate_repo/versions/036_rename_metadef_schema_columns.py | 3 | 1041 | # Copyright (c) 2014 Hewlett-Packard Development Company, L.P.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from sqlalchemy.schema import MetaData
from sqlalchemy.schema import Table
def upgrade(migrate_engine):
    """Rename the 'schema' column (a reserved attribute name on SQLAlchemy
    Table objects) to 'json_schema' on metadef_objects and
    metadef_properties."""
    meta = MetaData(bind=migrate_engine)
    metadef_objects = Table('metadef_objects', meta, autoload=True)
    metadef_objects.c.schema.alter(name='json_schema')
    metadef_properties = Table('metadef_properties', meta, autoload=True)
    metadef_properties.c.schema.alter(name='json_schema')
| apache-2.0 |
usakhelo/FreeCAD | src/Mod/Fem/PyGui/_TaskPanelFemMeshRegion.py | 2 | 9258 | # ***************************************************************************
# * *
# * Copyright (c) 2016 - Bernd Hahnebach <bernd@bimstatik.org> *
# * *
# * This program is free software; you can redistribute it and/or modify *
# * it under the terms of the GNU Lesser General Public License (LGPL) *
# * as published by the Free Software Foundation; either version 2 of *
# * the License, or (at your option) any later version. *
# * for detail see the LICENCE text file. *
# * *
# * This program is distributed in the hope that it will be useful, *
# * but WITHOUT ANY WARRANTY; without even the implied warranty of *
# * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the *
# * GNU Library General Public License for more details. *
# * *
# * You should have received a copy of the GNU Library General Public *
# * License along with this program; if not, write to the Free Software *
# * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 *
# * USA *
# * *
# ***************************************************************************
__title__ = "_TaskPanelFemMeshRegion"
__author__ = "Bernd Hahnebach"
__url__ = "http://www.freecadweb.org"
## @package TaskPanelFemMeshRegion
# \ingroup FEM
import FreeCAD
import FreeCADGui
from PySide import QtGui
from PySide import QtCore
class _TaskPanelFemMeshRegion:
    '''The TaskPanel for editing References property of FemMeshRegion objects'''

    def __init__(self, obj):
        FreeCADGui.Selection.clearSelection()
        self.sel_server = None
        self.obj = obj
        self.selection_mode_solid = False
        self.selection_mode_std_print_message = "Select Faces, Edges and Vertices by single click on them to add them to the list."
        self.selection_mode_solid_print_message = "Select Solids by single click on a Face or Edge which belongs to the Solid, to add the Solid to the list."
        self.form = FreeCADGui.PySideUic.loadUi(FreeCAD.getHomePath() + "Mod/Fem/PyGui/TaskPanelFemMeshRegion.ui")
        QtCore.QObject.connect(self.form.if_elelen, QtCore.SIGNAL("valueChanged(Base::Quantity)"), self.elelen_changed)
        QtCore.QObject.connect(self.form.rb_standard, QtCore.SIGNAL("toggled(bool)"), self.choose_selection_mode_standard)
        QtCore.QObject.connect(self.form.rb_solid, QtCore.SIGNAL("toggled(bool)"), self.choose_selection_mode_solid)
        QtCore.QObject.connect(self.form.pushButton_Reference, QtCore.SIGNAL("clicked()"), self.add_references)
        self.form.list_References.setContextMenuPolicy(QtCore.Qt.CustomContextMenu)
        self.form.list_References.connect(self.form.list_References, QtCore.SIGNAL("customContextMenuRequested(QPoint)"), self.references_list_right_clicked)
        self.get_meshregion_props()
        self.update()

    def accept(self):
        """Save edited values back to the object and close the task panel."""
        self.set_meshregion_props()
        if self.sel_server:
            FreeCADGui.Selection.removeObserver(self.sel_server)
        FreeCADGui.ActiveDocument.resetEdit()
        FreeCAD.ActiveDocument.recompute()
        return True

    def reject(self):
        """Close the task panel without saving; detach the selection observer."""
        if self.sel_server:
            FreeCADGui.Selection.removeObserver(self.sel_server)
        FreeCADGui.ActiveDocument.resetEdit()
        return True

    def get_meshregion_props(self):
        # Copy the object's properties into panel state.
        self.elelen = self.obj.CharacteristicLength
        self.references = []
        if self.obj.References:
            self.tuplereferences = self.obj.References
            self.get_references()

    def set_meshregion_props(self):
        # Write panel state back to the object's properties.
        self.obj.References = self.references
        self.obj.CharacteristicLength = self.elelen

    def update(self):
        'fills the widgets'
        self.form.if_elelen.setText(self.elelen.UserString)
        self.rebuild_list_References()

    def elelen_changed(self, base_quantity_value):
        self.elelen = base_quantity_value

    def choose_selection_mode_standard(self, state):
        self.selection_mode_solid = not state
        if self.sel_server and not self.selection_mode_solid:
            print(self.selection_mode_std_print_message)

    def choose_selection_mode_solid(self, state):
        self.selection_mode_solid = state
        if self.sel_server and self.selection_mode_solid:
            print(self.selection_mode_solid_print_message)

    def get_references(self):
        # Flatten (obj, [elem, ...]) tuples into (obj, elem) pairs.
        for ref in self.tuplereferences:
            for elem in ref[1]:
                self.references.append((ref[0], elem))

    def references_list_right_clicked(self, QPos):
        # Context menu with a single "Remove Reference" entry.
        self.form.contextMenu = QtGui.QMenu()
        menu_item = self.form.contextMenu.addAction("Remove Reference")
        if not self.references:
            menu_item.setDisabled(True)
        self.form.connect(menu_item, QtCore.SIGNAL("triggered()"), self.remove_reference)
        parentPosition = self.form.list_References.mapToGlobal(QtCore.QPoint(0, 0))
        self.form.contextMenu.move(parentPosition + QPos)
        self.form.contextMenu.show()

    def remove_reference(self):
        if not self.references:
            return
        currentItemName = str(self.form.list_References.currentItem().text())
        for ref in self.references:
            refname_to_compare_listentry = ref[0].Name + ':' + ref[1]
            if refname_to_compare_listentry == currentItemName:
                self.references.remove(ref)
        self.rebuild_list_References()

    def add_references(self):
        '''Called if Button add_reference is triggered'''
        # in constraints EditTaskPanel the selection is active as soon as the taskpanel is open
        # here the addReference button EditTaskPanel has to be triggered to start selection mode
        FreeCADGui.Selection.clearSelection()
        # start SelectionObserver and parse the function to add the References to the widget
        if self.selection_mode_solid:  # print message on button click
            print_message = self.selection_mode_solid_print_message
        else:
            print_message = self.selection_mode_std_print_message
        import FemSelectionObserver
        self.sel_server = FemSelectionObserver.FemSelectionObserver(self.selectionParser, print_message)

    def selectionParser(self, selection):
        print('selection: ', selection[0].Shape.ShapeType, ' ', selection[0].Name, ' ', selection[1])
        if hasattr(selection[0], "Shape") and selection[1]:
            elt = selection[0].Shape.getElement(selection[1])
            if self.selection_mode_solid:
                # in solid selection mode use edges and faces for selection of a solid
                solid_to_add = None
                if elt.ShapeType == 'Edge':
                    found_edge = False
                    for i, s in enumerate(selection[0].Shape.Solids):
                        for e in s.Edges:
                            if elt.isSame(e):
                                if not found_edge:
                                    solid_to_add = str(i + 1)
                                else:
                                    FreeCAD.Console.PrintMessage('Edge belongs to more than one solid\n')
                                    solid_to_add = None
                                found_edge = True
                elif elt.ShapeType == 'Face':
                    found_face = False
                    for i, s in enumerate(selection[0].Shape.Solids):
                        for e in s.Faces:
                            if elt.isSame(e):
                                if not found_face:
                                    solid_to_add = str(i + 1)
                                else:
                                    FreeCAD.Console.PrintMessage('Face belongs to more than one solid\n')
                                    solid_to_add = None
                                # FIX: was 'found_edge = True' (copy-paste bug),
                                # so the more-than-one-solid guard never fired
                                # for faces.
                                found_face = True
                if solid_to_add:
                    selection = (selection[0], 'Solid' + solid_to_add)
                    print('selection element changed to Solid: ', selection[0].Shape.ShapeType, ' ', selection[0].Name, ' ', selection[1])
                else:
                    return
            if selection not in self.references:
                self.references.append(selection)
                self.rebuild_list_References()
            else:
                FreeCAD.Console.PrintMessage(selection[0].Name + ' --> ' + selection[1] + ' is in reference list already!\n')

    def rebuild_list_References(self):
        self.form.list_References.clear()
        items = []
        for ref in self.references:
            item_name = ref[0].Name + ':' + ref[1]
            items.append(item_name)
        for listItemName in sorted(items):
            self.form.list_References.addItem(listItemName)
| lgpl-2.1 |
weolar/miniblink49 | v8_6_7/src/inspector/build/check_injected_script_source.py | 18 | 3538 | #!/usr/bin/env python
# Copyright (c) 2014 Google Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Copied from blink:
# WebKit/Source/devtools/scripts/check_injected_script_source.py
#
import re
import sys
import os
def validate_injected_script(fileName):
    """Scan *fileName* for calls to overridable JavaScript builtins.

    The injected script must not rely on prototype/global functions that
    page JavaScript can redefine; each offending line is reported.
    Lines containing "suppressBlacklist" are exempt.
    """
    f = open(fileName, "r")
    lines = f.readlines()
    f.close()
    proto_functions = "|".join([
        # Array.prototype.*
        "concat", "every", "filter", "forEach", "indexOf", "join", "lastIndexOf", "map", "pop",
        "push", "reduce", "reduceRight", "reverse", "shift", "slice", "some", "sort", "splice", "toLocaleString", "toString", "unshift",
        # Function.prototype.*
        "apply", "bind", "call", "isGenerator", "toSource",
        # Object.prototype.*
        "toString",
    ])
    global_functions = "|".join([
        "eval", "uneval", "isFinite", "isNaN", "parseFloat", "parseInt", "decodeURI", "decodeURIComponent",
        "encodeURI", "encodeURIComponent", "escape", "unescape", "Map", "Set"
    ])
    # Black list:
    # - instanceof, since e.g. "obj instanceof Error" may throw if Error is overridden and is not a function
    # - Object.prototype.toString()
    # - Array.prototype.*
    # - Function.prototype.*
    # - Math.*
    # - Global functions
    black_list_call_regex = re.compile(r"\sinstanceof\s+\w*|\bMath\.\w+\(|(?<!InjectedScriptHost)\.(" + proto_functions + r")\(|[^\.]\b(" + global_functions + r")\(")
    errors_found = False
    for i, line in enumerate(lines):
        if line.find("suppressBlacklist") != -1:
            continue
        for match in re.finditer(black_list_call_regex, line):
            errors_found = True
            # Python 2 print statement (script runs under python2, per shebang).
            print "ERROR: Black listed expression in %s at line %02d column %02d: %s" % (os.path.basename(fileName), i + 1, match.start(), match.group(0))
    if not errors_found:
        print "OK"
print "OK"
def main(argv):
    """CLI entry point: validate the file given as the first argument.

    Returns 1 on usage error; None (exit status 0) otherwise.
    """
    if len(argv) < 2:
        print('ERROR: Usage: %s path/to/injected-script-source.js' % argv[0])
        return 1
    validate_injected_script(argv[1])


if __name__ == '__main__':
    sys.exit(main(sys.argv))
| apache-2.0 |
im-infamou5/volatility | volatility/plugins/linux/bash.py | 44 | 7893 | # Volatility
# Copyright (C) 2007-2013 Volatility Foundation
#
# This file is part of Volatility.
#
# Volatility is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# Volatility is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Volatility. If not, see <http://www.gnu.org/licenses/>.
#
"""
@author: Andrew Case
@license: GNU General Public License 2.0
@contact: atcuno@gmail.com
@organization:
"""
import struct
from operator import attrgetter
import volatility.obj as obj
import volatility.debug as debug
import volatility.addrspace as addrspace
import volatility.plugins.linux.common as linux_common
import volatility.plugins.linux.pslist as linux_pslist
# Structure layouts (vtypes) for readline's history entry as used by bash:
# three pointers -- line, timestamp, data -- giving 0xc bytes on 32-bit
# targets and 24 bytes on 64-bit targets.
bash_vtypes_32 = {
    '_hist_entry': [ 0xc, {
    'line': [0x0, ['pointer', ['String', dict(length = 1024)]]],
    'timestamp': [0x4, ['pointer', ['String', dict(length = 1024)]]],
    'data': [0x8, ['pointer', ['void']]],
    }],
}

bash_vtypes_64 = {
    '_hist_entry': [ 24, {
    'line': [0, ['pointer', ['String', dict(length = 1024)]]],
    'timestamp': [8, ['pointer', ['String', dict(length = 1024)]]],
    'data': [16, ['pointer', ['void']]],
    }],
}
class _hist_entry(obj.CType):
    """A class for history entries"""

    def is_valid(self):
        # True only when this looks like a genuine bash history entry:
        # valid pointers, a non-empty command line, and a plausible
        # "#<epoch>" timestamp string.
        # Check the basic structure members
        if (not obj.CType.is_valid(self) or
            not self.line.is_valid() or
            len(self.line.dereference()) == 0 or
            not self.timestamp.is_valid()):
            return False

        # A pointer to the timestamp string
        ts = self.timestamp.dereference()

        # At this point in time, the epoc integer size will
        # never be less than 10 characters, and the stamp is
        # always preceded by a pound/hash character.
        if len(ts) < 10 or str(ts)[0] != "#":
            return False

        # The final check is to make sure the entire string
        # is composed of numbers. Try to convert to an int.
        try:
            int(str(ts)[1:])
        except ValueError:
            return False

        return True

    @property
    def time_as_integer(self):
        # Get the string and remove the leading "#" from the timestamp
        time_string = str(self.timestamp.dereference())[1:]
        # Convert the string into an integer (number of seconds)
        return int(time_string)

    def time_object(self):
        # Build a UTC UnixTimeStamp object from the integer epoch seconds.
        nsecs = self.time_as_integer
        # Build a timestamp object from the integer
        time_val = struct.pack("<I", nsecs)
        time_buf = addrspace.BufferAddressSpace(self.obj_vm.get_config(), data = time_val)
        time_obj = obj.Object("UnixTimeStamp", offset = 0, vm = time_buf, is_utc = True)
        return time_obj
class BashTypes(obj.ProfileModification):
    """Profile modification that installs the _hist_entry vtypes matching
    the profile's pointer size and registers the _hist_entry class."""
    conditions = {"os" : lambda x : x in ["linux", "mac"]}

    def modification(self, profile):
        # Pick the 32- or 64-bit structure layout based on the profile.
        if profile.metadata.get('memory_model', '32bit') == "32bit":
            profile.vtypes.update(bash_vtypes_32)
        else:
            profile.vtypes.update(bash_vtypes_64)

        profile.object_classes.update({"_hist_entry": _hist_entry})
class linux_bash(linux_pslist.linux_pslist):
    """Recover bash history from bash process memory"""

    def __init__(self, config, *args, **kwargs):
        linux_pslist.linux_pslist.__init__(self, config, *args, **kwargs)
        self._config.add_option('PRINTUNALLOC', short_option = 'P', default = None, help = 'print unallocated entries, please redirect to a file', action = 'store_true')
        self._config.add_option('HISTORY_LIST', short_option = 'H', default = None, help = 'address from history_list - see the Volatility wiki', action = 'store', type = 'long')
        self._config.add_option('SCAN_ALL', short_option = 'A', default = False, help = 'scan all processes, not just those named bash', action = 'store_true')

    def calculate(self):
        """Yield (task, hist_entry) pairs, either by brute-force scanning
        each process heap or by walking a known history_list address."""
        linux_common.set_plugin_members(self)

        tasks = linux_pslist.linux_pslist(self._config).calculate()

        for task in tasks:
            proc_as = task.get_process_address_space()

            # In cases when mm is an invalid pointer
            if not proc_as:
                continue

            if not self._config.HISTORY_LIST:
                # Do we scan everything or just /bin/bash instances?
                if not (self._config.SCAN_ALL or str(task.comm) == "bash"):
                    continue

                # Keep a bucket of history objects so we can order them
                history_entries = []

                # Brute force the history list of an address isn't provided
                ts_offset = proc_as.profile.get_obj_offset("_hist_entry", "timestamp")

                # Are we dealing with 32 or 64-bit pointers
                if proc_as.profile.metadata.get('memory_model', '32bit') == '32bit':
                    pack_format = "I"
                else:
                    pack_format = "Q"

                # Look for strings that begin with pound/hash on the process heap
                for ptr_hash in task.search_process_memory(["#"], heap_only = True):
                    # Find pointers to this strings address, also on the heap
                    addr = struct.pack(pack_format, ptr_hash)
                    for ptr_string in task.search_process_memory([addr], heap_only = True):
                        # Check if we found a valid history entry object
                        hist = obj.Object("_hist_entry",
                                          offset = ptr_string - ts_offset,
                                          vm = proc_as)
                        if hist.is_valid():
                            history_entries.append(hist)
                            # We can terminate this inner loop now
                            break

                # Report everything we found in order
                for hist in sorted(history_entries, key = attrgetter('time_as_integer')):
                    yield task, hist
            else:
                # FIX: was a duplicated assignment
                # (`the_history_addr = the_history_addr = ...`).
                the_history_addr = self._config.HISTORY_LIST
                the_history = obj.Object("Pointer", vm = proc_as, offset = the_history_addr)
                max_ents = 2001
                the_history = obj.Object(theType = 'Array', offset = the_history,
                                         vm = proc_as, targetType = 'Pointer',
                                         count = max_ents)

                for ptr in the_history:
                    if not ptr:
                        if self._config.PRINTUNALLOC:
                            continue
                        else:
                            break

                    hist = ptr.dereference_as("_hist_entry")

                    if hist.is_valid():
                        yield task, hist

    def render_text(self, outfd, data):
        """Print a table of (pid, name, timestamp, command) rows."""
        self.table_header(outfd, [("Pid", "8"),
                                  ("Name", "20"),
                                  ("Command Time", "30"),
                                  ("Command", ""),])

        for task, hist_entry in data:
            self.table_row(outfd, task.pid, task.comm,
                           hist_entry.time_object(),
                           hist_entry.line.dereference())
| gpl-2.0 |
mluo613/osf.io | scripts/find_guids_without_referents.py | 63 | 1593 | """Finds Guids that do not have referents or that point to referents that no longer exist.
E.g. a node was created and given a guid but an error caused the node to
get deleted, leaving behind a guid that points to nothing.
"""
import sys
from modularodm import Q
from framework.guid.model import Guid
from website.app import init_app
from scripts import utils as scripts_utils
import logging
logger = logging.getLogger(__name__)
def main():
    """Log every invalid Guid found; adds a file logger unless 'dry' is
    passed on the command line."""
    if 'dry' not in sys.argv:
        scripts_utils.add_file_logger(logger, __file__)
    # Set up storage backends
    init_app(routes=False)
    logger.info('{n} invalid GUID objects found'.format(n=len(get_targets())))
    logger.info('Finished.')
def get_targets():
    """Find GUIDs with no referents and GUIDs with referents that no longer exist."""
    # Use a loop because querying MODM with Guid.find(Q('referent', 'eq', None))
    # only catches the first case.
    ret = []
    # NodeFiles were once a GuidStored object and are no longer used any more.
    # However, they still exist in the production database. We just skip over them
    # for now, but they can probably need to be removed in the future.
    # There were also 10 osfguidfile objects that lived in a corrupt repo that
    # were not migrated to OSF storage, so we skip those as well. /sloria /jmcarp
    for each in Guid.find(Q('referent.1', 'nin', ['nodefile', 'osfguidfile'])):
        if each.referent is None:
            logger.info('GUID {} has no referent.'.format(each._id))
            ret.append(each)
    return ret


# Allow running as a standalone script.
if __name__ == '__main__':
    main()
| apache-2.0 |
banditlev/meangarden | node_modules/node-gyp/gyp/tools/pretty_gyp.py | 2618 | 4756 | #!/usr/bin/env python
# Copyright (c) 2012 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Pretty-prints the contents of a GYP file."""
import sys
import re
# Regex to remove comments when we're counting braces.
COMMENT_RE = re.compile(r'\s*#.*')
# Regex to remove quoted strings when we're counting braces.
# It takes into account quoted quotes, and makes sure that the quotes match.
# NOTE: It does not handle quotes that span more than one line, or
# cases where an escaped quote is preceeded by an escaped backslash.
QUOTE_RE_STR = r'(?P<q>[\'"])(.*?)(?<![^\\][\\])(?P=q)'
QUOTE_RE = re.compile(QUOTE_RE_STR)
def comment_replace(matchobj):
  """Return the matched line with everything after the '#' replaced by an
  equal-length run of '#' characters (prefix and marker preserved)."""
  prefix = matchobj.group(1)
  marker = matchobj.group(2)
  body = matchobj.group(3)
  return prefix + marker + '#' * len(body)
def mask_comments(input):
  """Blank out comment text (everything after '#') so later brace
  counting never sees braces inside comments.

  FIX: the previous docstring was copy-pasted from mask_quotes and
  wrongly claimed this masks quoted strings; it masks comments.
  """
  search_re = re.compile(r'(.*?)(#)(.*)')
  def _mask(m):
    # Same behavior as the module-level comment_replace helper, inlined
    # so this function is self-contained.
    return m.group(1) + m.group(2) + '#' * len(m.group(3))
  return [search_re.sub(_mask, line) for line in input]
def quote_replace(matchobj):
  """Return the match with the quoted body replaced by 'x' characters of
  the same length, keeping the prefix and both quote characters."""
  masked_body = 'x' * len(matchobj.group(3))
  return "%s%s%s%s" % (matchobj.group(1), matchobj.group(2),
                       masked_body, matchobj.group(2))
def mask_quotes(input):
  """Blank out the contents of quoted strings so later brace counting
  never sees braces that live inside string literals."""
  # Same pattern as the module-level QUOTE_RE_STR, inlined here: a quote,
  # minimal body, then the matching (non-escaped) closing quote.
  quoted = r'(?P<q>[\'"])(.*?)(?<![^\\][\\])(?P=q)'
  pattern = re.compile(r'(.*?)' + quoted)
  def _mask(m):
    return "%s%s%s%s" % (m.group(1), m.group(2),
                         'x' * len(m.group(3)), m.group(2))
  return [pattern.sub(_mask, line) for line in input]
def do_split(input, masked_input, search_re):
  """Split lines of *input* wherever *search_re* matches the corresponding
  *masked_input* line, keeping both lists in lockstep.

  The length of the match's group(1) gives the split column; splitting is
  repeated until the masked line no longer matches.  Returns the new
  (input, masked_input) line lists.
  """
  output = []
  mask_output = []
  for (line, masked_line) in zip(input, masked_input):
    m = search_re.match(masked_line)
    while m:
      split = len(m.group(1))
      line = line[:split] + r'\n' + line[split:]
      masked_line = masked_line[:split] + r'\n' + masked_line[split:]
      m = search_re.match(masked_line)
    output.extend(line.split(r'\n'))
    mask_output.extend(masked_line.split(r'\n'))
  return (output, mask_output)
def split_double_braces(input):
  """Masks out the quotes and comments, and then splits appropriate
  lines (lines that match the double_*_brace re's above) before
  indenting them below.

  These are used to split lines which have multiple braces on them, so
  that the indentation looks prettier when all laid out (e.g. closing
  braces make a nice diagonal line).
  """
  # An opening bracket following a bracket/comma, or a closing bracket
  # following another closing bracket, marks a split point.
  double_open_brace_re = re.compile(r'(.*?[\[\{\(,])(\s*)([\[\{\(])')
  double_close_brace_re = re.compile(r'(.*?[\]\}\)],?)(\s*)([\]\}\)])')
  masked_input = mask_quotes(input)
  masked_input = mask_comments(masked_input)
  (output, mask_output) = do_split(input, masked_input, double_open_brace_re)
  (output, mask_output) = do_split(output, mask_output, double_close_brace_re)
  return output
def count_braces(line):
  """Return (delta, after) for *line*: the net change in open brackets
  with comments and quoted strings masked out, and whether the indent
  change should take effect only after this line is printed."""
  # Mask comments, then quoted strings (same patterns as the module-level
  # COMMENT_RE / QUOTE_RE).
  stripped = re.sub(r'\s*#.*', r'', line)
  stripped = re.sub(r'(?P<q>[\'"])(.*?)(?<![^\\][\\])(?P=q)', r"''", stripped)
  delta = 0
  for ch in stripped:
    if ch in '[({':
      delta += 1
    elif ch in '])}':
      delta -= 1
  # Opening braces indent the lines that follow, not this one.
  after = delta > 0
  # Special case: a closing brace with something other than whitespace
  # ahead of it stays with the previous indentation level, so defer the
  # unindent until after this line is printed.
  closing_prefix_re = re.compile(r'(.*?[^\s\]\}\)]+.*?)([\]\}\)],?)\s*$')
  if delta < 0 and closing_prefix_re.match(stripped):
    after = True
  return (delta, after)
def prettyprint_input(lines):
"""Does the main work of indenting the input based on the brace counts."""
indent = 0
basic_offset = 2
last_line = ""
for line in lines:
if COMMENT_RE.match(line):
print line
else:
line = line.strip('\r\n\t ') # Otherwise doesn't strip \r on Unix.
if len(line) > 0:
(brace_diff, after) = count_braces(line)
if brace_diff != 0:
if after:
print " " * (basic_offset * indent) + line
indent += brace_diff
else:
indent += brace_diff
print " " * (basic_offset * indent) + line
else:
print " " * (basic_offset * indent) + line
else:
print ""
last_line = line
def main():
  """Pretty-print the GYP file named on the command line (or stdin when
  no argument is given); returns the process exit status."""
  if len(sys.argv) > 1:
    data = open(sys.argv[1]).read().splitlines()
  else:
    data = sys.stdin.read().splitlines()
  # Split up the double braces.
  lines = split_double_braces(data)
  # Indent and print the output.
  prettyprint_input(lines)
  return 0


if __name__ == '__main__':
  sys.exit(main())
| mit |
ativelkov/murano-api | tools/install_venv.py | 4 | 2585 | # vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Copyright 2010 OpenStack Foundation
# Copyright 2013 IBM Corp.
# Copyright (c) 2013 Hewlett-Packard Development Company, L.P.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import ConfigParser
import os
import sys
import install_venv_common as install_venv # flake8: noqa
def print_help(project, venv, root):
help = """
%(project)s development environment setup is complete.
%(project)s development uses virtualenv to track and manage Python
dependencies while in development and testing.
To activate the %(project)s virtualenv for the extent of your current
shell session you can run:
$ source %(venv)s/bin/activate
Or, if you prefer, you can run commands in the virtualenv on a case by
case basis by running:
$ %(root)s/tools/with_venv.sh <your command>
"""
print help % dict(project=project, venv=venv, root=root)
def main(argv):
    """Create the project virtualenv and install its dependencies.

    Honours the 'tools_path' and 'venv' environment overrides and reads
    the project name from setup.cfg.
    """
    root = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
    if os.environ.get('tools_path'):
        root = os.environ['tools_path']
    venv = os.path.join(root, '.venv')
    if os.environ.get('venv'):
        venv = os.environ['venv']
    pip_requires = os.path.join(root, 'requirements.txt')
    test_requires = os.path.join(root, 'test-requirements.txt')
    py_version = "python%s.%s" % (sys.version_info[0], sys.version_info[1])
    setup_cfg = ConfigParser.ConfigParser()
    setup_cfg.read('setup.cfg')
    project = setup_cfg.get('metadata', 'name')
    install = install_venv.InstallVenv(
        root, venv, pip_requires, test_requires, py_version, project)
    options = install.parse_args(argv)
    install.check_python_version()
    install.check_dependencies()
    install.create_virtualenv(no_site_packages=options.no_site_packages)
    install.install_dependencies()
    print_help(project, venv, root)


if __name__ == '__main__':
    main(sys.argv)
| apache-2.0 |
domenicosolazzo/practice-django | static_site_generator/venv/lib/python2.7/site-packages/pip/download.py | 328 | 22580 | import cgi
import email.utils
import hashlib
import getpass
import mimetypes
import os
import platform
import re
import shutil
import sys
import tempfile
import pip
from pip.backwardcompat import urllib, urlparse, raw_input
from pip.exceptions import InstallationError, HashMismatch
from pip.util import (splitext, rmtree, format_size, display_path,
backup_dir, ask_path_exists, unpack_file,
create_download_cache_folder, cache_download)
from pip.vcs import vcs
from pip.log import logger
from pip._vendor import requests, six
from pip._vendor.requests.adapters import BaseAdapter
from pip._vendor.requests.auth import AuthBase, HTTPBasicAuth
from pip._vendor.requests.compat import IncompleteRead
from pip._vendor.requests.exceptions import InvalidURL, ChunkedEncodingError
from pip._vendor.requests.models import Response
from pip._vendor.requests.structures import CaseInsensitiveDict
# Names re-exported via `from pip.download import *`.
__all__ = ['get_file_content',
           'is_url', 'url_to_path', 'path_to_url',
           'is_archive_file', 'unpack_vcs_link',
           'unpack_file_url', 'is_vcs_url', 'is_file_url', 'unpack_http_url']
def user_agent():
    """Return a string representing the user agent.

    Format: "pip/<version> <implementation>/<impl-version> <os>/<release>".
    """
    impl = platform.python_implementation()
    if impl == 'CPython':
        impl_version = platform.python_version()
    elif impl == 'PyPy':
        info = sys.pypy_version_info
        impl_version = '%s.%s.%s' % (info.major, info.minor, info.micro)
        if info.releaselevel != 'final':
            impl_version += info.releaselevel
    elif impl == 'Jython':
        impl_version = platform.python_version()  # Complete Guess
    elif impl == 'IronPython':
        impl_version = platform.python_version()  # Complete Guess
    else:
        impl_version = 'Unknown'

    # platform calls can raise IOError on some restricted systems.
    try:
        p_system = platform.system()
        p_release = platform.release()
    except IOError:
        p_system = 'Unknown'
        p_release = 'Unknown'

    parts = [
        'pip/%s' % pip.__version__,
        '%s/%s' % (impl, impl_version),
        '%s/%s' % (p_system, p_release),
    ]
    return " ".join(parts)
class MultiDomainBasicAuth(AuthBase):
    """Requests auth handler that remembers basic-auth credentials per host.

    Credentials embedded in the URL are stripped from the request, cached by
    netloc, and sent as HTTP basic auth; on a 401 the user is (optionally)
    prompted and the request is retried once with the new credentials.
    """
    def __init__(self, prompting=True):
        # prompting: when False, never ask the user on a 401 -- just pass
        # the 401 response through unchanged.
        self.prompting = prompting
        # Maps netloc (without embedded credentials) -> (username, password).
        self.passwords = {}
    def __call__(self, req):
        parsed = urlparse.urlparse(req.url)
        # Get the netloc without any embedded credentials
        netloc = parsed.netloc.split("@", 1)[-1]
        # Set the url of the request to the url without any credentials
        req.url = urlparse.urlunparse(parsed[:1] + (netloc,) + parsed[2:])
        # Use any stored credentials that we have for this netloc
        username, password = self.passwords.get(netloc, (None, None))
        # Extract credentials embedded in the url if we have none stored
        if username is None:
            username, password = self.parse_credentials(parsed.netloc)
        if username or password:
            # Store the username and password
            self.passwords[netloc] = (username, password)
            # Send the basic auth with this request
            req = HTTPBasicAuth(username or "", password or "")(req)
        # Attach a hook to handle 401 responses
        req.register_hook("response", self.handle_401)
        return req
    def handle_401(self, resp, **kwargs):
        """Response hook: on 401, prompt for credentials and retry once."""
        # We only care about 401 responses, anything else we want to just
        # pass through the actual response
        if resp.status_code != 401:
            return resp
        # We are not able to prompt the user so simple return the response
        if not self.prompting:
            return resp
        parsed = urlparse.urlparse(resp.url)
        # Prompt the user for a new username and password
        username = raw_input("User for %s: " % parsed.netloc)
        # NOTE(review): getpass is not in the visible import block --
        # presumably imported elsewhere in this module; verify.
        password = getpass.getpass("Password: ")
        # Store the new username and password to use for future requests
        if username or password:
            self.passwords[parsed.netloc] = (username, password)
        # Consume content and release the original connection to allow our new
        # request to reuse the same one.
        resp.content
        resp.raw.release_conn()
        # Add our new username and password to the request
        req = HTTPBasicAuth(username or "", password or "")(resp.request)
        # Send our new request
        new_resp = resp.connection.send(req, **kwargs)
        # Keep the 401 in the retry's history so callers can see what happened.
        new_resp.history.append(resp)
        return new_resp
    def parse_credentials(self, netloc):
        """Return (username, password) parsed from a "user:pass@host" netloc.

        Password is None when only a username is present; both are None when
        the netloc carries no credentials at all.
        """
        if "@" in netloc:
            userinfo = netloc.rsplit("@", 1)[0]
            if ":" in userinfo:
                return userinfo.split(":", 1)
            return userinfo, None
        return None, None
class LocalFSResponse(object):
    """Minimal file-backed stand-in for a urllib3 response object."""

    def __init__(self, fileobj):
        self.fileobj = fileobj

    def __getattr__(self, name):
        # Delegate anything we do not implement to the wrapped file object.
        return getattr(self.fileobj, name)

    def read(self, amt=None, decode_content=None, cache_content=False):
        # decode_content/cache_content exist only for signature compatibility
        # with urllib3; a plain local file has nothing to decode or cache.
        return self.fileobj.read(amt)

    # Insert Hacks to Make Cookie Jar work w/ Requests
    @property
    def _original_response(self):
        class _Message(object):
            def getheaders(self, header):
                return []

            def get_all(self, header, default):
                return []

        class _Response(object):
            @property
            def msg(self):
                return _Message()

        return _Response()
class LocalFSAdapter(BaseAdapter):
    """Requests transport adapter that serves file:// URLs from local disk."""
    def send(self, request, stream=None, timeout=None, verify=None, cert=None,
             proxies=None):
        parsed_url = urlparse.urlparse(request.url)
        # We only work for requests with a host of localhost
        if parsed_url.netloc.lower() != "localhost":
            raise InvalidURL("Invalid URL %r: Only localhost is allowed" %
                             request.url)
        # Drop the netloc so url_to_path() sees a plain file:/// URL.
        real_url = urlparse.urlunparse(parsed_url[:1] + ("",) + parsed_url[2:])
        pathname = url_to_path(real_url)
        resp = Response()
        resp.status_code = 200
        resp.url = real_url
        # Synthesize the headers a real HTTP server would have sent.
        # NOTE(review): os, email and mimetypes are not in the visible import
        # block -- presumably imported elsewhere in this module; verify.
        stats = os.stat(pathname)
        modified = email.utils.formatdate(stats.st_mtime, usegmt=True)
        resp.headers = CaseInsensitiveDict({
            "Content-Type": mimetypes.guess_type(pathname)[0] or "text/plain",
            "Content-Length": stats.st_size,
            "Last-Modified": modified,
        })
        # Wrap the open file so it quacks like a urllib3 response.
        resp.raw = LocalFSResponse(open(pathname, "rb"))
        resp.close = resp.raw.close
        return resp
    def close(self):
        # Nothing to clean up: each send() opens its own file handle.
        pass
class PipSession(requests.Session):
    """requests.Session preconfigured for pip.

    Adds pip's User-Agent, the multi-domain basic-auth handler, file:// URL
    support, and an optional session-wide default timeout.
    """
    # Default timeout (seconds) applied to every request unless the caller
    # passes an explicit one; None means wait indefinitely.
    timeout = None
    def __init__(self, *args, **kwargs):
        super(PipSession, self).__init__(*args, **kwargs)
        # Attach our User Agent to the request
        self.headers["User-Agent"] = user_agent()
        # Attach our Authentication handler to the session
        self.auth = MultiDomainBasicAuth()
        # Enable file:// urls
        self.mount("file://", LocalFSAdapter())
    def request(self, method, url, *args, **kwargs):
        # Make file:// urls not fail due to lack of a hostname
        parsed = urlparse.urlparse(url)
        if parsed.scheme == "file":
            url = urlparse.urlunparse(parsed[:1] + ("localhost",) + parsed[2:])
        # Allow setting a default timeout on a session
        kwargs.setdefault("timeout", self.timeout)
        # Dispatch the actual request
        return super(PipSession, self).request(method, url, *args, **kwargs)
def get_file_content(url, comes_from=None, session=None):
    """Gets the content of a file; it may be a filename, file: URL, or
    http: URL.  Returns (location, content).  Content is unicode.

    :param url: filename, file: URL or http(s): URL to read
    :param comes_from: path/URL of the requirements file that referenced
        ``url``; used to forbid http-hosted files referencing local ones
    :param session: optional :class:`PipSession` for http(s) downloads
    :raises InstallationError: for forbidden local references or when the
        local file cannot be opened/read
    """
    if session is None:
        session = PipSession()
    match = _scheme_re.search(url)
    if match:
        scheme = match.group(1).lower()
        # A remote requirements file must not pull in local files.
        if (scheme == 'file' and comes_from
                and comes_from.startswith('http')):
            raise InstallationError(
                'Requirements file %s references URL %s, which is local'
                % (comes_from, url))
        if scheme == 'file':
            # Normalize the file: URL down to a plain local path and fall
            # through to the open() below.
            path = url.split(':', 1)[1]
            path = path.replace('\\', '/')
            match = _url_slash_drive_re.match(path)
            if match:
                # Windows drive spelled "/c|/..." -> "c:/..."
                path = match.group(1) + ':' + path.split('|', 1)[1]
            path = urllib.unquote(path)
            if path.startswith('/'):
                path = '/' + path.lstrip('/')
            url = path
        else:
            ## FIXME: catch some errors
            resp = session.get(url)
            resp.raise_for_status()
            if six.PY3:
                return resp.url, resp.text
            else:
                return resp.url, resp.content
    try:
        f = open(url)
        try:
            content = f.read()
        finally:
            # Close the handle even if read() fails, instead of leaking it
            # (the old code only closed on success via try/else).
            f.close()
    except IOError:
        e = sys.exc_info()[1]
        raise InstallationError('Could not open requirements file: %s' % str(e))
    return url, content
# Matches the scheme prefix of http/https/file URLs (case-insensitive).
_scheme_re = re.compile(r'^(http|https|file):', re.I)
# Matches Windows drive letters written as "/c|" inside file: URLs.
_url_slash_drive_re = re.compile(r'/*([a-z])\|', re.I)
def is_url(name):
    """Return True if *name* looks like a URL pip knows how to fetch."""
    if ':' not in name:
        # No scheme separator at all, so it cannot be a URL.
        return False
    scheme = name.split(':', 1)[0].lower()
    known_schemes = ['http', 'https', 'file', 'ftp'] + vcs.all_schemes
    return scheme in known_schemes
def url_to_path(url):
    """
    Convert a file: URL to a local filesystem path.

    Windows drive letters ("file:///c:/..." or "file:///c|/...") are turned
    back into "c:..." paths; anything else becomes an absolute POSIX path.
    """
    assert url.startswith('file:'), (
        "You can only turn file: urls into filenames (not %r)" % url)
    path = urllib.unquote(url[len('file:'):].lstrip('/'))
    if _url_drive_re.match(path):
        # "c:rest" or "c|rest" -> "c:" + rest
        return path[0] + ':' + path[2:]
    return '/' + path
# Matches a Windows drive prefix in a plain path, e.g. "c:".
_drive_re = re.compile('^([a-z]):', re.I)
# Matches a Windows drive prefix in a file: URL path, "c:" or the old "c|".
_url_drive_re = re.compile('^([a-z])[:|]', re.I)
def path_to_url(path):
    """
    Convert a path to a file: URL.  The path will be made absolute and its
    components percent-quoted.
    """
    drive, rest = os.path.splitdrive(os.path.normpath(os.path.abspath(path)))
    quoted_parts = [urllib.quote(part) for part in rest.split(os.path.sep)]
    url = '/'.join(quoted_parts)
    if not drive:
        # POSIX: the absolute path already begins with '/', so strip it to
        # avoid four slashes after the 'file:///' prefix.
        url = url.lstrip('/')
    return 'file:///' + drive + url
def is_archive_file(name):
    """Return True if `name` is a considered as an archive file.

    Relies on pip's splitext(), which treats multi-part extensions such as
    ".tar.gz" as a single extension.
    """
    archives = ('.zip', '.tar.gz', '.tar.bz2', '.tgz', '.tar', '.pybundle',
                '.whl')
    # Return the membership test directly instead of if/True/else/False.
    return splitext(name)[1].lower() in archives
def unpack_vcs_link(link, location, only_download=False):
    """Check out a VCS link into *location* (or just export the files)."""
    backend = _get_used_vcs_backend(link)
    if only_download:
        # Export copies the files without the VCS metadata.
        backend.export(location)
    else:
        backend.unpack(location)
def _get_used_vcs_backend(link):
    """Return the VCS backend instance that handles *link*'s scheme.

    Returns None when no registered backend claims the scheme (the original
    relied on falling off the end of the function for this).
    """
    for backend in vcs.backends:
        if link.scheme in backend.schemes:
            return backend(link.url)
    return None
def is_vcs_url(link):
    """Return True when some registered VCS backend handles *link*'s scheme."""
    backend = _get_used_vcs_backend(link)
    return bool(backend)
def is_file_url(link):
    """Return True if *link* points at a file: URL (case-insensitive)."""
    lowered = link.url.lower()
    return lowered.startswith('file:')
def _check_hash(download_hash, link):
    """Validate *download_hash* against the hash advertised by *link*.

    Raises HashMismatch (after logging a fatal message) when either the
    digest size differs (wrong algorithm) or the digest value differs.
    """
    expected_size = hashlib.new(link.hash_name).digest_size
    if download_hash.digest_size != expected_size:
        logger.fatal("Hash digest size of the package %d (%s) doesn't match the expected hash name %s!"
                     % (download_hash.digest_size, link, link.hash_name))
        raise HashMismatch('Hash name mismatch for package %s' % link)
    digest = download_hash.hexdigest()
    if digest != link.hash:
        logger.fatal("Hash of the package %s (%s) doesn't match the expected hash %s!"
                     % (link, digest, link.hash))
        raise HashMismatch('Bad %s hash for package %s' % (link.hash_name, link))
def _get_hash_from_file(target_file, link):
try:
download_hash = hashlib.new(link.hash_name)
except (ValueError, TypeError):
logger.warn("Unsupported hash name %s for package %s" % (link.hash_name, link))
return None
fp = open(target_file, 'rb')
while True:
chunk = fp.read(4096)
if not chunk:
break
download_hash.update(chunk)
fp.close()
return download_hash
def _download_url(resp, link, temp_location):
    """Stream the HTTP response *resp* to *temp_location*.

    Shows progress for large/unknown-size downloads and feeds every chunk
    into a hash object when the link advertises a (supported) hash.
    Returns that hash object, or None when no hash is being computed.
    """
    fp = open(temp_location, 'wb')
    download_hash = None
    if link.hash and link.hash_name:
        try:
            download_hash = hashlib.new(link.hash_name)
        except ValueError:
            logger.warn("Unsupported hash name %s for package %s" % (link.hash_name, link))
    try:
        total_length = int(resp.headers['content-length'])
    except (ValueError, KeyError, TypeError):
        # Missing or malformed Content-Length: treat the size as unknown.
        total_length = 0
    downloaded = 0
    # Show a progress meter for anything over ~40 kB or of unknown size.
    show_progress = total_length > 40 * 1000 or not total_length
    show_url = link.show_url
    try:
        if show_progress:
            ## FIXME: the URL can get really long in this message:
            if total_length:
                logger.start_progress('Downloading %s (%s): ' % (show_url, format_size(total_length)))
            else:
                logger.start_progress('Downloading %s (unknown size): ' % show_url)
        else:
            logger.notify('Downloading %s' % show_url)
        logger.info('Downloading from URL %s' % link)
        def resp_read(chunk_size):
            # Yield raw chunks, preferring urllib3's stream() (which handles
            # chunked transfer encoding) and falling back to plain read().
            try:
                # Special case for urllib3.
                try:
                    for chunk in resp.raw.stream(
                            chunk_size, decode_content=False):
                        yield chunk
                except IncompleteRead as e:
                    raise ChunkedEncodingError(e)
            except AttributeError:
                # Standard file-like object.
                while True:
                    chunk = resp.raw.read(chunk_size)
                    if not chunk:
                        break
                    yield chunk
        for chunk in resp_read(4096):
            downloaded += len(chunk)
            if show_progress:
                if not total_length:
                    logger.show_progress('%s' % format_size(downloaded))
                else:
                    logger.show_progress('%3i%%  %s' % (100 * downloaded / total_length, format_size(downloaded)))
            if download_hash is not None:
                download_hash.update(chunk)
            fp.write(chunk)
        fp.close()
    finally:
        # Always terminate the progress line, even on error/interrupt.
        if show_progress:
            logger.end_progress('%s downloaded' % format_size(downloaded))
    return download_hash
def _copy_file(filename, location, content_type, link):
    """Copy *filename* into *location* under the link's filename.

    When the destination already exists the user is asked whether to
    (i)gnore (keep the existing file), (w)ipe it, or (b)ack it up first.
    """
    copy = True
    download_location = os.path.join(location, link.filename)
    if os.path.exists(download_location):
        response = ask_path_exists(
            'The file %s exists. (i)gnore, (w)ipe, (b)ackup ' %
            display_path(download_location), ('i', 'w', 'b'))
        if response == 'i':
            copy = False
        elif response == 'w':
            logger.warn('Deleting %s' % display_path(download_location))
            os.remove(download_location)
        elif response == 'b':
            dest_file = backup_dir(download_location)
            logger.warn('Backing up %s to %s'
                        % (display_path(download_location), display_path(dest_file)))
            shutil.move(download_location, dest_file)
    if copy:
        shutil.copy(filename, download_location)
        logger.notify('Saved %s' % display_path(download_location))
def unpack_http_url(link, location, download_cache, download_dir=None,
                    session=None):
    """Obtain the archive behind *link* and unpack it into *location*.

    Resolution order: a previously downloaded copy in *download_dir*, then
    the *download_cache*, then a fresh HTTP download into a temp dir.  Any
    hash carried by the link is verified for whichever copy is used; copies
    that fail verification are deleted and re-downloaded.
    """
    if session is None:
        session = PipSession()
    temp_dir = tempfile.mkdtemp('-unpack', 'pip-')
    temp_location = None
    # Strip the fragment (which carries the expected hash, not URL data).
    target_url = link.url.split('#', 1)[0]
    already_cached = False
    cache_file = None
    cache_content_type_file = None
    download_hash = None
    # If a download cache is specified, is the file cached there?
    if download_cache:
        cache_file = os.path.join(download_cache,
                                  urllib.quote(target_url, ''))
        cache_content_type_file = cache_file + '.content-type'
        already_cached = (
            os.path.exists(cache_file) and
            os.path.exists(cache_content_type_file)
        )
        if not os.path.isdir(download_cache):
            create_download_cache_folder(download_cache)
    # If a download dir is specified, is the file already downloaded there?
    already_downloaded = None
    if download_dir:
        already_downloaded = os.path.join(download_dir, link.filename)
        if not os.path.exists(already_downloaded):
            already_downloaded = None
    # If already downloaded, does it's hash match?
    if already_downloaded:
        temp_location = already_downloaded
        content_type = mimetypes.guess_type(already_downloaded)[0]
        logger.notify('File was already downloaded %s' % already_downloaded)
        if link.hash:
            download_hash = _get_hash_from_file(temp_location, link)
            try:
                _check_hash(download_hash, link)
            except HashMismatch:
                # Stale/corrupt copy: discard it and fall through to the
                # cache / fresh-download paths below.
                logger.warn(
                    'Previously-downloaded file %s has bad hash, '
                    're-downloading.' % temp_location
                )
                temp_location = None
                os.unlink(already_downloaded)
                already_downloaded = None
    # If not a valid download, let's confirm the cached file is valid
    if already_cached and not temp_location:
        with open(cache_content_type_file) as fp:
            content_type = fp.read().strip()
        temp_location = cache_file
        logger.notify('Using download cache from %s' % cache_file)
        if link.hash and link.hash_name:
            download_hash = _get_hash_from_file(cache_file, link)
            try:
                _check_hash(download_hash, link)
            except HashMismatch:
                logger.warn(
                    'Cached file %s has bad hash, '
                    're-downloading.' % temp_location
                )
                temp_location = None
                os.unlink(cache_file)
                os.unlink(cache_content_type_file)
                already_cached = False
    # We don't have either a cached or a downloaded copy
    # let's download to a tmp dir
    if not temp_location:
        try:
            resp = session.get(target_url, stream=True)
            resp.raise_for_status()
        except requests.HTTPError as exc:
            logger.fatal("HTTP error %s while getting %s" %
                         (exc.response.status_code, link))
            raise
        content_type = resp.headers.get('content-type', '')
        filename = link.filename  # fallback
        # Have a look at the Content-Disposition header for a better guess
        content_disposition = resp.headers.get('content-disposition')
        if content_disposition:
            type, params = cgi.parse_header(content_disposition)
            # We use ``or`` here because we don't want to use an "empty" value
            # from the filename param.
            filename = params.get('filename') or filename
        # Try to give the temp file a sensible extension: first from the
        # content type, then from the final (possibly redirected) URL.
        ext = splitext(filename)[1]
        if not ext:
            ext = mimetypes.guess_extension(content_type)
            if ext:
                filename += ext
        if not ext and link.url != resp.url:
            ext = os.path.splitext(resp.url)[1]
            if ext:
                filename += ext
        temp_location = os.path.join(temp_dir, filename)
        download_hash = _download_url(resp, link, temp_location)
        if link.hash and link.hash_name:
            _check_hash(download_hash, link)
    # a download dir is specified; let's copy the archive there
    if download_dir and not already_downloaded:
        _copy_file(temp_location, download_dir, content_type, link)
    # unpack the archive to the build dir location. even when only downloading
    # archives, they have to be unpacked to parse dependencies
    unpack_file(temp_location, location, content_type, link)
    # if using a download cache, cache it, if needed
    if cache_file and not already_cached:
        cache_download(cache_file, temp_location, content_type)
    if not (already_cached or already_downloaded):
        os.unlink(temp_location)
    os.rmdir(temp_dir)
def unpack_file_url(link, location, download_dir=None):
    """Unpack a file: link (local directory or archive) into *location*.

    Directories are copied wholesale (replacing *location*); archives are
    hash-checked when the link carries a hash, preferring a valid copy
    already present in *download_dir* when one is given.
    """
    link_path = url_to_path(link.url_without_fragment)
    already_downloaded = False
    # If it's a url to a local directory
    if os.path.isdir(link_path):
        if os.path.isdir(location):
            rmtree(location)
        shutil.copytree(link_path, location, symlinks=True)
        return
    # if link has a hash, let's confirm it matches
    if link.hash:
        link_path_hash = _get_hash_from_file(link_path, link)
        _check_hash(link_path_hash, link)
    # If a download dir is specified, is the file already there and valid?
    if download_dir:
        download_path = os.path.join(download_dir, link.filename)
        if os.path.exists(download_path):
            content_type = mimetypes.guess_type(download_path)[0]
            logger.notify('File was already downloaded %s' % download_path)
            if link.hash:
                download_hash = _get_hash_from_file(download_path, link)
                try:
                    _check_hash(download_hash, link)
                    already_downloaded = True
                except HashMismatch:
                    # Corrupt copy in the download dir: remove it and use
                    # the original file at link_path instead.
                    logger.warn(
                        'Previously-downloaded file %s has bad hash, '
                        're-downloading.' % link_path
                    )
                    os.unlink(download_path)
            else:
                already_downloaded = True
    if already_downloaded:
        from_path = download_path
    else:
        from_path = link_path
    content_type = mimetypes.guess_type(from_path)[0]
    # unpack the archive to the build dir location. even when only downloading
    # archives, they have to be unpacked to parse dependencies
    unpack_file(from_path, location, content_type, link)
    # a download dir is specified and not already downloaded
    if download_dir and not already_downloaded:
        _copy_file(from_path, download_dir, content_type, link)
| mit |
thiriel/maps | django/db/backends/postgresql/operations.py | 229 | 9420 | import re
from django.db.backends import BaseDatabaseOperations
# This DatabaseOperations class lives in here instead of base.py because it's
# used by both the 'postgresql' and 'postgresql_psycopg2' backends.
class DatabaseOperations(BaseDatabaseOperations):
    """PostgreSQL-specific SQL generation, shared by both the 'postgresql'
    and 'postgresql_psycopg2' backends."""
    def __init__(self, connection):
        super(DatabaseOperations, self).__init__()
        self._postgres_version = None
        self.connection = connection
    def _get_postgres_version(self):
        # Query the server version lazily on first access and cache it for
        # the version checks below (sql_flush, check_aggregate_support).
        if self._postgres_version is None:
            from django.db.backends.postgresql.version import get_version
            cursor = self.connection.cursor()
            self._postgres_version = get_version(cursor)
        return self._postgres_version
    postgres_version = property(_get_postgres_version)
    def date_extract_sql(self, lookup_type, field_name):
        # http://www.postgresql.org/docs/8.0/static/functions-datetime.html#FUNCTIONS-DATETIME-EXTRACT
        if lookup_type == 'week_day':
            # For consistency across backends, we return Sunday=1, Saturday=7.
            return "EXTRACT('dow' FROM %s) + 1" % field_name
        else:
            return "EXTRACT('%s' FROM %s)" % (lookup_type, field_name)
    def date_interval_sql(self, sql, connector, timedelta):
        """
        implements the interval functionality for expressions
        format for Postgres:
        (datefield + interval '3 days 200 seconds 5 microseconds')
        """
        modifiers = []
        if timedelta.days:
            modifiers.append(u'%s days' % timedelta.days)
        if timedelta.seconds:
            modifiers.append(u'%s seconds' % timedelta.seconds)
        if timedelta.microseconds:
            modifiers.append(u'%s microseconds' % timedelta.microseconds)
        mods = u' '.join(modifiers)
        conn = u' %s ' % connector
        return u'(%s)' % conn.join([sql, u'interval \'%s\'' % mods])
    def date_trunc_sql(self, lookup_type, field_name):
        # http://www.postgresql.org/docs/8.0/static/functions-datetime.html#FUNCTIONS-DATETIME-TRUNC
        return "DATE_TRUNC('%s', %s)" % (lookup_type, field_name)
    def deferrable_sql(self):
        # Defer constraint checking until transaction commit.
        return " DEFERRABLE INITIALLY DEFERRED"
    def lookup_cast(self, lookup_type):
        lookup = '%s'
        # Cast text lookups to text to allow things like filter(x__contains=4)
        if lookup_type in ('iexact', 'contains', 'icontains', 'startswith',
                           'istartswith', 'endswith', 'iendswith'):
            lookup = "%s::text"
        # Use UPPER(x) for case-insensitive lookups; it's faster.
        if lookup_type in ('iexact', 'icontains', 'istartswith', 'iendswith'):
            lookup = 'UPPER(%s)' % lookup
        return lookup
    def field_cast_sql(self, db_type):
        # inet columns must be rendered as text (host part) for comparisons.
        if db_type == 'inet':
            return 'HOST(%s)'
        return '%s'
    def last_insert_id(self, cursor, table_name, pk_name):
        # Use pg_get_serial_sequence to get the underlying sequence name
        # from the table name and column name (available since PostgreSQL 8)
        cursor.execute("SELECT CURRVAL(pg_get_serial_sequence('%s','%s'))" % (
            self.quote_name(table_name), pk_name))
        return cursor.fetchone()[0]
    def no_limit_value(self):
        # PostgreSQL needs no sentinel value: omitting LIMIT means no limit.
        return None
    def quote_name(self, name):
        if name.startswith('"') and name.endswith('"'):
            return name # Quoting once is enough.
        return '"%s"' % name
    def sql_flush(self, style, tables, sequences):
        if tables:
            if self.postgres_version[0:2] >= (8,1):
                # Postgres 8.1+ can do 'TRUNCATE x, y, z...;'. In fact, it *has to*
                # in order to be able to truncate tables referenced by a foreign
                # key in any other table. The result is a single SQL TRUNCATE
                # statement.
                sql = ['%s %s;' % \
                    (style.SQL_KEYWORD('TRUNCATE'),
                     style.SQL_FIELD(', '.join([self.quote_name(table) for table in tables]))
                )]
            else:
                # Older versions of Postgres can't do TRUNCATE in a single call, so
                # they must use a simple delete.
                sql = ['%s %s %s;' % \
                        (style.SQL_KEYWORD('DELETE'),
                         style.SQL_KEYWORD('FROM'),
                         style.SQL_FIELD(self.quote_name(table))
                         ) for table in tables]
            # 'ALTER SEQUENCE sequence_name RESTART WITH 1;'... style SQL statements
            # to reset sequence indices
            for sequence_info in sequences:
                table_name = sequence_info['table']
                column_name = sequence_info['column']
                if not (column_name and len(column_name) > 0):
                    # This will be the case if it's an m2m using an autogenerated
                    # intermediate table (see BaseDatabaseIntrospection.sequence_list)
                    column_name = 'id'
                sql.append("%s setval(pg_get_serial_sequence('%s','%s'), 1, false);" % \
                    (style.SQL_KEYWORD('SELECT'),
                     style.SQL_TABLE(self.quote_name(table_name)),
                     style.SQL_FIELD(column_name))
                )
            return sql
        else:
            return []
    def sequence_reset_sql(self, style, model_list):
        # Build SELECT setval(...) statements so that each model's serial
        # sequence continues after the highest primary key currently stored.
        from django.db import models
        output = []
        qn = self.quote_name
        for model in model_list:
            # Use `coalesce` to set the sequence for each model to the max pk value if there are records,
            # or 1 if there are none. Set the `is_called` property (the third argument to `setval`) to true
            # if there are records (as the max pk value is already in use), otherwise set it to false.
            # Use pg_get_serial_sequence to get the underlying sequence name from the table name
            # and column name (available since PostgreSQL 8)
            for f in model._meta.local_fields:
                if isinstance(f, models.AutoField):
                    output.append("%s setval(pg_get_serial_sequence('%s','%s'), coalesce(max(%s), 1), max(%s) %s null) %s %s;" % \
                        (style.SQL_KEYWORD('SELECT'),
                         style.SQL_TABLE(qn(model._meta.db_table)),
                         style.SQL_FIELD(f.column),
                         style.SQL_FIELD(qn(f.column)),
                         style.SQL_FIELD(qn(f.column)),
                         style.SQL_KEYWORD('IS NOT'),
                         style.SQL_KEYWORD('FROM'),
                         style.SQL_TABLE(qn(model._meta.db_table))))
                    break # Only one AutoField is allowed per model, so don't bother continuing.
            for f in model._meta.many_to_many:
                if not f.rel.through:
                    # Auto-created m2m tables always use an 'id' serial column.
                    output.append("%s setval(pg_get_serial_sequence('%s','%s'), coalesce(max(%s), 1), max(%s) %s null) %s %s;" % \
                        (style.SQL_KEYWORD('SELECT'),
                         style.SQL_TABLE(qn(f.m2m_db_table())),
                         style.SQL_FIELD('id'),
                         style.SQL_FIELD(qn('id')),
                         style.SQL_FIELD(qn('id')),
                         style.SQL_KEYWORD('IS NOT'),
                         style.SQL_KEYWORD('FROM'),
                         style.SQL_TABLE(qn(f.m2m_db_table()))))
        return output
    def savepoint_create_sql(self, sid):
        return "SAVEPOINT %s" % sid
    def savepoint_commit_sql(self, sid):
        return "RELEASE SAVEPOINT %s" % sid
    def savepoint_rollback_sql(self, sid):
        return "ROLLBACK TO SAVEPOINT %s" % sid
    def prep_for_iexact_query(self, x):
        # Case-folding is done in SQL via UPPER() (see lookup_cast), so the
        # Python-side value needs no preparation.
        return x
    def check_aggregate_support(self, aggregate):
        """Check that the backend fully supports the provided aggregate.
        The population and sample statistics (STDDEV_POP, STDDEV_SAMP,
        VAR_POP, VAR_SAMP) were first implemented in Postgres 8.2.
        The implementation of population statistics (STDDEV_POP and VAR_POP)
        under Postgres 8.2 - 8.2.4 is known to be faulty. Raise
        NotImplementedError if this is the database in use.
        """
        if aggregate.sql_function in ('STDDEV_POP', 'STDDEV_SAMP', 'VAR_POP', 'VAR_SAMP'):
            if self.postgres_version[0:2] < (8,2):
                raise NotImplementedError('PostgreSQL does not support %s prior to version 8.2. Please upgrade your version of PostgreSQL.' % aggregate.sql_function)
        if aggregate.sql_function in ('STDDEV_POP', 'VAR_POP'):
            if self.postgres_version[0:2] == (8,2):
                if self.postgres_version[2] is None or self.postgres_version[2] <= 4:
                    raise NotImplementedError('PostgreSQL 8.2 to 8.2.4 is known to have a faulty implementation of %s. Please upgrade your version of PostgreSQL.' % aggregate.sql_function)
    def max_name_length(self):
        """
        Returns the maximum length of an identifier.
        Note that the maximum length of an identifier is 63 by default, but can
        be changed by recompiling PostgreSQL after editing the NAMEDATALEN
        macro in src/include/pg_config_manual.h .
        This implementation simply returns 63, but can easily be overridden by a
        custom database backend that inherits most of its behavior from this one.
        """
        return 63
| bsd-3-clause |
akirk/youtube-dl | test/test_http.py | 115 | 4170 | #!/usr/bin/env python
from __future__ import unicode_literals
# Allow direct execution
import os
import sys
import unittest
sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
from youtube_dl import YoutubeDL
from youtube_dl.compat import compat_http_server, compat_urllib_request
import ssl
import threading
TEST_DIR = os.path.dirname(os.path.abspath(__file__))
class HTTPTestRequestHandler(compat_http_server.BaseHTTPRequestHandler):
    """Serves a tiny fixture site: one HTML page embedding one mp4 video."""
    def log_message(self, format, *args):
        # Silence the default per-request stderr logging during tests.
        pass
    def do_GET(self):
        if self.path == '/video.html':
            self.send_response(200)
            self.send_header('Content-Type', 'text/html; charset=utf-8')
            self.end_headers()
            self.wfile.write(b'<html><video src="/vid.mp4" /></html>')
        elif self.path == '/vid.mp4':
            self.send_response(200)
            self.send_header('Content-Type', 'video/mp4')
            self.end_headers()
            # A few bytes resembling an MP4 "ftyp" header, enough for tests.
            self.wfile.write(b'\x00\x00\x00\x00\x20\x66\x74[video]')
        else:
            # Tests must only ever request the two paths above.
            assert False
class FakeLogger(object):
    """Logger stand-in that silently discards all messages during tests."""

    def _ignore(self, msg):
        pass

    # All three logger entry points share the same no-op implementation.
    debug = _ignore
    warning = _ignore
    error = _ignore
class TestHTTP(unittest.TestCase):
    """End-to-end check of certificate handling against a local HTTPS server."""
    def setUp(self):
        # Start an HTTPS server with a self-signed certificate on a random
        # free port, serving the fixture handler from a daemon thread.
        certfn = os.path.join(TEST_DIR, 'testcert.pem')
        self.httpd = compat_http_server.HTTPServer(
            ('localhost', 0), HTTPTestRequestHandler)
        self.httpd.socket = ssl.wrap_socket(
            self.httpd.socket, certfile=certfn, server_side=True)
        self.port = self.httpd.socket.getsockname()[1]
        self.server_thread = threading.Thread(target=self.httpd.serve_forever)
        self.server_thread.daemon = True
        self.server_thread.start()
    def test_nocheckcertificate(self):
        # On Pythons that verify certificates by default, extraction against
        # the self-signed server must fail without nocheckcertificate.
        if sys.version_info >= (2, 7, 9):  # No certificate checking anyways
            ydl = YoutubeDL({'logger': FakeLogger()})
            self.assertRaises(
                Exception,
                ydl.extract_info, 'https://localhost:%d/video.html' % self.port)
        # With nocheckcertificate the page is fetched and the video found.
        ydl = YoutubeDL({'logger': FakeLogger(), 'nocheckcertificate': True})
        r = ydl.extract_info('https://localhost:%d/video.html' % self.port)
        self.assertEqual(r['url'], 'https://localhost:%d/vid.mp4' % self.port)
def _build_proxy_handler(name):
    """Return a request-handler class that answers every GET with
    "<name>: <path>" -- used to tell which proxy served a request."""
    class HTTPTestRequestHandler(compat_http_server.BaseHTTPRequestHandler):
        # Captured from the factory argument; identifies this proxy instance.
        proxy_name = name
        def log_message(self, format, *args):
            # Keep test output clean.
            pass
        def do_GET(self):
            self.send_response(200)
            self.send_header('Content-Type', 'text/plain; charset=utf-8')
            self.end_headers()
            self.wfile.write('{self.proxy_name}: {self.path}'.format(self=self).encode('utf-8'))
    return HTTPTestRequestHandler
class TestProxy(unittest.TestCase):
    """Check that the 'proxy' and 'cn_verification_proxy' options are used."""
    def setUp(self):
        # One echoing proxy server for normal traffic ...
        self.proxy = compat_http_server.HTTPServer(
            ('localhost', 0), _build_proxy_handler('normal'))
        self.port = self.proxy.socket.getsockname()[1]
        self.proxy_thread = threading.Thread(target=self.proxy.serve_forever)
        self.proxy_thread.daemon = True
        self.proxy_thread.start()
        # ... and a second one standing in for the China-verification proxy.
        self.cn_proxy = compat_http_server.HTTPServer(
            ('localhost', 0), _build_proxy_handler('cn'))
        self.cn_port = self.cn_proxy.socket.getsockname()[1]
        self.cn_proxy_thread = threading.Thread(target=self.cn_proxy.serve_forever)
        self.cn_proxy_thread.daemon = True
        self.cn_proxy_thread.start()
    def test_proxy(self):
        cn_proxy = 'localhost:{0}'.format(self.cn_port)
        ydl = YoutubeDL({
            'proxy': 'localhost:{0}'.format(self.port),
            'cn_verification_proxy': cn_proxy,
        })
        url = 'http://foo.com/bar'
        # Default requests go through the normal proxy ...
        response = ydl.urlopen(url).read().decode('utf-8')
        self.assertEqual(response, 'normal: {0}'.format(url))
        # ... while the Ytdl-request-proxy header routes via the cn proxy.
        req = compat_urllib_request.Request(url)
        req.add_header('Ytdl-request-proxy', cn_proxy)
        response = ydl.urlopen(req).read().decode('utf-8')
        self.assertEqual(response, 'cn: {0}'.format(url))
# Allow running this test module directly from the command line.
if __name__ == '__main__':
    unittest.main()
| unlicense |
bobrock/eden | modules/unit_tests/s3/s3hierarchy.py | 14 | 78245 | # -*- coding: utf-8 -*-
#
# S3Hierarchy Unit Tests
#
# To run this script use:
# python web2py.py -S eden -M -R applications/eden/modules/unit_tests/s3/s3hierarchy.py
#
import unittest
from lxml import etree
from s3dal import Field, Query
from s3.s3utils import *
from s3.s3rest import s3_request
from s3 import FS, S3Hierarchy, S3HierarchyFilter, s3_meta_fields
# =============================================================================
class S3HierarchyTests(unittest.TestCase):
""" Tests for standard hierarchies """
# -------------------------------------------------------------------------
    @classmethod
    def setUpClass(cls):
        """Define the test hierarchy tables and import the sample tree.

        Builds two trees (HIERARCHY1, HIERARCHY2) with two levels of
        children each, plus a reference table pointing at the hierarchy.
        """
        s3db = current.s3db
        s3db.define_table("test_hierarchy",
                          Field("name"),
                          Field("category"),
                          Field("type"),
                          # Self-reference establishing the tree structure.
                          Field("parent", "reference test_hierarchy"),
                          *s3_meta_fields())
        s3db.define_table("test_hierarchy_reference",
                          Field("test_hierarchy_id", "reference test_hierarchy",
                                ondelete = "RESTRICT",
                                ),
                          Field("test_hierarchy_multi_id", "list:reference test_hierarchy"),
                          *s3_meta_fields())
        xmlstr = """
<s3xml>
    <resource name="test_hierarchy" uuid="HIERARCHY1">
        <data field="name">Type 1</data>
        <data field="category">Cat 0</data>
        <data field="type">A</data>
    </resource>
    <resource name="test_hierarchy" uuid="HIERARCHY1-1">
        <data field="name">Type 1-1</data>
        <data field="category">Cat 1</data>
        <data field="type">C</data>
        <reference field="parent" resource="test_hierarchy" uuid="HIERARCHY1"/>
    </resource>
    <resource name="test_hierarchy" uuid="HIERARCHY1-1-1">
        <data field="name">Type 1-1-1</data>
        <data field="category">Cat 2</data>
        <data field="type">B</data>
        <reference field="parent" resource="test_hierarchy" uuid="HIERARCHY1-1"/>
    </resource>
    <resource name="test_hierarchy" uuid="HIERARCHY1-1-2">
        <data field="name">Type 1-1-2</data>
        <data field="category">Cat 2</data>
        <data field="type">A</data>
        <reference field="parent" resource="test_hierarchy" uuid="HIERARCHY1-1"/>
    </resource>
    <resource name="test_hierarchy" uuid="HIERARCHY1-2">
        <data field="name">Type 1-2</data>
        <data field="category">Cat 1</data>
        <data field="type">B</data>
        <reference field="parent" resource="test_hierarchy" uuid="HIERARCHY1"/>
    </resource>
    <resource name="test_hierarchy" uuid="HIERARCHY1-2-1">
        <data field="name">Type 1-2-1</data>
        <data field="category">Cat 2</data>
        <data field="type">B</data>
        <reference field="parent" resource="test_hierarchy" uuid="HIERARCHY1-2"/>
    </resource>
    <resource name="test_hierarchy" uuid="HIERARCHY1-2-2">
        <data field="name">Type 1-2-2</data>
        <data field="category">Cat 2</data>
        <data field="type">C</data>
        <reference field="parent" resource="test_hierarchy" uuid="HIERARCHY1-2"/>
    </resource>
    <resource name="test_hierarchy" uuid="HIERARCHY2">
        <data field="name">Type 2</data>
        <data field="category">Cat 0</data>
        <data field="type">B</data>
    </resource>
    <resource name="test_hierarchy" uuid="HIERARCHY2-1">
        <data field="name">Type 2-1</data>
        <data field="category">Cat 1</data>
        <data field="type">A</data>
        <reference field="parent" resource="test_hierarchy" uuid="HIERARCHY2"/>
    </resource>
    <resource name="test_hierarchy" uuid="HIERARCHY2-1-1">
        <data field="name">Type 2-1-1</data>
        <data field="category">Cat 2</data>
        <data field="type">C</data>
        <reference field="parent" resource="test_hierarchy" uuid="HIERARCHY2-1"/>
    </resource>
    <resource name="test_hierarchy" uuid="HIERARCHY2-1-2">
        <data field="name">Type 2-1-2</data>
        <data field="category">Cat 2</data>
        <data field="type">D</data>
        <reference field="parent" resource="test_hierarchy" uuid="HIERARCHY2-1"/>
    </resource>
</s3xml>
"""
        xmltree = etree.ElementTree(etree.fromstring(xmlstr))
        # Import with auth override so permission checks don't interfere.
        current.auth.override = True
        resource = s3db.resource("test_hierarchy")
        resource.import_xml(xmltree)
        current.db.commit()
# -------------------------------------------------------------------------
    @classmethod
    def tearDownClass(cls):
        """Drop the test tables (reference table first, to satisfy the
        RESTRICT foreign key) and commit."""
        db = current.db
        db.test_hierarchy_reference.drop()
        db.test_hierarchy.drop(mode="cascade")
        current.db.commit()
# -------------------------------------------------------------------------
    def setUp(self):
        """Enable auth override, cache row/uuid lookups once per instance,
        and (re)configure the hierarchy on the test table."""
        current.auth.override = True
        db = current.db
        if not hasattr(self, "rows"):
            # Build uuid->row, uuid->id and id->uuid lookup maps so the
            # individual tests can translate freely between the three.
            table = db.test_hierarchy
            rows = db((table.id>0) & (table.deleted != True)).select()
            self.rows = {}
            self.uids = {}
            self.ids = {}
            for row in rows:
                uid = row.uuid
                self.rows[uid] = row
                self.uids[uid] = row.id
                self.ids[row.id] = uid
        # parent link field plus category field drive the hierarchy.
        current.s3db.configure("test_hierarchy",
                               hierarchy=("parent", "category"))
# -------------------------------------------------------------------------
    def tearDown(self):
        # Restore normal authorization checks after each test.
        current.auth.override = False
# -------------------------------------------------------------------------
    def testHierarchyConstruction(self):
        """ Test hierarchy construction """
        uids = self.uids
        assertEqual = self.assertEqual
        assertTrue = self.assertTrue
        h = S3Hierarchy("test_hierarchy")
        # Exactly the two top-level records must come back as roots.
        roots = h.roots
        assertEqual(len(roots), 2)
        assertTrue(uids["HIERARCHY1"] in roots)
        assertTrue(uids["HIERARCHY2"] in roots)
        # Every imported record must be present as a node.
        nodes = h.nodes
        assertEqual(len(nodes), len(uids))
        assertTrue(all(node_id in nodes for node_id in uids.values()))
# -------------------------------------------------------------------------
def testPreprocessCreateNode(self):
""" Test preprocessing of a create-node request """
r = s3_request("test", "hierarchy", http="POST")
parent_node = self.rows["HIERARCHY1"]
parent_id = parent_node.id
h = S3Hierarchy("test_hierarchy")
link = h.preprocess_create_node(r, parent_id)
self.assertEqual(link, None)
assertEqual = self.assertEqual
post_vars = r.post_vars
assertEqual(post_vars["parent"], parent_id)
field = r.table.parent
assertEqual(field.default, parent_id)
assertEqual(field.update, parent_id)
self.assertFalse(field.readable)
self.assertFalse(field.writable)
# -------------------------------------------------------------------------
def testDeleteBranch(self):
""" Test recursive deletion of a hierarchy branch """
# Add additional nodes
xmlstr = """
<s3xml>
<resource name="test_hierarchy" uuid="HIERARCHY1-3">
<data field="name">Type 1-3</data>
<reference field="parent" resource="test_hierarchy" uuid="HIERARCHY1"/>
</resource>
<resource name="test_hierarchy" uuid="HIERARCHY1-3-1">
<data field="name">Type 1-3-1</data>
<reference field="parent" resource="test_hierarchy" uuid="HIERARCHY1-3"/>
</resource>
<resource name="test_hierarchy" uuid="HIERARCHY1-3-2">
<data field="name">Type 1-3-2</data>
<reference field="parent" resource="test_hierarchy" uuid="HIERARCHY1-3"/>
</resource>
</s3xml>
"""
xmltree = etree.ElementTree(etree.fromstring(xmlstr))
resource = current.s3db.resource("test_hierarchy")
resource.import_xml(xmltree)
# Commit here, otherwise failing deletion will roll back the import, too
db = current.db
db.commit()
assertTrue = self.assertTrue
assertFalse = self.assertFalse
assertEqual = self.assertEqual
table = db.test_hierarchy
try:
# Capture the uuids
rows = db(table.uuid.like("HIERARCHY1-3%")).select()
uids = {}
for row in rows:
assertFalse(row.deleted)
uids[row.uuid] = row.id
# Mark as dirty after import
h = S3Hierarchy("test_hierarchy")
h.dirty("test_hierarchy")
# Verify that branch node has been added to the hierarchy
branches = h.children(self.uids["HIERARCHY1"])
assertTrue(uids["HIERARCHY1-3"] in branches)
# Verify that children have been added, too
children = h.children(uids["HIERARCHY1-3"])
assertEqual(len(children), 2)
# Delete the branch
success = h.delete([uids["HIERARCHY1-3"]])
assertEqual(success, 3)
# Verify that branch has been deleted
branches = h.children(self.uids["HIERARCHY1"])
assertFalse(uids["HIERARCHY1-3"] in branches)
# Child nodes must be gone as well
nodes = h.nodes
assertTrue(all(uids[uid] not in nodes for uid in uids))
# Verify that the nodes are deleted from database too
rows = db(table.uuid.like("HIERARCHY1-3%")).select()
for row in rows:
assertTrue(row.deleted)
uids[row.uuid] = row.id
finally:
# Cleanup
db(table.uuid.like("HIERARCHY1-3%")).delete()
# -------------------------------------------------------------------------
def testDeleteBranchFailure(self):
"""
Test proper handling of deletion cascade failure due to
db integrity constraints
"""
# Add additional nodes
xmlstr = """
<s3xml>
<resource name="test_hierarchy" uuid="HIERARCHY1-4">
<data field="name">Type 1-4</data>
<reference field="parent" resource="test_hierarchy" uuid="HIERARCHY1"/>
</resource>
<resource name="test_hierarchy" uuid="HIERARCHY1-4-1">
<data field="name">Type 1-4-1</data>
<reference field="parent" resource="test_hierarchy" uuid="HIERARCHY1-4"/>
</resource>
<resource name="test_hierarchy" uuid="HIERARCHY1-4-2">
<data field="name">Type 1-4-2</data>
<reference field="parent" resource="test_hierarchy" uuid="HIERARCHY1-4"/>
</resource>
<resource name="test_hierarchy_reference" uuid="REF1">
<reference field="test_hierarchy_id" uuid="HIERARCHY1-4-1"/>
</resource>
</s3xml>
"""
xmltree = etree.ElementTree(etree.fromstring(xmlstr))
db = current.db
s3db = current.s3db
resource = s3db.resource("test_hierarchy")
resource.import_xml(xmltree)
resource = s3db.resource("test_hierarchy_reference")
resource.import_xml(xmltree)
# Commit here, otherwise failing deletion will roll back the import, too
db.commit()
assertTrue = self.assertTrue
assertFalse = self.assertFalse
assertEqual = self.assertEqual
table = db.test_hierarchy
try:
# Capture the uuids
rows = db(table.uuid.like("HIERARCHY1-4%")).select()
uids = {}
for row in rows:
assertFalse(row.deleted)
uids[row.uuid] = row.id
# Mark as dirty after import
h = S3Hierarchy("test_hierarchy")
h.dirty("test_hierarchy")
# Verify that branch node has been added to the hierarchy
branches = h.children(self.uids["HIERARCHY1"])
assertTrue(uids["HIERARCHY1-4"] in branches)
# Verify that children have been added, too
children = h.children(uids["HIERARCHY1-4"])
assertEqual(len(children), 2)
# Try delete the branch => should fail
success = h.delete([uids["HIERARCHY1-4"]])
assertEqual(success, None)
# Verify that branch has not been deleted
branches = h.children(self.uids["HIERARCHY1"])
assertTrue(uids["HIERARCHY1-4"] in branches)
# Child nodes must still be in the hierarchy
nodes = h.nodes
assertTrue(all(uids[uid] in nodes for uid in uids))
# Verify that the nodes are not deleted from database either
rows = db(table.uuid.like("HIERARCHY1-4%")).select()
for row in rows:
assertFalse(row.deleted)
# Remove the blocker
db(db.test_hierarchy_reference.uuid == "REF1").delete()
# Try again to delete the branch => should succeed now
success = h.delete([uids["HIERARCHY1-4"]])
assertEqual(success, 3)
# Verify that branch has been deleted
branches = h.children(self.uids["HIERARCHY1"])
assertFalse(uids["HIERARCHY1-4"] in branches)
# Child nodes must be gone as well
nodes = h.nodes
assertTrue(all(uids[uid] not in nodes for uid in uids))
# Verify that the nodes are deleted from database too
rows = db(table.uuid.like("HIERARCHY1-4%")).select()
for row in rows:
assertTrue(row.deleted)
uids[row.uuid] = row.id
finally:
# Cleanup
db(table.uuid.like("HIERARCHY1-4%")).delete()
# -------------------------------------------------------------------------
def testCategory(self):
""" Test node category lookup """
uids = self.uids
rows = self.rows
assertEqual = self.assertEqual
h = S3Hierarchy("test_hierarchy")
for uid in uids:
category = h.category(uids[uid])
assertEqual(category, rows[uid].category)
# -------------------------------------------------------------------------
def testParent(self):
""" Test parent lookup """
ids = self.ids
uids = self.uids
rows = self.rows
assertEqual = self.assertEqual
h = S3Hierarchy("test_hierarchy")
for uid in uids:
parent, category = h.parent(uids[uid], classify=True)
assertEqual(parent, rows[uid].parent)
if parent:
parent_uid = ids[parent]
assertEqual(category, rows[parent_uid].category)
# -------------------------------------------------------------------------
def testChildren(self):
""" Test child node lookup """
uids = self.uids
rows = self.rows
assertEqual = self.assertEqual
h = S3Hierarchy("test_hierarchy")
for uid in uids:
assertEqual(h.children(uids[uid]),
set(row.id for row in rows.values()
if row.parent == uids[uid]))
# -------------------------------------------------------------------------
def testPath(self):
""" Test node path lookup """
uids = self.uids
rows = self.rows
assertEqual = self.assertEqual
# Standard path from root
node = uids["HIERARCHY2-1-2"]
h = S3Hierarchy("test_hierarchy")
path = h.path(node)
assertEqual(path, [uids["HIERARCHY2"],
uids["HIERARCHY2-1"],
uids["HIERARCHY2-1-2"]
])
# Path from category root
node = uids["HIERARCHY1-1-1"]
path = h.path(node, category="Cat 1", classify=True)
classified = lambda uid: (uids[uid], rows[uid].category)
assertEqual(path, [classified("HIERARCHY1-1"),
classified("HIERARCHY1-1-1"),
])
# Path of root
node = uids["HIERARCHY2"]
path = h.path(node, category="Cat 1", classify=True)
classified = lambda uid: (uids[uid], rows[uid].category)
assertEqual(path, [classified("HIERARCHY2")])
# -------------------------------------------------------------------------
def testRoot(self):
""" Test root node lookup """
uids = self.uids
rows = self.rows
assertEqual = self.assertEqual
# Top root
node = uids["HIERARCHY1-1-1"]
h = S3Hierarchy("test_hierarchy")
root = h.root(node)
assertEqual(root, uids["HIERARCHY1"])
# Root by category
node = uids["HIERARCHY2-1"]
root = h.root(node, classify=True)
assertEqual(root, (uids["HIERARCHY2"], rows["HIERARCHY2"].category))
# Root of root
node = uids["HIERARCHY1"]
root = h.root(node)
assertEqual(root, uids["HIERARCHY1"])
# None
root = h.root(None)
assertEqual(root, None)
# -------------------------------------------------------------------------
def testDepth(self):
""" Test determination of the maximum depth beneath a node """
uids = self.uids
rows = self.rows
assertEqual = self.assertEqual
h = S3Hierarchy("test_hierarchy")
# Top root
node = uids["HIERARCHY1"]
assertEqual(h.depth(node), 2)
# Sub-node
node = uids["HIERARCHY2-1"]
assertEqual(h.depth(node), 1)
# Leaf
node = uids["HIERARCHY1-1-1"]
assertEqual(h.depth(node), 0)
# None (processes all roots)
assertEqual(h.depth(None), 2)
# -------------------------------------------------------------------------
def testSiblings(self):
""" Test lookup of sibling nodes """
uids = self.uids
rows = self.rows
assertEqual = self.assertEqual
h = S3Hierarchy("test_hierarchy")
for uid in uids:
parent = rows[uid].parent
siblings = set(row.id for row in rows.values()
if row.parent == parent)
assertEqual(h.siblings(uids[uid], inclusive=True), siblings)
siblings.discard(uids[uid])
assertEqual(h.siblings(uids[uid], inclusive=False), siblings)
# -------------------------------------------------------------------------
def testFindAll(self):
""" Test lookup of descendant nodes """
uids = self.uids
h = S3Hierarchy("test_hierarchy")
assertEqual = self.assertEqual
root = uids["HIERARCHY1"]
nodes = h.findall(root)
expected = ["HIERARCHY1-1",
"HIERARCHY1-1-1",
"HIERARCHY1-1-2",
"HIERARCHY1-2",
"HIERARCHY1-2-1",
"HIERARCHY1-2-2",
]
assertEqual(nodes, set(uids[uid] for uid in expected))
root = uids["HIERARCHY1"]
nodes = h.findall(root, inclusive=True)
expected = ["HIERARCHY1",
"HIERARCHY1-1",
"HIERARCHY1-1-1",
"HIERARCHY1-1-2",
"HIERARCHY1-2",
"HIERARCHY1-2-1",
"HIERARCHY1-2-2",
]
assertEqual(nodes, set(uids[uid] for uid in expected))
root = uids["HIERARCHY2"]
nodes = h.findall(root, category="Cat 1")
expected = ["HIERARCHY2-1",
]
assertEqual(nodes, set(uids[uid] for uid in expected))
root = uids["HIERARCHY1"]
nodes = h.findall(root, category="Cat 4")
assertEqual(nodes, set())
# -------------------------------------------------------------------------
    def testExportNode(self):
        """ Test export of nodes """

        assertEqual = self.assertEqual
        assertTrue = self.assertTrue
        assertFalse = self.assertFalse

        h = S3Hierarchy("test_hierarchy")
        # Data source for the export: node id -> Row
        data = dict((self.uids[uid], self.rows[uid]) for uid in self.uids)

        # Export the rows beneath node HIERARCHY1
        root = self.uids["HIERARCHY1"]
        output = h.export_node(root,
                               depth=2,
                               prefix="_export",
                               data=data,
                               hcol = "test_hierarchy.name",
                               columns=["test_hierarchy.category"],
                               )

        # Should give 7 rows (root + 2 children + 4 grandchildren)
        assertEqual(len(output), 7)
        for row in output:
            # next_level tracks whether further level columns may still
            # carry labels; once one is empty, all deeper ones must be too
            next_level = True
            # One generated column per depth level: "_export.0", "_export.1"
            for i in xrange(2):
                hcol = "_export.%s" % i
                # All hierarchy columns must be present
                assertTrue(hcol in row)
                label = row[hcol]
                # The row should belong to this branch
                if label != "" and next_level:
                    assertEqual(label[:6], "Type 1")
                else:
                    # Levels below the last level must be empty
                    next_level = False
                    assertEqual(label, "")
            # The requested plain column is retained, while the hierarchy
            # column itself is replaced by the generated level columns
            assertTrue("test_hierarchy.category" in row)
            assertFalse("test_hierarchy.name" in row)
# -------------------------------------------------------------------------
def testFilteringLeafOnly(self):
""" Test filtering of the tree with leafonly=True """
uids = self.uids
assertEqual = self.assertEqual
assertTrue = self.assertTrue
h = S3Hierarchy("test_hierarchy",
filter = FS("type") == "D",
leafonly = True)
# Check nodes
nodes = h.nodes
expected = ["HIERARCHY2",
"HIERARCHY2-1",
"HIERARCHY2-1-2"]
assertEqual(len(nodes), len(expected))
assertTrue(all(uids[uid] in nodes for uid in expected))
# Check consistency
for node in nodes.values():
assertTrue(all(child_id in nodes for child_id in node["s"]))
parent_id = node["p"]
if parent_id:
assertTrue(parent_id in nodes)
# -------------------------------------------------------------------------
def testFilteringAnyNode(self):
""" Test filtering of the tree with leafonly=False """
uids = self.uids
h = S3Hierarchy("test_hierarchy",
filter = FS("type") == "C",
leafonly = False)
assertEqual = self.assertEqual
assertTrue = self.assertTrue
# Check nodes
nodes = h.nodes
expected = ["HIERARCHY1",
"HIERARCHY1-1",
"HIERARCHY1-2",
"HIERARCHY1-2-2",
"HIERARCHY2",
"HIERARCHY2-1",
"HIERARCHY2-1-1"]
assertEqual(len(nodes), len(expected))
assertTrue(all(uids[uid] in nodes for uid in expected))
# Check consistency
for node in nodes.values():
assertTrue(all(child_id in nodes for child_id in node["s"]))
parent_id = node["p"]
if parent_id:
assertTrue(parent_id in nodes)
# =============================================================================
class S3LinkedHierarchyTests(unittest.TestCase):
    """ Tests for linktable-based hierarchies """

    # -------------------------------------------------------------------------
    @classmethod
    def setUpClass(cls):

        s3db = current.s3db

        s3db.define_table("test_lhierarchy",
                          Field("name"),
                          Field("category"),
                          Field("type"),
                          *s3_meta_fields())

        s3db.define_table("test_lhierarchy_link",
                          Field("parent_id", "reference test_lhierarchy"),
                          Field("child_id", "reference test_lhierarchy"),
                          *s3_meta_fields())

        # Component for import
        # NOTE: a stray trailing comma after this call previously turned the
        # statement into a discarded 1-tuple; harmless but misleading - removed
        s3db.add_components("test_lhierarchy",
                            test_lhierarchy = {"name": "parent",
                                               "link": "test_lhierarchy_link",
                                               "joinby": "child_id",
                                               "key": "parent_id",
                                               },
                            )

        xmlstr = """
<s3xml>
    <resource name="test_lhierarchy" uuid="LHIERARCHY1">
        <data field="name">Type 1</data>
        <data field="category">Cat 0</data>
        <data field="type">A</data>
    </resource>
    <resource name="test_lhierarchy" uuid="LHIERARCHY1-1">
        <data field="name">Type 1-1</data>
        <data field="category">Cat 1</data>
        <data field="type">C</data>
        <resource name="test_lhierarchy_link">
            <reference field="parent_id"
                       resource="test_lhierarchy" uuid="LHIERARCHY1"/>
        </resource>
    </resource>
    <resource name="test_lhierarchy" uuid="LHIERARCHY1-1-1">
        <data field="name">Type 1-1-1</data>
        <data field="category">Cat 2</data>
        <data field="type">B</data>
        <resource name="test_lhierarchy_link">
            <reference field="parent_id"
                       resource="test_lhierarchy" uuid="LHIERARCHY1-1"/>
        </resource>
    </resource>
    <resource name="test_lhierarchy" uuid="LHIERARCHY1-1-2">
        <data field="name">Type 1-1-2</data>
        <data field="category">Cat 2</data>
        <data field="type">A</data>
        <resource name="test_lhierarchy_link">
            <reference field="parent_id"
                       resource="test_lhierarchy" uuid="LHIERARCHY1-1"/>
        </resource>
    </resource>
    <resource name="test_lhierarchy" uuid="LHIERARCHY1-2">
        <data field="name">Type 1-2</data>
        <data field="category">Cat 1</data>
        <data field="type">B</data>
        <resource name="test_lhierarchy_link">
            <reference field="parent_id"
                       resource="test_lhierarchy" uuid="LHIERARCHY1"/>
        </resource>
    </resource>
    <resource name="test_lhierarchy" uuid="LHIERARCHY1-2-1">
        <data field="name">Type 1-2-1</data>
        <data field="category">Cat 2</data>
        <data field="type">B</data>
        <resource name="test_lhierarchy_link">
            <reference field="parent_id"
                       resource="test_lhierarchy" uuid="LHIERARCHY1-2"/>
        </resource>
    </resource>
    <resource name="test_lhierarchy" uuid="LHIERARCHY1-2-2">
        <data field="name">Type 1-2-2</data>
        <data field="category">Cat 2</data>
        <data field="type">C</data>
        <resource name="test_lhierarchy_link">
            <reference field="parent_id"
                       resource="test_lhierarchy" uuid="LHIERARCHY1-2"/>
        </resource>
    </resource>
    <resource name="test_lhierarchy" uuid="LHIERARCHY2">
        <data field="name">Type 2</data>
        <data field="category">Cat 0</data>
        <data field="type">B</data>
    </resource>
    <resource name="test_lhierarchy" uuid="LHIERARCHY2-1">
        <data field="name">Type 2-1</data>
        <data field="category">Cat 1</data>
        <data field="type">A</data>
        <resource name="test_lhierarchy_link">
            <reference field="parent_id"
                       resource="test_lhierarchy" uuid="LHIERARCHY2"/>
        </resource>
    </resource>
    <resource name="test_lhierarchy" uuid="LHIERARCHY2-1-1">
        <data field="name">Type 2-1-1</data>
        <data field="category">Cat 2</data>
        <data field="type">C</data>
        <resource name="test_lhierarchy_link">
            <reference field="parent_id"
                       resource="test_lhierarchy" uuid="LHIERARCHY2-1"/>
        </resource>
    </resource>
    <resource name="test_lhierarchy" uuid="LHIERARCHY2-1-2">
        <data field="name">Type 2-1-2</data>
        <data field="category">Cat 2</data>
        <data field="type">D</data>
        <resource name="test_lhierarchy_link">
            <reference field="parent_id"
                       resource="test_lhierarchy" uuid="LHIERARCHY2-1"/>
        </resource>
    </resource>
</s3xml>
"""

        xmltree = etree.ElementTree(etree.fromstring(xmlstr))
        current.auth.override = True
        resource = s3db.resource("test_lhierarchy")
        resource.import_xml(xmltree)

    # -------------------------------------------------------------------------
    @classmethod
    def tearDownClass(cls):

        db = current.db
        db.test_lhierarchy_link.drop()
        db.test_lhierarchy.drop()

    # -------------------------------------------------------------------------
    def setUp(self):
        """
        Enable auth override, lazily cache records and links, and
        configure the linktable-based hierarchy
        """

        current.auth.override = True

        db = current.db
        if not hasattr(self, "rows"):
            table = db.test_lhierarchy
            linktable = db.test_lhierarchy_link

            left = linktable.on(linktable.child_id == table.id)
            rows = db(db.test_lhierarchy.id>0).select(table.id,
                                                      table.uuid,
                                                      table.category,
                                                      linktable.child_id,
                                                      linktable.parent_id,
                                                      left=left)
            self.rows = {}   # uuid -> hierarchy Row
            self.links = {}  # uuid -> link Row
            self.uids = {}   # uuid -> record id
            self.ids = {}    # record id -> uuid
            for row in rows:
                record = row.test_lhierarchy
                uid = record.uuid
                self.rows[uid] = record
                self.links[uid] = row.test_lhierarchy_link
                self.uids[uid] = record.id
                self.ids[record.id] = uid

        # Parent resolved via the link table
        current.s3db.configure("test_lhierarchy",
                               hierarchy=("child_id:test_lhierarchy_link.parent_id",
                                          "category"))

    # -------------------------------------------------------------------------
    def tearDown(self):

        current.auth.override = False

    # -------------------------------------------------------------------------
    def testHierarchyConstruction(self):
        """ Test hierarchy construction """

        uids = self.uids

        h = S3Hierarchy("test_lhierarchy")

        assertEqual = self.assertEqual
        assertTrue = self.assertTrue

        roots = h.roots
        assertEqual(len(roots), 2)
        assertTrue(uids["LHIERARCHY1"] in roots)
        assertTrue(uids["LHIERARCHY2"] in roots)

        nodes = h.nodes
        assertEqual(len(nodes), len(uids))
        assertTrue(all(node_id in nodes for node_id in uids.values()))

    # -------------------------------------------------------------------------
    def testPreprocessCreateNode(self):
        """ Test preprocessing of a create-node request """

        r = s3_request("test", "lhierarchy", http="POST")
        parent_node = self.rows["LHIERARCHY1"]

        h = S3Hierarchy("test_lhierarchy")
        link = h.preprocess_create_node(r, parent_node.id)

        # With a link table, the link details must be returned
        self.assertNotEqual(link, None)

        assertEqual = self.assertEqual
        assertEqual(link["linktable"], "test_lhierarchy_link")
        assertEqual(link["lkey"], "child_id")
        assertEqual(link["rkey"], "parent_id")
        assertEqual(link["parent_id"], parent_node.id)

    # -------------------------------------------------------------------------
    def testPostprocessCreateNode(self):
        """ Test postprocessing of a create-node request """

        r = s3_request("test", "lhierarchy", http="POST")
        parent_node = self.rows["LHIERARCHY1"]

        h = S3Hierarchy("test_lhierarchy")
        link = h.preprocess_create_node(r, parent_node.id)

        row = None
        record_id = None

        db = current.db
        table = db.test_lhierarchy
        linktable = db.test_lhierarchy_link
        try:
            record = {"uuid": "LHIERARCHYNEW", "name": "NewNode"}
            record_id = table.insert(**record)
            record["id"] = record_id
            h.postprocess_create_node(link, record)

            # A link row connecting parent and new node must now exist
            query = (linktable.parent_id == parent_node.id) & \
                    (linktable.child_id == record_id)
            row = db(query).select(linktable.id, limitby=(0, 1)).first()
            self.assertNotEqual(row, None)
        finally:
            # Cleanup
            if row:
                row.delete_record()
            if record_id:
                db(table.id == record_id).delete()

    # -------------------------------------------------------------------------
    def testCategory(self):
        """ Test node category lookup """

        uids = self.uids
        rows = self.rows
        assertEqual = self.assertEqual

        h = S3Hierarchy("test_lhierarchy")
        for uid in uids:
            category = h.category(uids[uid])
            assertEqual(category, rows[uid].category)

    # -------------------------------------------------------------------------
    def testParent(self):
        """ Test parent lookup """

        uids = self.uids
        rows = self.rows
        links = self.links
        assertEqual = self.assertEqual

        h = S3Hierarchy("test_lhierarchy")
        for uid in uids:
            parent, category = h.parent(uids[uid], classify=True)
            # The parent comes from the link table here
            assertEqual(parent, links[uid].parent_id)
            if parent:
                parent_uid = self.ids[parent]
                assertEqual(category, rows[parent_uid].category)

    # -------------------------------------------------------------------------
    def testChildren(self):
        """ Test child node lookup """

        uids = self.uids
        links = self.links
        assertEqual = self.assertEqual

        h = S3Hierarchy("test_lhierarchy")
        for uid in uids:
            assertEqual(h.children(uids[uid]),
                        set(link.child_id for link in links.values()
                                          if link.parent_id == uids[uid]))

    # -------------------------------------------------------------------------
    def testPath(self):
        """ Test node path lookup """

        uids = self.uids
        rows = self.rows
        assertEqual = self.assertEqual

        # Standard path from root
        node = uids["LHIERARCHY2-1-2"]
        h = S3Hierarchy("test_lhierarchy")
        path = h.path(node)
        assertEqual(path, [uids["LHIERARCHY2"],
                           uids["LHIERARCHY2-1"],
                           uids["LHIERARCHY2-1-2"]
                           ])

        # Path from category root
        node = uids["LHIERARCHY1-1-1"]
        path = h.path(node, category="Cat 1", classify=True)
        classified = lambda uid: (uids[uid], rows[uid].category)
        assertEqual(path, [classified("LHIERARCHY1-1"),
                           classified("LHIERARCHY1-1-1"),
                           ])

        # Path of root
        node = uids["LHIERARCHY2"]
        path = h.path(node, category="Cat 1", classify=True)
        classified = lambda uid: (uids[uid], rows[uid].category)
        assertEqual(path, [classified("LHIERARCHY2")])

    # -------------------------------------------------------------------------
    def testRoot(self):
        """ Test root node lookup """

        uids = self.uids
        rows = self.rows
        assertEqual = self.assertEqual

        # Top root
        node = uids["LHIERARCHY1-1-1"]
        h = S3Hierarchy("test_lhierarchy")
        root = h.root(node)
        assertEqual(root, uids["LHIERARCHY1"])

        # Classified root lookup returns (id, category)
        node = uids["LHIERARCHY2-1"]
        root = h.root(node, classify=True)
        assertEqual(root, (uids["LHIERARCHY2"],
                           rows["LHIERARCHY2"].category))

        # Root of root
        node = uids["LHIERARCHY1"]
        root = h.root(node)
        assertEqual(root, uids["LHIERARCHY1"])

        # None
        root = h.root(None)
        assertEqual(root, None)

    # -------------------------------------------------------------------------
    def testSiblings(self):
        """ Test lookup of sibling nodes """

        uids = self.uids
        ids = self.ids
        links = self.links
        assertEqual = self.assertEqual

        h = S3Hierarchy("test_lhierarchy")
        for uid in uids:
            parent = links[uid].parent_id
            siblings = set(node for node, _uid in ids.items()
                                if links[_uid].parent_id == parent)
            assertEqual(h.siblings(uids[uid], inclusive=True), siblings)
            siblings.discard(uids[uid])
            assertEqual(h.siblings(uids[uid], inclusive=False), siblings)

    # -------------------------------------------------------------------------
    def testFindAll(self):
        """ Test lookup of descendant nodes """

        uids = self.uids
        assertEqual = self.assertEqual

        h = S3Hierarchy("test_lhierarchy")

        root = uids["LHIERARCHY1"]
        nodes = h.findall(root)
        expected = ["LHIERARCHY1-1",
                    "LHIERARCHY1-1-1",
                    "LHIERARCHY1-1-2",
                    "LHIERARCHY1-2",
                    "LHIERARCHY1-2-1",
                    "LHIERARCHY1-2-2",
                    ]
        assertEqual(nodes, set(uids[uid] for uid in expected))

        root = uids["LHIERARCHY1"]
        nodes = h.findall(root, inclusive=True)
        expected = ["LHIERARCHY1",
                    "LHIERARCHY1-1",
                    "LHIERARCHY1-1-1",
                    "LHIERARCHY1-1-2",
                    "LHIERARCHY1-2",
                    "LHIERARCHY1-2-1",
                    "LHIERARCHY1-2-2",
                    ]
        assertEqual(nodes, set(uids[uid] for uid in expected))

        root = uids["LHIERARCHY2"]
        nodes = h.findall(root, category="Cat 1")
        expected = ["LHIERARCHY2-1",
                    ]
        assertEqual(nodes, set(uids[uid] for uid in expected))

        root = uids["LHIERARCHY1"]
        nodes = h.findall(root, category="Cat 4")
        assertEqual(nodes, set())

    # -------------------------------------------------------------------------
    def testFilteringLeafOnly(self):
        """ Test filtering of the tree with leafonly=True """

        uids = self.uids
        assertEqual = self.assertEqual
        assertTrue = self.assertTrue

        h = S3Hierarchy("test_lhierarchy",
                        filter = FS("type") == "D",
                        leafonly = True)

        # Check nodes
        nodes = h.nodes
        expected = ["LHIERARCHY2",
                    "LHIERARCHY2-1",
                    "LHIERARCHY2-1-2"]
        assertEqual(len(nodes), len(expected))
        assertTrue(all(uids[uid] in nodes for uid in expected))

        # Check consistency
        for node in nodes.values():
            assertTrue(all(child_id in nodes for child_id in node["s"]))
            parent_id = node["p"]
            if parent_id:
                assertTrue(parent_id in nodes)

    # -------------------------------------------------------------------------
    def testFilteringAnyNode(self):
        """ Test filtering of the tree with leafonly=False """

        uids = self.uids
        assertEqual = self.assertEqual
        assertTrue = self.assertTrue

        h = S3Hierarchy("test_lhierarchy",
                        filter = FS("type") == "C",
                        leafonly = False)

        # Check nodes
        nodes = h.nodes
        expected = ["LHIERARCHY1",
                    "LHIERARCHY1-1",
                    "LHIERARCHY1-2",
                    "LHIERARCHY1-2-2",
                    "LHIERARCHY2",
                    "LHIERARCHY2-1",
                    "LHIERARCHY2-1-1"]
        assertEqual(len(nodes), len(expected))
        assertTrue(all(uids[uid] in nodes for uid in expected))

        # Check consistency
        for node in nodes.values():
            assertTrue(all(child_id in nodes for child_id in node["s"]))
            parent_id = node["p"]
            if parent_id:
                assertTrue(parent_id in nodes)
# =============================================================================
class S3TypeOfTests(unittest.TestCase):
""" Tests for __typeof query operator """
# -------------------------------------------------------------------------
    @classmethod
    def setUpClass(cls):
        """
        Define the lookup/hierarchy/referencing test tables and import
        the hierarchy fixture records.
        """

        s3db = current.s3db

        # Flat lookup table (no hierarchy) referenced from the hierarchy table
        s3db.define_table("typeof_nonhierarchy",
                          Field("name"),
                          *s3_meta_fields())

        # Hierarchy table: self-reference "parent" plus single- and
        # multi-references into the non-hierarchical lookup table,
        # and two virtual fields (single- and multi-valued)
        s3db.define_table("typeof_hierarchy",
                          Field("name"),
                          Field("parent", "reference typeof_hierarchy"),
                          Field("typeof_nonhierarchy_id", "reference typeof_nonhierarchy"),
                          Field("typeof_nonhierarchy_multi_id", "list:reference typeof_nonhierarchy"),
                          Field.Method("vsfield", lambda row: "test"),
                          Field.Method("vmfield", lambda row: ["test1", "test2", "test3"]),
                          *s3_meta_fields())

        # Table referencing the hierarchy (single and multi)
        s3db.define_table("typeof_hierarchy_reference",
                          Field("typeof_hierarchy_id", "reference typeof_hierarchy"),
                          Field("typeof_hierarchy_multi_id", "list:reference typeof_hierarchy"),
                          *s3_meta_fields())

        # NOTE(review): the uuid attribute on the multi-reference below uses
        # nested double quotes (uuid="["..."]") which looks like malformed
        # XML — confirm that the importer's parser accepts this form
        xmlstr = """
<s3xml>
    <resource name="typeof_nonhierarchy" uuid="NONHIERARCHY1">
        <data field="name">NonHierarchy1</data>
    </resource>
    <resource name="typeof_nonhierarchy" uuid="NONHIERARCHY2">
        <data field="name">NonHierarchy2</data>
    </resource>
    <resource name="typeof_hierarchy" uuid="HIERARCHY1">
        <data field="name">Type 1</data>
    </resource>
    <resource name="typeof_hierarchy" uuid="HIERARCHY1-1">
        <data field="name">Type 1-1</data>
        <reference field="parent" resource="typeof_hierarchy" uuid="HIERARCHY1"/>
        <reference field="typeof_nonhierarchy_id" resource="typeof_nonhierarchy" uuid="NONHIERARCHY1"/>
    </resource>
    <resource name="typeof_hierarchy" uuid="HIERARCHY1-1-1">
        <data field="name">Type 1-1-1</data>
        <reference field="parent" resource="typeof_hierarchy" uuid="HIERARCHY1-1"/>
    </resource>
    <resource name="typeof_hierarchy" uuid="HIERARCHY1-1-2">
        <data field="name">Type 1-1-2</data>
        <reference field="parent" resource="typeof_hierarchy" uuid="HIERARCHY1-1"/>
    </resource>
    <resource name="typeof_hierarchy" uuid="HIERARCHY1-2">
        <data field="name">Type 1-2</data>
        <reference field="parent" resource="typeof_hierarchy" uuid="HIERARCHY1"/>
    </resource>
    <resource name="typeof_hierarchy" uuid="HIERARCHY1-2-1">
        <data field="name">Type 1-2-1</data>
        <reference field="parent" resource="typeof_hierarchy" uuid="HIERARCHY1-2"/>
    </resource>
    <resource name="typeof_hierarchy" uuid="HIERARCHY1-2-2">
        <data field="name">Type 1-2-2</data>
        <reference field="parent" resource="typeof_hierarchy" uuid="HIERARCHY1-2"/>
    </resource>
    <resource name="typeof_hierarchy" uuid="HIERARCHY2">
        <data field="name">Type 2</data>
    </resource>
    <resource name="typeof_hierarchy" uuid="HIERARCHY2-1">
        <data field="name">Type 2-1</data>
        <reference field="typeof_nonhierarchy_multi_id" resource="typeof_nonhierarchy"
                   uuid="["NONHIERARCHY1","NONHIERARCHY2"]"/>
        <reference field="parent" resource="typeof_hierarchy" uuid="HIERARCHY2"/>
    </resource>
    <resource name="typeof_hierarchy" uuid="HIERARCHY2-1-1">
        <data field="name">Type 2-1-1</data>
        <reference field="parent" resource="typeof_hierarchy" uuid="HIERARCHY2-1"/>
    </resource>
    <resource name="typeof_hierarchy" uuid="HIERARCHY2-1-2">
        <data field="name">Type 2-1-2</data>
        <reference field="parent" resource="typeof_hierarchy" uuid="HIERARCHY2-1"/>
    </resource>
</s3xml>
"""

        xmltree = etree.ElementTree(etree.fromstring(xmlstr))
        current.auth.override = True
        resource = s3db.resource("typeof_hierarchy")
        resource.import_xml(xmltree)
# -------------------------------------------------------------------------
@classmethod
def tearDownClass(cls):
db = current.db
db.typeof_hierarchy.drop(mode="cascade")
db.typeof_hierarchy_reference.drop()
db.typeof_nonhierarchy.drop()
# -------------------------------------------------------------------------
def setUp(self):
current.auth.override = True
db = current.db
if not hasattr(self, "rows"):
rows = db(db.typeof_hierarchy.id>0).select()
self.rows = {}
self.uids = {}
self.ids = {}
for row in rows:
uid = row.uuid
self.rows[uid] = row
self.uids[uid] = row.id
self.ids[row.id] = uid
if not hasattr(self, "lookup_uids"):
rows = db(db.typeof_nonhierarchy.id>0).select()
self.lookup_uids = {}
for row in rows:
uid = row.uuid
self.lookup_uids[uid] = row.id
current.s3db.configure("typeof_hierarchy", hierarchy="parent")
# -------------------------------------------------------------------------
    def tearDown(self):
        """ Disable auth override """

        current.auth.override = False
# -------------------------------------------------------------------------
def testTypeOfReferenceSingle(self):
"""
Test resolution of __typeof queries, for field in referencing
table, with single value
"""
db = current.db
uids = self.uids
resource = current.s3db.resource("typeof_hierarchy_reference")
# Test with field in referencing table
expr = FS("typeof_hierarchy_id").typeof(uids["HIERARCHY1"])
query = expr.query(resource)
table = resource.table
expected = set(uids[uid] for uid in ("HIERARCHY1",
"HIERARCHY1-1",
"HIERARCHY1-1-1",
"HIERARCHY1-1-2",
"HIERARCHY1-2",
"HIERARCHY1-2-1",
"HIERARCHY1-2-2",
))
expected_query = table.typeof_hierarchy_id.belongs(expected)
self.assertEquivalent(query, expected_query)
# -------------------------------------------------------------------------
def testTypeOfReferenceMultiple(self):
"""
Test resolution of __typeof queries, for field in referencing
table, with multiple values
"""
db = current.db
uids = self.uids
resource = current.s3db.resource("typeof_hierarchy_reference")
# Test with field in referencing table
expr = FS("typeof_hierarchy_id").typeof((uids["HIERARCHY1-2"],
uids["HIERARCHY2-1"],
))
query = expr.query(resource)
table = resource.table
expected = set(uids[uid] for uid in ("HIERARCHY1-2",
"HIERARCHY1-2-1",
"HIERARCHY1-2-2",
"HIERARCHY2-1",
"HIERARCHY2-1-1",
"HIERARCHY2-1-2",
))
expected_query = table.typeof_hierarchy_id.belongs(expected)
self.assertEquivalent(query, expected_query)
# -------------------------------------------------------------------------
def testTypeOfReferenceNone(self):
"""
Test resolution of __typeof queries, for field in referencing
table, with None value
"""
db = current.db
uids = self.uids
resource = current.s3db.resource("typeof_hierarchy_reference")
# Test with None
expr = FS("typeof_hierarchy_id").typeof(None)
query = expr.query(resource)
table = resource.table
expected = set(uids[uid] for uid in ("HIERARCHY1-2",
"HIERARCHY1-2-1",
"HIERARCHY1-2-2",
))
expected_query = (table.typeof_hierarchy_id == None)
self.assertEquivalent(query, expected_query)
# Test with list
expr = FS("typeof_hierarchy_id").typeof([None])
query = expr.query(resource)
table = resource.table
expected = set(uids[uid] for uid in ("HIERARCHY1-2",
"HIERARCHY1-2-1",
"HIERARCHY1-2-2",
))
expected_query = (table.typeof_hierarchy_id == None)
self.assertEquivalent(query, expected_query)
# Test with multiple values
expr = FS("typeof_hierarchy_id").typeof([None, uids["HIERARCHY1-2"]])
query = expr.query(resource)
table = resource.table
expected = set(uids[uid] for uid in ("HIERARCHY1-2",
"HIERARCHY1-2-1",
"HIERARCHY1-2-2",
))
expected_query = (table.typeof_hierarchy_id.belongs(expected)) | \
(table.typeof_hierarchy_id == None)
self.assertEquivalent(query, expected_query)
# -------------------------------------------------------------------------
def testTypeOfReferenceNoHierarchy(self):
"""
Test resolution of __typeof queries, for field in referencing
table, with no hierarchy configured
"""
db = current.db
uids = self.uids
# Remove hierarchy setting
current.s3db.clear_config("typeof_hierarchy", "hierarchy")
resource = current.s3db.resource("typeof_hierarchy_reference")
# Test with field in referencing table, single value
expr = FS("typeof_hierarchy_id").typeof(uids["HIERARCHY1-2"])
query = expr.query(resource)
expected = uids["HIERARCHY1-2"]
expected_query = resource.table.typeof_hierarchy_id == expected
self.assertEquivalent(query, expected_query)
# Test with field in referencing table, multiple values
expr = FS("typeof_hierarchy_id").typeof((uids["HIERARCHY1-2"],
uids["HIERARCHY2-1"]
))
query = expr.query(resource)
expected = set(uids[uid] for uid in ("HIERARCHY1-2",
"HIERARCHY2-1",
))
expected_query = resource.table.typeof_hierarchy_id.belongs(expected)
self.assertEquivalent(query, expected_query)
# -------------------------------------------------------------------------
def testTypeOfLookupTableSingle(self):
"""
Test resolution of __typeof queries, for field in lookup table,
with single value
"""
db = current.db
uids = self.uids
resource = current.s3db.resource("typeof_hierarchy_reference")
# Test with field in hierarchy table
expr = FS("typeof_hierarchy_id$name").typeof("Type 1")
query = expr.query(resource)
table = db.typeof_hierarchy
expected = set(uids[uid] for uid in ("HIERARCHY1",
"HIERARCHY1-1",
"HIERARCHY1-1-1",
"HIERARCHY1-1-2",
"HIERARCHY1-2",
"HIERARCHY1-2-1",
"HIERARCHY1-2-2",
))
expected_query = table.id.belongs(expected)
self.assertEquivalent(query, expected_query)
# -------------------------------------------------------------------------
    def testTypeOfLookupTableMultiple(self):
        """
        Test resolution of __typeof queries, for field in lookup table,
        with multiple values
        """
        db = current.db
        uids = self.uids
        resource = current.s3db.resource("typeof_hierarchy_reference")
        # Test with field in hierarchy table
        expr = FS("typeof_hierarchy_id$name").typeof(("Type 1-2", "Type 2-1"))
        query = expr.query(resource)
        table = db.typeof_hierarchy
        # Expected: the union of the "Type 1-2" and "Type 2-1" subtrees
        expected = set(uids[uid] for uid in ("HIERARCHY1-2",
                                             "HIERARCHY1-2-1",
                                             "HIERARCHY1-2-2",
                                             "HIERARCHY2-1",
                                             "HIERARCHY2-1-1",
                                             "HIERARCHY2-1-2",
                                             ))
        # The query must translate into belongs() over the subtree ids
        expected_query = table.id.belongs(expected)
        self.assertEquivalent(query, expected_query)
# -------------------------------------------------------------------------
    def testTypeOfLookupTableSingleWildcard(self):
        """
        Test resolution of __typeof queries, for field in lookup table,
        with single value with wildcards
        """
        db = current.db
        uids = self.uids
        resource = current.s3db.resource("typeof_hierarchy_reference")
        # Test with field in hierarchy table, with wildcard
        # "Type 1-*" matches the "Type 1-1" and "Type 1-2" branches,
        # but not the "Type 1" root itself
        expr = FS("typeof_hierarchy_id$name").typeof("Type 1-*")
        query = expr.query(resource)
        table = db.typeof_hierarchy
        expected = set(uids[uid] for uid in ("HIERARCHY1-1",
                                             "HIERARCHY1-1-1",
                                             "HIERARCHY1-1-2",
                                             "HIERARCHY1-2",
                                             "HIERARCHY1-2-1",
                                             "HIERARCHY1-2-2",
                                             ))
        expected_query = table.id.belongs(expected)
        self.assertEquivalent(query, expected_query)
# -------------------------------------------------------------------------
    def testTypeOfLookupTableMultipleWildcard(self):
        """
        Test resolution of __typeof queries, for field in lookup table,
        with multiple values with wildcards
        """
        db = current.db
        uids = self.uids
        resource = current.s3db.resource("typeof_hierarchy_reference")
        # Test with field in hierarchy table, with wildcard
        # Each wildcard pattern expands independently; the result is the
        # union of all matching subtrees
        expr = FS("typeof_hierarchy_id$name").typeof(("Type 1-1-*", "Type 2-1*"))
        query = expr.query(resource)
        table = db.typeof_hierarchy
        expected = set(uids[uid] for uid in ("HIERARCHY1-1-1",
                                             "HIERARCHY1-1-2",
                                             "HIERARCHY2-1",
                                             "HIERARCHY2-1-1",
                                             "HIERARCHY2-1-2",
                                             ))
        expected_query = table.id.belongs(expected)
        self.assertEquivalent(query, expected_query)
# -------------------------------------------------------------------------
def testTypeOfLookupTableSingleUnresolvable(self):
"""
Test resolution of __typeof queries, for field in lookup table,
with unresolvable value
"""
db = current.db
uids = self.uids
resource = current.s3db.resource("typeof_hierarchy_reference")
# Test with field in hierarchy table, with wildcard, no match
expr = FS("typeof_hierarchy_id$name").typeof("Type 1-3*")
query = expr.query(resource)
table = db.typeof_hierarchy
expected_query = table.id.belongs(set())
self.assertEquivalent(query, expected_query)
# -------------------------------------------------------------------------
def testTypeOfLookupTableNone(self):
"""
Test resolution of __typeof queries, for field in lookup table,
with None value
"""
db = current.db
uids = self.uids
resource = current.s3db.resource("typeof_hierarchy_reference")
# Test with None
expr = FS("typeof_hierarchy_id$name").typeof(None)
query = expr.query(resource)
table = db.typeof_hierarchy
expected_query = table.id.belongs(set())
self.assertEquivalent(query, expected_query)
# Test with list
expr = FS("typeof_hierarchy_id$name").typeof([None])
query = expr.query(resource)
#table = db.typeof_hierarchy
expected_query = table.id.belongs(set())
self.assertEquivalent(query, expected_query)
# Test with multiple values
expr = FS("typeof_hierarchy_id$name").typeof([None, "Type 1-1-2"])
query = expr.query(resource)
#table = db.typeof_hierarchy
expected_query = (table.id == uids["HIERARCHY1-1-2"])
self.assertEquivalent(query, expected_query)
# -------------------------------------------------------------------------
def testTypeOfLookupTableReference(self):
"""
Test resolution of __typeof queries, for reference field
in lookup table
"""
db = current.db
uids = self.uids
lookup_uids = self.lookup_uids
resource = current.s3db.resource("typeof_hierarchy_reference")
# Test with single value
lookup = lookup_uids["NONHIERARCHY1"]
expr = FS("typeof_hierarchy_id$typeof_nonhierarchy_id").typeof(lookup)
query = expr.query(resource)
table = db.typeof_hierarchy
expected = set(uids[uid] for uid in ("HIERARCHY1-1",
"HIERARCHY1-1-1",
"HIERARCHY1-1-2",
))
expected_query = table.id.belongs(expected)
# Test with multiple values
lookup = (lookup_uids["NONHIERARCHY1"],
lookup_uids["NONHIERARCHY2"])
expr = FS("typeof_hierarchy_id$typeof_nonhierarchy_id").typeof(lookup)
query = expr.query(resource)
table = db.typeof_hierarchy
expected = set(uids[uid] for uid in ("HIERARCHY1-1",
"HIERARCHY1-1-1",
"HIERARCHY1-1-2",
))
expected_query = table.id.belongs(expected)
self.assertEquivalent(query, expected_query)
# -------------------------------------------------------------------------
def testTypeOfLookupTableListReference(self):
"""
Test resolution of __typeof queries, for list:reference field
in lookup table
"""
db = current.db
uids = self.uids
lookup_uids = self.lookup_uids
resource = current.s3db.resource("typeof_hierarchy_reference")
# Test with single value
lookup = lookup_uids["NONHIERARCHY1"]
expr = FS("typeof_hierarchy_id$typeof_nonhierarchy_multi_id").typeof(lookup)
query = expr.query(resource)
table = db.typeof_hierarchy
expected = set(uids[uid] for uid in ("HIERARCHY2-1",
"HIERARCHY2-1-1",
"HIERARCHY2-1-2",
))
expected_query = table.id.belongs(expected)
# Test with multiple values
lookup = (lookup_uids["NONHIERARCHY1"],
lookup_uids["NONHIERARCHY2"])
expr = FS("typeof_hierarchy_id$typeof_nonhierarchy_multi_id").typeof(lookup)
query = expr.query(resource)
table = db.typeof_hierarchy
expected = set(uids[uid] for uid in ("HIERARCHY2-1",
"HIERARCHY2-1-1",
"HIERARCHY2-1-2",
))
expected_query = table.id.belongs(expected)
self.assertEquivalent(query, expected_query)
# -------------------------------------------------------------------------
    def testTypeOfLookupTableNoHierarchy(self):
        """
        Test resolution of __typeof queries, for field in lookup
        table, with no hierarchy configured
        """
        db = current.db
        uids = self.uids
        # Remove hierarchy setting
        current.s3db.clear_config("typeof_hierarchy", "hierarchy")
        resource = current.s3db.resource("typeof_hierarchy_reference")
        # Test with field in lookup table, single value
        # Without a hierarchy, typeof falls back to plain equality
        expr = FS("typeof_hierarchy_id$name").typeof("Type 1-2")
        query = expr.query(resource)
        table = db.typeof_hierarchy
        expected_query = (table.name == "Type 1-2")
        self.assertEquivalent(query, expected_query)
        # Test with field in lookup table
        # Multiple values fall back to a plain belongs()
        expr = FS("typeof_hierarchy_id$name").typeof(("Type 1-2", "Type 2-1"))
        query = expr.query(resource)
        table = db.typeof_hierarchy
        expected_query = table.name.belongs(("Type 1-2", "Type 2-1"))
        self.assertEquivalent(query, expected_query)
        # Test with field in lookup table, multiple values + wildcards
        # Wildcards still expand into LIKE-patterns in fallback mode
        expr = FS("typeof_hierarchy_id$name").typeof(("Type 1-*", "Type 2-1"))
        query = expr.query(resource)
        table = db.typeof_hierarchy
        expected_query = (table.name.like("Type 1-%")) | \
                         (table.name == "Type 2-1")
        self.assertEquivalent(query, expected_query)
# -------------------------------------------------------------------------
    def testTypeOfListReferenceSingle(self):
        """
        Test resolution of __typeof queries, for list:reference,
        with single value
        """
        db = current.db
        uids = self.uids
        resource = current.s3db.resource("typeof_hierarchy_reference")
        # Test with single value
        expr = FS("typeof_hierarchy_multi_id").typeof(uids["HIERARCHY1"])
        query = expr.query(resource)
        table = resource.table
        expected = set(uids[uid] for uid in ("HIERARCHY1",
                                             "HIERARCHY1-1",
                                             "HIERARCHY1-1-1",
                                             "HIERARCHY1-1-2",
                                             "HIERARCHY1-2",
                                             "HIERARCHY1-2-1",
                                             "HIERARCHY1-2-2",
                                             ))
        # list:reference queries resolve into OR-ed CONTAINS sub-queries,
        # so inspect the query tree instead of comparing a single query
        found = self.inspect_multi_query(query,
                                         field = table.typeof_hierarchy_multi_id,
                                         conjunction = db._adapter.OR,
                                         op = db._adapter.CONTAINS)
        self.assertEqual(found, expected)
# -------------------------------------------------------------------------
    def testTypeOfListReferenceMultiple(self):
        """
        Test resolution of __typeof queries, for list:reference,
        with multiple values
        """
        db = current.db
        uids = self.uids
        resource = current.s3db.resource("typeof_hierarchy_reference")
        # Test with multiple values
        expr = FS("typeof_hierarchy_multi_id").typeof((uids["HIERARCHY1-2"],
                                                       uids["HIERARCHY2-1"]))
        query = expr.query(resource)
        table = resource.table
        # Expected: the union of both subtrees
        expected = set(uids[uid] for uid in ("HIERARCHY1-2",
                                             "HIERARCHY1-2-1",
                                             "HIERARCHY1-2-2",
                                             "HIERARCHY2-1",
                                             "HIERARCHY2-1-1",
                                             "HIERARCHY2-1-2",
                                             ))
        # list:reference queries resolve into OR-ed CONTAINS sub-queries,
        # so inspect the query tree instead of comparing a single query
        found = self.inspect_multi_query(query,
                                         field = table.typeof_hierarchy_multi_id,
                                         conjunction = db._adapter.OR,
                                         op = db._adapter.CONTAINS)
        self.assertEqual(found, expected)
# -------------------------------------------------------------------------
    def testTypeOfListReferenceNone(self):
        """
        Test resolution of __typeof queries, for list:reference,
        with None value
        """
        db = current.db
        uids = self.uids
        resource = current.s3db.resource("typeof_hierarchy_reference")
        # Test with None
        # None alone is unresolvable => empty belongs-set
        expr = FS("typeof_hierarchy_multi_id").typeof(None)
        query = expr.query(resource)
        table = resource.table
        expected_query = table.id.belongs(set())
        self.assertEquivalent(query, expected_query)
        # Test with list
        expr = FS("typeof_hierarchy_multi_id").typeof([None])
        query = expr.query(resource)
        #table = resource.table
        expected_query = table.id.belongs(set())
        self.assertEquivalent(query, expected_query)
        # Test with multiple values
        # None is ignored when combined with resolvable values
        expr = FS("typeof_hierarchy_multi_id").typeof((None,
                                                       uids["HIERARCHY2-1"]))
        query = expr.query(resource)
        #table = resource.table
        expected = set(uids[uid] for uid in ("HIERARCHY2-1",
                                             "HIERARCHY2-1-1",
                                             "HIERARCHY2-1-2",
                                             ))
        found = self.inspect_multi_query(query,
                                         field = table.typeof_hierarchy_multi_id,
                                         conjunction = db._adapter.OR,
                                         op = db._adapter.CONTAINS)
        self.assertEqual(found, expected)
# -------------------------------------------------------------------------
    def testTypeOfListReferenceNoHierarchy(self):
        """
        Test resolution of __typeof queries, for list:reference,
        with single value
        """
        db = current.db
        uids = self.uids
        # Remove hierarchy setting
        # Without a hierarchy, only the given values themselves match
        # (no subtree expansion)
        current.s3db.clear_config("typeof_hierarchy", "hierarchy")
        resource = current.s3db.resource("typeof_hierarchy_reference")
        # Test with single value
        expr = FS("typeof_hierarchy_multi_id").typeof(uids["HIERARCHY1"])
        query = expr.query(resource)
        table = resource.table
        expected = set(uids[uid] for uid in ("HIERARCHY1",))
        found = self.inspect_multi_query(query,
                                         field = table.typeof_hierarchy_multi_id,
                                         conjunction = db._adapter.OR,
                                         op = db._adapter.CONTAINS)
        self.assertEqual(found, expected)
        # Test with multiple values
        expr = FS("typeof_hierarchy_multi_id").typeof((uids["HIERARCHY1-2"],
                                                       uids["HIERARCHY2-1"]))
        query = expr.query(resource)
        table = resource.table
        expected = set(uids[uid] for uid in ("HIERARCHY1-2",
                                             "HIERARCHY2-1",
                                             ))
        found = self.inspect_multi_query(query,
                                         field = table.typeof_hierarchy_multi_id,
                                         conjunction = db._adapter.OR,
                                         op = db._adapter.CONTAINS)
        self.assertEqual(found, expected)
# -------------------------------------------------------------------------
def testVirtualFieldSingle(self):
""" Test fallbacks for __typeof with single value virtual field """
resource = current.s3db.resource("typeof_hierarchy")
row = self.rows["HIERARCHY1"]
# vsfield returns "test"
expr = FS("vsfield").typeof("test")
result = expr(resource, row, virtual=True)
self.assertTrue(result)
expr = FS("vsfield").typeof("other")
result = expr(resource, row, virtual=True)
self.assertFalse(result)
expr = FS("vsfield").typeof(["test", "test1", "test2"])
result = expr(resource, row, virtual=True)
self.assertTrue(result)
expr = FS("vsfield").typeof(["other", "other1", "other2"])
result = expr(resource, row, virtual=True)
self.assertFalse(result)
# -------------------------------------------------------------------------
def testVirtualFieldMultiple(self):
""" Test fallbacks for __typeof with multi-value virtual field """
resource = current.s3db.resource("typeof_hierarchy")
row = self.rows["HIERARCHY2"]
# vmfield returns ["test1", "test2", "test3"]
expr = FS("vmfield").typeof("test1")
result = expr(resource, row, virtual=True)
self.assertTrue(result)
expr = FS("vmfield").typeof("other")
result = expr(resource, row, virtual=True)
self.assertFalse(result)
expr = FS("vmfield").typeof(["test1", "other"])
result = expr(resource, row, virtual=True)
self.assertTrue(result)
expr = FS("vmfield").typeof(["other1", "other2"])
result = expr(resource, row, virtual=True)
self.assertFalse(result)
# -------------------------------------------------------------------------
def testHierarchyFilterTypeOf(self):
""" Test S3HierarchyFilter recognition of typeof queries """
uids = self.uids
resource = current.s3db.resource("typeof_hierarchy_reference")
filter_widget = S3HierarchyFilter("typeof_hierarchy_id")
# Test with belongs on filter field
ids = str(uids["HIERARCHY1-1"])
get_vars = {"~.typeof_hierarchy_id__belongs": ids}
variable = filter_widget.variable(resource, get_vars)
expected = set(ids)
values = filter_widget._values(get_vars, variable)
self.assertEqual(values, [ids])
# Test with typeof on filter field
ids = str(uids["HIERARCHY1-1"])
get_vars = {"~.typeof_hierarchy_id__typeof": ids}
variable = filter_widget.variable(resource, get_vars)
expected = set(str(uids[uid]) for uid in ("HIERARCHY1-1",
"HIERARCHY1-1-1",
"HIERARCHY1-1-2",
))
values = filter_widget._values(get_vars, variable)
self.assertEqual(set(values), expected)
# Test with typeof on filter field, multiple values incl. None
ids = ",".join(str(_id) for _id in (uids["HIERARCHY1-1"],
uids["HIERARCHY2-1"],
None))
get_vars = {"~.typeof_hierarchy_id__typeof": ids}
variable = filter_widget.variable(resource, get_vars)
expected = set(str(uids[uid]) for uid in ("HIERARCHY1-1",
"HIERARCHY1-1-1",
"HIERARCHY1-1-2",
"HIERARCHY2-1",
"HIERARCHY2-1-1",
"HIERARCHY2-1-2",
))
expected.add(None)
values = filter_widget._values(get_vars, variable)
self.assertEqual(set(values), expected)
# Test with typeof on field in lookup table
get_vars = {"~.typeof_hierarchy_id$name__typeof": "Type 1-1"}
variable = filter_widget.variable(resource, get_vars)
expected = set(str(uids[uid]) for uid in ("HIERARCHY1-1",
"HIERARCHY1-1-1",
"HIERARCHY1-1-2",
))
values = filter_widget._values(get_vars, variable)
self.assertEqual(set(values), expected)
# Test with typeof on field in lookup table, multiple values
get_vars = {"~.typeof_hierarchy_id$name__typeof": "Type 1-1,Type 2-1"}
variable = filter_widget.variable(resource, get_vars)
expected = set(str(uids[uid]) for uid in ("HIERARCHY1-1",
"HIERARCHY1-1-1",
"HIERARCHY1-1-2",
"HIERARCHY2-1",
"HIERARCHY2-1-1",
"HIERARCHY2-1-2",
))
values = filter_widget._values(get_vars, variable)
self.assertEqual(set(values), expected)
# Test with typeof on field in lookup table, unresolvable
get_vars = {"~.typeof_hierarchy_id$name__typeof": "Type 1-3"}
variable = filter_widget.variable(resource, get_vars)
expected = set()
values = filter_widget._values(get_vars, variable)
self.assertEqual(set(values), expected)
# Test with typeof on field in lookup table, None
get_vars = {"~.typeof_hierarchy_id$name__typeof": "None"}
variable = filter_widget.variable(resource, get_vars)
expected = set()
values = filter_widget._values(get_vars, variable)
self.assertEqual(set(values), expected)
# Test preferrence of belongs in mixed queries
ids = str(uids["HIERARCHY1-1"])
get_vars = {"~.typeof_hierarchy_id__belongs": ids,
"~.typeof_hierarchy_id$name__typeof": "Type 1-1",
}
variable = filter_widget.variable(resource, get_vars)
expected = set(ids)
values = filter_widget._values(get_vars, variable)
self.assertEqual(values, [ids])
# -------------------------------------------------------------------------
def inspect_multi_query(self, query, field=None, conjunction=None, op=None):
"""
Inspect a list:reference multi-value containment query
@param query: the query
@param field: the list:reference field
@param conjunction: the conjunction operator (AND or OR)
@param op: the containment operator (usually CONTAINS)
"""
found = set()
first = query.first
second = query.second
assertEqual = self.assertEqual
inspect_multi_query = self.inspect_multi_query
if isinstance(first, Query) and isinstance(second, Query):
assertEqual(query.op, conjunction)
found |= inspect_multi_query(first,
conjunction = conjunction,
op = op)
found |= inspect_multi_query(second,
conjunction = conjunction,
op = op)
else:
assertEqual(query.first, field)
assertEqual(query.op, op)
found.add(int(query.second))
return found
# -------------------------------------------------------------------------
    def equivalent(self, l, r):
        """
        Check whether two queries are equivalent

        @param l: the first query
        @param r: the second query
        @return: True if both queries have the same operator and
                 equal operands (operands of a composite query may
                 appear in either order), else False

        NOTE(review): the recursion inspects only l's structure and
        assumes r is nested the same way - confirm for asymmetric queries.
        """
        first = l.first
        second = l.second
        equivalent = self.equivalent
        if l.op != r.op:
            return False
        if isinstance(first, Query):
            if isinstance(second, Query):
                # Composite query: operands may be swapped
                return equivalent(l.first, r.first) and \
                       equivalent(l.second, r.second) or \
                       equivalent(l.second, r.first) and \
                       equivalent(l.first, r.second)
            else:
                return equivalent(l.first, r.first)
        else:
            # Leaf query: compare operands directly
            return l.first == r.first and l.second == r.second
# -------------------------------------------------------------------------
def assertEquivalent(self, query, expected_query):
"""
Shortcut for query equivalence assertion
"""
self.assertTrue(self.equivalent(query, expected_query),
msg = "%s != %s" % (query, expected_query))
# =============================================================================
def run_suite(*test_classes):
    """
    Run the given unittest TestCase classes as one test suite.

    @param test_classes: TestCase subclasses to load tests from
    """
    loader = unittest.TestLoader()
    suite = unittest.TestSuite()
    for test_class in test_classes:
        suite.addTests(loader.loadTestsFromTestCase(test_class))
    # suite is always a TestSuite instance here, so the previous
    # "if suite is not None" guard was dead code and has been removed
    unittest.TextTestRunner(verbosity=2).run(suite)
if __name__ == "__main__":
    # Run all hierarchy-related test suites when executed as a script
    run_suite(
        S3HierarchyTests,
        S3LinkedHierarchyTests,
        S3TypeOfTests,
    )
# END ========================================================================
| mit |
snbueno/anaconda | pyanaconda/timezone.py | 2 | 6647 | #
# Copyright (C) 2012 Red Hat, Inc.
#
# This copyrighted material is made available to anyone wishing to use,
# modify, copy, or redistribute it subject to the terms and conditions of
# the GNU General Public License v.2, or (at your option) any later version.
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY expressed or implied, including the implied warranties of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General
# Public License for more details. You should have received a copy of the
# GNU General Public License along with this program; if not, write to the
# Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301, USA. Any Red Hat trademarks that are incorporated in the
# source code or documentation are not subject to the GNU General Public
# License and may only be used or replicated with the express permission of
# Red Hat, Inc.
#
# Red Hat Author(s): Vratislav Podzimek <vpodzime@redhat.com>
#
"""
Module providing functions for getting the list of timezones, writing timezone
configuration, valid timezones recognition etc.
"""
import os
import pytz
import langtable
from collections import OrderedDict
from pyanaconda import iutil
from pyanaconda.constants import THREAD_STORAGE
from pyanaconda.flags import flags
from pyanaconda.threads import threadMgr
from blivet import arch
import logging
log = logging.getLogger("anaconda")
# The following zones are not in pytz.common_timezones and
# Etc category in pytz.all_timezones includes some more,
# however confusing ones (like UCT, GMT+0, GMT-0,...)
ETC_ZONES = ['GMT+1', 'GMT+2', 'GMT+3', 'GMT+4', 'GMT+5', 'GMT+6', 'GMT+7',
             'GMT+8', 'GMT+9', 'GMT+10', 'GMT+11', 'GMT+12',
             'GMT-1', 'GMT-2', 'GMT-3', 'GMT-4', 'GMT-5', 'GMT-6', 'GMT-7',
             'GMT-8', 'GMT-9', 'GMT-10', 'GMT-11', 'GMT-12', 'GMT-13',
             'GMT-14', 'UTC', 'GMT']

# NTP implementation used by the installer (package and service name)
NTP_PACKAGE = "chrony"
NTP_SERVICE = "chronyd"

class TimezoneConfigError(Exception):
    """Exception class for timezone configuration related problems"""
    pass
def time_initialize(timezone, storage, bootloader):
    """
    Try to guess if RTC uses UTC time or not, set timezone.isUtc properly and
    set system time from RTC using the UTC guess.

    Guess is done by searching for bootable ntfs devices.

    :param timezone: ksdata.timezone object
    :param storage: blivet.Blivet instance
    :param bootloader: bootloader.Bootloader instance
    """
    if arch.isS390():
        # nothing to do on s390(x) where hwclock doesn't exist
        return

    if not timezone.isUtc and not flags.automatedInstall:
        # if set in the kickstart, no magic needed here
        threadMgr.wait(THREAD_STORAGE)
        # A bootable Windows installation (NTFS device) suggests the RTC
        # keeps local time; otherwise assume UTC
        ntfs_devs = filter(lambda dev: dev.format.name == "ntfs",
                           storage.devices)

        timezone.isUtc = not bootloader.has_windows(ntfs_devs)

    # set the system time from the RTC, honouring the UTC/localtime guess
    cmd = "hwclock"
    args = ["--hctosys"]
    if timezone.isUtc:
        args.append("--utc")
    else:
        args.append("--localtime")

    iutil.execWithRedirect(cmd, args)
def write_timezone_config(timezone, root):
    """
    Write timezone configuration for the system specified by root.

    Symlinks /etc/localtime to the zoneinfo file for timezone.timezone and
    records the UTC/LOCAL flag as the third line of /etc/adjtime.

    :param timezone: ksdata.timezone object
    :param root: path to the root
    :raise: TimezoneConfigError
    """
    # we want to create a relative symlink
    tz_file = "/usr/share/zoneinfo/" + timezone.timezone
    rooted_tz_file = os.path.normpath(root + tz_file)
    relative_path = os.path.normpath("../" + tz_file)
    link_path = os.path.normpath(root + "/etc/localtime")

    if not os.access(rooted_tz_file, os.R_OK):
        log.error("Timezone to be linked (%s) doesn't exist", rooted_tz_file)
    else:
        try:
            # os.symlink fails if link_path exists, so try to remove it first
            os.remove(link_path)
        except OSError:
            pass

        try:
            os.symlink(relative_path, link_path)
        except OSError as oserr:
            log.error("Error when symlinking timezone (from %s): %s",
                      rooted_tz_file, oserr.strerror)

    if arch.isS390():
        # there is no HW clock on s390(x)
        return

    try:
        with open(os.path.normpath(root + "/etc/adjtime"), "r") as fobj:
            lines = fobj.readlines()
    except IOError:
        lines = []
    # fall back to sane defaults if the file was missing or truncated
    # (a malformed adjtime previously caused an unhandled IndexError)
    if len(lines) < 2:
        lines = ["0.0 0 0.0\n", "0\n"]

    try:
        with open(os.path.normpath(root + "/etc/adjtime"), "w") as fobj:
            fobj.write(lines[0])
            fobj.write(lines[1])
            if timezone.isUtc:
                fobj.write("UTC\n")
            else:
                fobj.write("LOCAL\n")
    except IOError as ioerr:
        msg = "Error while writing /etc/adjtime file: %s" % ioerr.strerror
        raise TimezoneConfigError(msg)
def save_hw_clock(timezone):
    """
    Save system time to HW clock.

    :param timezone: ksdata.timezone object
    """
    if arch.isS390():
        # s390(x) has no hardware clock
        return

    clock_args = ["--systohc"]
    clock_args.append("--utc" if timezone.isUtc else "--local")

    iutil.execWithRedirect("hwclock", clock_args)
def get_preferred_timezone(territory):
    """
    Get the preferred timezone for a given territory. Note that this function
    simply returns the first timezone in the list of timezones for a given
    territory.

    :param territory: territory to get preferred timezone for
    :type territory: str
    :return: preferred timezone for the given territory or None if no found
    :rtype: str or None
    """
    zones = langtable.list_timezones(territoryId=territory)
    # langtable returns the zones ordered by preference
    return zones[0] if zones else None
def get_all_regions_and_timezones():
    """
    Get a dictionary mapping the regions to the list of their timezones.

    :rtype: dict
    """
    result = OrderedDict()
    for tz in pytz.common_timezones:
        # zone names look like "Region/City"; skip region-less entries
        region, _, city = tz.partition("/")
        if city:
            result.setdefault(region, set()).add(city)

    # add the extra Etc zones not covered by pytz.common_timezones
    result["Etc"] = set(ETC_ZONES)
    return result
def is_valid_timezone(timezone):
    """
    Check if a given string is an existing timezone.

    :type timezone: str
    :rtype: bool
    """
    if timezone in pytz.common_timezones:
        return True
    # also accept the extra Etc/* zones this module supports
    return timezone in ("Etc/" + zone for zone in ETC_ZONES)
def get_timezone(timezone):
    """
    Return a tzinfo object for a given timezone name.

    :param str timezone: the timezone name
    :rtype: datetime.tzinfo
    :raise pytz.UnknownTimeZoneError: if the name is not a known timezone
    """
    return pytz.timezone(timezone)
| gpl-2.0 |
jcrugzz/lpvisualization | django/conf/locale/pl/formats.py | 238 | 1288 | # -*- encoding: utf-8 -*-
# This file is distributed under the same license as the Django package.
#
# The *_FORMAT strings use the Django date format syntax,
# see http://docs.djangoproject.com/en/dev/ref/templates/builtins/#date
DATE_FORMAT = 'j E Y'
TIME_FORMAT = 'H:i:s'
DATETIME_FORMAT = 'j E Y H:i:s'
YEAR_MONTH_FORMAT = 'F Y'
MONTH_DAY_FORMAT = 'j F'
SHORT_DATE_FORMAT = 'd-m-Y'
SHORT_DATETIME_FORMAT = 'd-m-Y H:i:s'
FIRST_DAY_OF_WEEK = 1 # Monday

# The *_INPUT_FORMATS strings use the Python strftime format syntax,
# see http://docs.python.org/library/datetime.html#strftime-strptime-behavior
DATE_INPUT_FORMATS = (
    '%d.%m.%Y', '%d.%m.%y', # '25.10.2006', '25.10.06'
    '%Y-%m-%d', '%y-%m-%d', # '2006-10-25', '06-10-25'
    # '%d. %B %Y', '%d. %b. %Y', # '25. October 2006', '25. Oct. 2006'
)
TIME_INPUT_FORMATS = (
    '%H:%M:%S', # '14:30:59'
    '%H:%M', # '14:30'
)
DATETIME_INPUT_FORMATS = (
    '%d.%m.%Y %H:%M:%S', # '25.10.2006 14:30:59'
    '%d.%m.%Y %H:%M', # '25.10.2006 14:30'
    '%d.%m.%Y', # '25.10.2006'
    '%Y-%m-%d %H:%M:%S', # '2006-10-25 14:30:59'
    '%Y-%m-%d %H:%M', # '2006-10-25 14:30'
    '%Y-%m-%d', # '2006-10-25'
)
# Polish uses a comma as the decimal separator and a space for grouping
DECIMAL_SEPARATOR = ','
THOUSAND_SEPARATOR = u' '
NUMBER_GROUPING = 3
| bsd-3-clause |
marcore/edx-platform | lms/djangoapps/branding/tests/test_page.py | 9 | 12064 | """
Tests for branding page
"""
import datetime
from django.conf import settings
from django.contrib.auth.models import AnonymousUser
from django.http import HttpResponseRedirect
from django.test.utils import override_settings
from django.test.client import RequestFactory
from pytz import UTC
from mock import patch, Mock
from nose.plugins.attrib import attr
from edxmako.shortcuts import render_to_response
from branding.views import index
import student.views
from xmodule.modulestore.tests.django_utils import ModuleStoreTestCase
from xmodule.modulestore.tests.factories import CourseFactory
from django.core.urlresolvers import reverse
from courseware.tests.helpers import LoginEnrollmentTestCase
from util.milestones_helpers import set_prerequisite_courses
from milestones.tests.utils import MilestonesTestCaseMixin
# Feature-flag dictionaries for override_settings: identical to the
# platform FEATURES except for start-date enforcement
FEATURES_WITH_STARTDATE = settings.FEATURES.copy()
FEATURES_WITH_STARTDATE['DISABLE_START_DATES'] = False
FEATURES_WO_STARTDATE = settings.FEATURES.copy()
FEATURES_WO_STARTDATE['DISABLE_START_DATES'] = True

def mock_render_to_response(*args, **kwargs):
    """
    Mock the render_to_response function

    Delegates to the real render_to_response so tests can inspect render
    calls (via RENDER_MOCK) without changing the rendered output.
    """
    return render_to_response(*args, **kwargs)

# Mock wrapping mock_render_to_response, for call inspection in tests
RENDER_MOCK = Mock(side_effect=mock_render_to_response)
@attr('shard_1')
class AnonymousIndexPageTest(ModuleStoreTestCase):
    """
    Tests that anonymous users can access the '/' page, Need courses with start date
    """
    def setUp(self):
        super(AnonymousIndexPageTest, self).setUp()
        self.factory = RequestFactory()
        # course with a future enrollment start, so enrollment is closed
        self.course = CourseFactory.create(
            days_early_for_beta=5,
            enrollment_start=datetime.datetime.now(UTC) + datetime.timedelta(days=3),
            user_id=self.user.id,
        )

    @override_settings(FEATURES=FEATURES_WITH_STARTDATE)
    def test_none_user_index_access_with_startdate_fails(self):
        """
        This is a regression test for a bug where the incoming user is
        anonymous and start dates are being checked. It replaces a previous
        test as it solves the issue in a different way
        """
        self.client.logout()
        response = self.client.get(reverse('root'))
        self.assertEqual(response.status_code, 200)

    @override_settings(FEATURES=FEATURES_WITH_STARTDATE)
    def test_anon_user_with_startdate_index(self):
        """Anonymous users can view the index page with start dates enforced."""
        response = self.client.get('/')
        self.assertEqual(response.status_code, 200)

    @override_settings(FEATURES=FEATURES_WO_STARTDATE)
    def test_anon_user_no_startdate_index(self):
        """Anonymous users can view the index page with start dates disabled."""
        response = self.client.get('/')
        self.assertEqual(response.status_code, 200)

    def test_allow_x_frame_options(self):
        """
        Check the x-frame-option response header
        """
        # check to see that the default setting is to ALLOW iframing
        # (assertEquals is a deprecated alias - use assertEqual)
        resp = self.client.get('/')
        self.assertEqual(resp['X-Frame-Options'], 'ALLOW')

    @override_settings(X_FRAME_OPTIONS='DENY')
    def test_deny_x_frame_options(self):
        """
        Check the x-frame-option response header
        """
        # check to see that the override value is honored
        resp = self.client.get('/')
        self.assertEqual(resp['X-Frame-Options'], 'DENY')

    def test_edge_redirect_to_login(self):
        """
        Test edge homepage redirect to lms login.
        """
        request = self.factory.get('/')
        request.user = AnonymousUser()

        # HTTP Host changed to edge.
        request.META["HTTP_HOST"] = "edge.edx.org"
        response = index(request)

        # Response should be instance of HttpResponseRedirect.
        self.assertIsInstance(response, HttpResponseRedirect)
        # Location should be "/login".
        self.assertEqual(response._headers.get("location")[1], "/login")  # pylint: disable=protected-access
@attr('shard_1')
class PreRequisiteCourseCatalog(ModuleStoreTestCase, LoginEnrollmentTestCase, MilestonesTestCaseMixin):
    """
    Test to simulate and verify fix for disappearing courses in
    course catalog when using pre-requisite courses
    """
    @patch.dict(settings.FEATURES, {'ENABLE_PREREQUISITE_COURSES': True, 'MILESTONES_APP': True})
    def test_course_with_prereq(self):
        """
        Simulate having a course which has closed enrollments that has
        a pre-req course
        """
        pre_requisite_course = CourseFactory.create(
            org='edX',
            course='900',
            display_name='pre requisite course',
            emit_signals=True,
        )
        pre_requisite_courses = [unicode(pre_requisite_course.id)]

        # for this failure to occur, the enrollment window needs to be in the past
        course = CourseFactory.create(
            org='edX',
            course='1000',
            display_name='course that has pre requisite',
            # closed enrollment
            enrollment_start=datetime.datetime(2013, 1, 1),
            enrollment_end=datetime.datetime(2014, 1, 1),
            start=datetime.datetime(2013, 1, 1),
            end=datetime.datetime(2030, 1, 1),
            pre_requisite_courses=pre_requisite_courses,
            emit_signals=True,
        )
        # register the pre-requisite as a milestone for the course
        set_prerequisite_courses(course.id, pre_requisite_courses)

        resp = self.client.get('/')
        self.assertEqual(resp.status_code, 200)
        # make sure both courses are visible in the catalog
        self.assertIn('pre requisite course', resp.content)
        self.assertIn('course that has pre requisite', resp.content)
@attr('shard_1')
class IndexPageCourseCardsSortingTests(ModuleStoreTestCase):
"""
Test for Index page course cards sorting
"""
    def setUp(self):
        super(IndexPageCourseCardsSortingTests, self).setUp()
        # three courses with distinct start/announcement dates, to verify
        # the sorting of course cards on the index page
        self.starting_later = CourseFactory.create(
            org='MITx',
            number='1000',
            display_name='Starting later, Announced later',
            metadata={
                'start': datetime.datetime.now(UTC) + datetime.timedelta(days=4),
                'announcement': datetime.datetime.now(UTC) + datetime.timedelta(days=3),
            },
            emit_signals=True,
        )
        self.starting_earlier = CourseFactory.create(
            org='MITx',
            number='1001',
            display_name='Starting earlier, Announced earlier',
            metadata={
                'start': datetime.datetime.now(UTC) + datetime.timedelta(days=2),
                'announcement': datetime.datetime.now(UTC) + datetime.timedelta(days=1),
            },
            emit_signals=True,
        )
        # course without explicit start/announcement dates
        self.course_with_default_start_date = CourseFactory.create(
            org='MITx',
            number='1002',
            display_name='Tech Beta Course',
            emit_signals=True,
        )
        self.factory = RequestFactory()
@patch('student.views.render_to_response', RENDER_MOCK)
@patch('courseware.views.views.render_to_response', RENDER_MOCK)
@patch.dict('django.conf.settings.FEATURES', {'ENABLE_COURSE_DISCOVERY': False})
def test_course_discovery_off(self):
"""
Asserts that the Course Discovery UI elements follow the
feature flag settings
"""
response = self.client.get('/')
self.assertEqual(response.status_code, 200)
# assert that the course discovery UI is not present
self.assertNotIn('Search for a course', response.content)
# check the /courses view
response = self.client.get(reverse('branding.views.courses'))
self.assertEqual(response.status_code, 200)
# assert that the course discovery UI is not present
self.assertNotIn('Search for a course', response.content)
self.assertNotIn('<aside aria-label="Refine Your Search" class="search-facets phone-menu">', response.content)
# make sure we have the special css class on the section
self.assertIn('<div class="courses no-course-discovery"', response.content)
@patch('student.views.render_to_response', RENDER_MOCK)
@patch('courseware.views.views.render_to_response', RENDER_MOCK)
@patch.dict('django.conf.settings.FEATURES', {'ENABLE_COURSE_DISCOVERY': True})
def test_course_discovery_on(self):
"""
Asserts that the Course Discovery UI elements follow the
feature flag settings
"""
response = self.client.get('/')
self.assertEqual(response.status_code, 200)
# assert that the course discovery UI is not present
self.assertIn('Search for a course', response.content)
# check the /courses view
response = self.client.get(reverse('branding.views.courses'))
self.assertEqual(response.status_code, 200)
# assert that the course discovery UI is present
self.assertIn('Search for a course', response.content)
self.assertIn('<aside aria-label="Refine Your Search" class="search-facets phone-menu">', response.content)
self.assertIn('<div class="courses"', response.content)
@patch('student.views.render_to_response', RENDER_MOCK)
@patch('courseware.views.views.render_to_response', RENDER_MOCK)
@patch.dict('django.conf.settings.FEATURES', {'ENABLE_COURSE_DISCOVERY': False})
def test_course_cards_sorted_by_default_sorting(self):
response = self.client.get('/')
self.assertEqual(response.status_code, 200)
((template, context), _) = RENDER_MOCK.call_args # pylint: disable=unpacking-non-sequence
self.assertEqual(template, 'index.html')
# by default the courses will be sorted by their creation dates, earliest first.
self.assertEqual(context['courses'][0].id, self.starting_earlier.id)
self.assertEqual(context['courses'][1].id, self.starting_later.id)
self.assertEqual(context['courses'][2].id, self.course_with_default_start_date.id)
# check the /courses view
response = self.client.get(reverse('branding.views.courses'))
self.assertEqual(response.status_code, 200)
((template, context), _) = RENDER_MOCK.call_args # pylint: disable=unpacking-non-sequence
self.assertEqual(template, 'courseware/courses.html')
# by default the courses will be sorted by their creation dates, earliest first.
self.assertEqual(context['courses'][0].id, self.starting_earlier.id)
self.assertEqual(context['courses'][1].id, self.starting_later.id)
self.assertEqual(context['courses'][2].id, self.course_with_default_start_date.id)
@patch('student.views.render_to_response', RENDER_MOCK)
@patch('courseware.views.views.render_to_response', RENDER_MOCK)
@patch.dict('django.conf.settings.FEATURES', {'ENABLE_COURSE_SORTING_BY_START_DATE': False})
@patch.dict('django.conf.settings.FEATURES', {'ENABLE_COURSE_DISCOVERY': False})
def test_course_cards_sorted_by_start_date_disabled(self):
response = self.client.get('/')
self.assertEqual(response.status_code, 200)
((template, context), _) = RENDER_MOCK.call_args # pylint: disable=unpacking-non-sequence
self.assertEqual(template, 'index.html')
# now the courses will be sorted by their announcement dates.
self.assertEqual(context['courses'][0].id, self.starting_later.id)
self.assertEqual(context['courses'][1].id, self.starting_earlier.id)
self.assertEqual(context['courses'][2].id, self.course_with_default_start_date.id)
# check the /courses view as well
response = self.client.get(reverse('branding.views.courses'))
self.assertEqual(response.status_code, 200)
((template, context), _) = RENDER_MOCK.call_args # pylint: disable=unpacking-non-sequence
self.assertEqual(template, 'courseware/courses.html')
# now the courses will be sorted by their announcement dates.
self.assertEqual(context['courses'][0].id, self.starting_later.id)
self.assertEqual(context['courses'][1].id, self.starting_earlier.id)
self.assertEqual(context['courses'][2].id, self.course_with_default_start_date.id)
| agpl-3.0 |
jicruz/heroku-bot | lib/pip/_vendor/requests/packages/chardet/eucjpprober.py | 2919 | 3678 | ######################## BEGIN LICENSE BLOCK ########################
# The Original Code is mozilla.org code.
#
# The Initial Developer of the Original Code is
# Netscape Communications Corporation.
# Portions created by the Initial Developer are Copyright (C) 1998
# the Initial Developer. All Rights Reserved.
#
# Contributor(s):
# Mark Pilgrim - port to Python
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
# 02110-1301 USA
######################### END LICENSE BLOCK #########################
import sys
from . import constants
from .mbcharsetprober import MultiByteCharSetProber
from .codingstatemachine import CodingStateMachine
from .chardistribution import EUCJPDistributionAnalysis
from .jpcntx import EUCJPContextAnalysis
from .mbcssm import EUCJPSMModel
class EUCJPProber(MultiByteCharSetProber):
    # Detects EUC-JP by running bytes through a coding state machine and,
    # for each completed character, feeding it to a Japanese context
    # analyser and a character-distribution analyser.
    def __init__(self):
        MultiByteCharSetProber.__init__(self)
        self._mCodingSM = CodingStateMachine(EUCJPSMModel)
        self._mDistributionAnalyzer = EUCJPDistributionAnalysis()
        self._mContextAnalyzer = EUCJPContextAnalysis()
        self.reset()
    def reset(self):
        # Reset both the base prober state and the context analyser.
        MultiByteCharSetProber.reset(self)
        self._mContextAnalyzer.reset()
    def get_charset_name(self):
        return "EUC-JP"
    def feed(self, aBuf):
        # Feed a chunk of bytes; returns the detection state afterwards.
        aLen = len(aBuf)
        for i in range(0, aLen):
            # PY3K: aBuf is a byte array, so aBuf[i] is an int, not a byte
            codingState = self._mCodingSM.next_state(aBuf[i])
            if codingState == constants.eError:
                # Byte sequence is illegal for EUC-JP: rule this charset out.
                if constants._debug:
                    sys.stderr.write(self.get_charset_name()
                                     + ' prober hit error at byte ' + str(i)
                                     + '\n')
                self._mState = constants.eNotMe
                break
            elif codingState == constants.eItsMe:
                self._mState = constants.eFoundIt
                break
            elif codingState == constants.eStart:
                # A complete character just ended; analyse it together with
                # the preceding byte (characters can span feed() calls).
                charLen = self._mCodingSM.get_current_charlen()
                if i == 0:
                    self._mLastChar[1] = aBuf[0]
                    self._mContextAnalyzer.feed(self._mLastChar, charLen)
                    self._mDistributionAnalyzer.feed(self._mLastChar, charLen)
                else:
                    self._mContextAnalyzer.feed(aBuf[i - 1:i + 1], charLen)
                    self._mDistributionAnalyzer.feed(aBuf[i - 1:i + 1],
                                                     charLen)
        # Remember the final byte so a character split across feed() calls
        # can be reassembled on the next call.
        self._mLastChar[0] = aBuf[aLen - 1]
        if self.get_state() == constants.eDetecting:
            # Shortcut: declare success once the context analyser has enough
            # data and confidence is above the threshold.
            if (self._mContextAnalyzer.got_enough_data() and
                    (self.get_confidence() > constants.SHORTCUT_THRESHOLD)):
                self._mState = constants.eFoundIt
        return self.get_state()
    def get_confidence(self):
        # Overall confidence is the higher of the two analysers' confidences.
        contxtCf = self._mContextAnalyzer.get_confidence()
        distribCf = self._mDistributionAnalyzer.get_confidence()
        return max(contxtCf, distribCf)
| gpl-3.0 |
laborautonomo/youtube-dl | youtube_dl/extractor/roxwel.py | 9 | 2026 | import re
import json
from .common import InfoExtractor
from ..utils import unified_strdate, determine_ext
class RoxwelIE(InfoExtractor):
    """Extractor for roxwel.com player pages.

    Metadata comes from a JSON API; the media itself is served over RTMP,
    so downloading requires rtmpdump.
    """
    _VALID_URL = r'https?://www\.roxwel\.com/player/(?P<filename>.+?)(\.|\?|$)'
    _TEST = {
        u'url': u'http://www.roxwel.com/player/passionpittakeawalklive.html',
        u'file': u'passionpittakeawalklive.flv',
        u'md5': u'd9dea8360a1e7d485d2206db7fe13035',
        u'info_dict': {
            u'title': u'Take A Walk (live)',
            u'uploader': u'Passion Pit',
            u'description': u'Passion Pit performs "Take A Walk\" live at The Backyard in Austin, Texas. ',
        },
        u'skip': u'Requires rtmpdump',
    }
    def _real_extract(self, url):
        match = re.match(self._VALID_URL, url)
        slug = match.group('filename')
        # Fetch the video metadata from the JSON API.
        api_url = 'http://www.roxwel.com/api/videos/%s' % slug
        api_page = self._download_webpage(api_url, slug,
                                          u'Downloading video info')
        self.report_extraction(slug)
        metadata = json.loads(api_page)
        # Pick the highest advertised flv_<rate> bitrate.
        flv_rates = sorted(int(rate.replace('flv_', ''))
                           for rate in metadata['media_rates']
                           if rate.startswith('flv_'))
        top_rate = flv_rates[-1]
        stream_req_url = 'http://roxwel.com/pl_one_time.php?filename=%s&quality=%s' % (slug, top_rate)
        rtmp_url = self._download_webpage(stream_req_url, slug, u'Downloading video url')
        if determine_ext(rtmp_url) == 'f4v':
            # f4v streams need an explicit mp4: playpath prefix.
            rtmp_url = rtmp_url.replace(slug, 'mp4:%s' % slug)
        return {
            'id': slug,
            'title': metadata['title'],
            'url': rtmp_url,
            'ext': 'flv',
            'description': metadata['description'],
            'thumbnail': metadata.get('player_image_url') or metadata.get('image_url_large'),
            'uploader': metadata['artist'],
            'uploader_id': metadata['artistname'],
            'upload_date': unified_strdate(metadata['dbdate']),
        }
| unlicense |
mohammed-alfatih/servo | tests/wpt/web-platform-tests/webvtt/webvtt-file-format-parsing/webvtt-cue-text-parsing-rules/buildtests.py | 132 | 1947 | #!/usr/bin/python
import os
import urllib
import hashlib
# Template for each generated test page: a testharness.js document that
# passes the serialized cases to runTests() from ../common.js.  The two
# %s slots are the test name and the comma-joined test-case objects.
doctmpl = """<!doctype html>
<title>WebVTT cue data parser test %s</title>
<style>video { display:none }</style>
<script src=/resources/testharness.js></script>
<script src=/resources/testharnessreport.js></script>
<script src=/html/syntax/parsing/template.js></script>
<script src=/html/syntax/parsing/common.js></script>
<script src=../common.js></script>
<div id=log></div>
<script>
runTests([
%s
]);
</script>"""
# One serialized test case: name is a SHA-1 digest of the raw input;
# input and expected are URL-quoted strings.
testobj = "{name:'%s', input:'%s', expected:'%s'}"
def appendtest(tests, input, expected):
    """Append one serialized test-case entry to *tests*.

    The entry name is the SHA-1 hex digest of the raw input; the input and
    expected strings are URL-quoted with their trailing newline stripped.
    """
    name = hashlib.sha1(input).hexdigest()
    quoted_input = urllib.quote(input[:-1])
    quoted_expected = urllib.quote(expected[:-1])
    tests.append(testobj % (name, quoted_input, quoted_expected))
# Driver: convert each .dat file under dat/ into a generated HTML test page
# under tests/.  Each .dat file holds sections introduced by '#data',
# '#errors' and '#document-fragment' header lines; a blank line inside a
# '#document-fragment' section (or end of file) terminates one test case.
# NOTE(review): 'file' and 'input' shadow Python builtins; harmless here
# but worth renaming if this script is ever reworked.
files = os.listdir('dat/')
for file in files:
    # Skip subdirectories and hidden files.
    if os.path.isdir('dat/'+file) or file[0] == ".":
        continue
    tests = []
    input = ""
    expected = ""
    state = ""
    f = open('dat/'+file)
    while 1:
        line = f.readline()
        if not line:
            # End of file: flush any case still being accumulated.
            if state != "":
                appendtest(tests, input, expected)
                input = ""
                expected = ""
                state = ""
            break
        if line[0] == "#":
            # Section header switches the parser state.
            state = line
            if line == "#document-fragment\n":
                expected = expected + line
        elif state == "#data\n":
            input = input + line
        elif state == "#errors\n":
            # Error listings are ignored by these generated tests.
            pass
        elif state == "#document-fragment\n":
            if line == "\n":
                # Blank line ends the current test case.
                appendtest(tests, input, expected)
                input = ""
                expected = ""
                state = ""
            else:
                expected = expected + line
        else:
            raise Exception("failed to parse file "+file+" line:"+line+" (state: "+state+")")
    f.close()
    # Write the generated test page next to the source name, .dat -> .html.
    barename = file.replace(".dat", "")
    out = open('tests/'+barename+".html", "w")
    out.write(doctmpl % (barename, ",\n".join(tests)))
    out.close()
| mpl-2.0 |
edx/edx-enterprise | integrated_channels/degreed/client.py | 1 | 9678 | # -*- coding: utf-8 -*-
"""
Client for connecting to Degreed.
"""
import datetime
import logging
import time
import requests
from six.moves.urllib.parse import urljoin # pylint: disable=import-error
from django.apps import apps
from integrated_channels.exceptions import ClientError
from integrated_channels.integrated_channel.client import IntegratedChannelApiClient
LOGGER = logging.getLogger(__name__)
class DegreedAPIClient(IntegratedChannelApiClient):
    """
    Client for connecting to Degreed.
    Specifically, this class supports obtaining access tokens and posting to the courses and
    completion status endpoints.
    """
    # OAuth scopes Degreed expects for the two provider APIs.
    CONTENT_PROVIDER_SCOPE = 'provider_content'
    COMPLETION_PROVIDER_SCOPE = 'provider_completion'
    # Intended request timeout in seconds; see NOTE in _create_session.
    SESSION_TIMEOUT = 60
    def __init__(self, enterprise_configuration):
        """
        Instantiate a new client.
        Args:
            enterprise_configuration (DegreedEnterpriseCustomerConfiguration): An enterprise customers's
            configuration model for connecting with Degreed
        """
        super().__init__(enterprise_configuration)
        self.global_degreed_config = apps.get_model('degreed', 'DegreedGlobalConfiguration').current()
        # Lazily created requests.Session and the expiry time of its token.
        self.session = None
        self.expires_at = None
    def create_assessment_reporting(self, user_id, payload):
        """
        Not implemented yet.
        """
    def cleanup_duplicate_assignment_records(self, courses):
        """
        Not implemented yet.
        """
        LOGGER.error("Degreed integrated channel does not yet support assignment deduplication.")
    def create_course_completion(self, user_id, payload):  # pylint: disable=unused-argument
        """
        Send a completion status payload to the Degreed Completion Status endpoint
        Args:
            user_id: Unused.
            payload: JSON encoded object (serialized from DegreedLearnerDataTransmissionAudit)
                containing completion status fields per Degreed documentation.
        Returns:
            A tuple containing the status code and the body of the response.
        Raises:
            HTTPError: if we received a failure response code from Degreed
        """
        return self._post(
            urljoin(
                self.enterprise_configuration.degreed_base_url,
                self.global_degreed_config.completion_status_api_path
            ),
            payload,
            self.COMPLETION_PROVIDER_SCOPE
        )
    def delete_course_completion(self, user_id, payload):  # pylint: disable=unused-argument
        """
        Delete a completion status previously sent to the Degreed Completion Status endpoint
        Args:
            user_id: Unused.
            payload: JSON encoded object (serialized from DegreedLearnerDataTransmissionAudit)
                containing the required completion status fields for deletion per Degreed documentation.
        Returns:
            A tuple containing the status code and the body of the response.
        Raises:
            HTTPError: if we received a failure response code from Degreed
        """
        return self._delete(
            urljoin(
                self.enterprise_configuration.degreed_base_url,
                self.global_degreed_config.completion_status_api_path
            ),
            payload,
            self.COMPLETION_PROVIDER_SCOPE
        )
    def create_content_metadata(self, serialized_data):
        """
        Create content metadata using the Degreed course content API.
        Args:
            serialized_data: JSON-encoded object containing content metadata.
        Raises:
            ClientError: If Degreed API request fails.
        """
        self._sync_content_metadata(serialized_data, 'post')
    def update_content_metadata(self, serialized_data):
        """
        Update content metadata using the Degreed course content API.
        Args:
            serialized_data: JSON-encoded object containing content metadata.
        Raises:
            ClientError: If Degreed API request fails.
        """
        # NOTE(review): updates POST just like create_content_metadata;
        # this looks intentional (upsert-style endpoint) — confirm against
        # the Degreed course API docs.
        self._sync_content_metadata(serialized_data, 'post')
    def delete_content_metadata(self, serialized_data):
        """
        Delete content metadata using the Degreed course content API.
        Args:
            serialized_data: JSON-encoded object containing content metadata.
        Raises:
            ClientError: If Degreed API request fails.
        """
        self._sync_content_metadata(serialized_data, 'delete')
    def _sync_content_metadata(self, serialized_data, http_method):
        """
        Synchronize content metadata using the Degreed course content API.
        Args:
            serialized_data: JSON-encoded object containing content metadata.
            http_method: The HTTP method to use for the API request
                ('post' or 'delete'; dispatched to self._post/_delete).
        Raises:
            ClientError: If Degreed API request fails.
        """
        try:
            # Dispatch to self._post or self._delete by name.
            status_code, response_body = getattr(self, '_' + http_method)(
                urljoin(self.enterprise_configuration.degreed_base_url, self.global_degreed_config.course_api_path),
                serialized_data,
                self.CONTENT_PROVIDER_SCOPE
            )
        except requests.exceptions.RequestException as exc:
            # Network-level failure (connection, timeout, etc.).
            raise ClientError(
                'DegreedAPIClient request failed: {error} {message}'.format(
                    error=exc.__class__.__name__,
                    message=str(exc)
                )
            ) from exc
        if status_code >= 400:
            # HTTP-level failure reported by Degreed.
            raise ClientError(
                'DegreedAPIClient request failed with status {status_code}: {message}'.format(
                    status_code=status_code,
                    message=response_body
                )
            )
    def _post(self, url, data, scope):
        """
        Make a POST request using the session object to a Degreed endpoint.
        Args:
            url (str): The url to send a POST request to.
            data (str): The json encoded payload to POST.
            scope (str): Must be one of the scopes Degreed expects:
                        - `CONTENT_PROVIDER_SCOPE`
                        - `COMPLETION_PROVIDER_SCOPE`
        Returns:
            A tuple of (status code, response body text).
        """
        self._create_session(scope)
        response = self.session.post(url, data=data)
        return response.status_code, response.text
    def _delete(self, url, data, scope):
        """
        Make a DELETE request using the session object to a Degreed endpoint.
        Args:
            url (str): The url to send a DELETE request to.
            data (str): The json encoded payload to DELETE.
            scope (str): Must be one of the scopes Degreed expects:
                        - `CONTENT_PROVIDER_SCOPE`
                        - `COMPLETION_PROVIDER_SCOPE`
        Returns:
            A tuple of (status code, response body text).
        """
        self._create_session(scope)
        response = self.session.delete(url, data=data)
        return response.status_code, response.text
    def _create_session(self, scope):
        """
        Instantiate a new session object for use in connecting with Degreed.
        Reuses the existing session until its OAuth token expires.
        """
        now = datetime.datetime.utcnow()
        if self.session is None or self.expires_at is None or now >= self.expires_at:
            # Create a new session with a valid token
            if self.session:
                self.session.close()
            oauth_access_token, expires_at = self._get_oauth_access_token(
                self.enterprise_configuration.key,
                self.enterprise_configuration.secret,
                self.enterprise_configuration.degreed_user_id,
                self.enterprise_configuration.degreed_user_password,
                scope
            )
            session = requests.Session()
            # NOTE(review): requests.Session has no built-in 'timeout'
            # attribute — assigning it here does not apply a timeout to
            # requests made through the session; a timeout= argument would
            # need to be passed per request. Confirm intended behavior.
            session.timeout = self.SESSION_TIMEOUT
            session.headers['Authorization'] = 'Bearer {}'.format(oauth_access_token)
            session.headers['content-type'] = 'application/json'
            self.session = session
            self.expires_at = expires_at
    def _get_oauth_access_token(self, client_id, client_secret, user_id, user_password, scope):
        """ Retrieves OAuth 2.0 access token using the client credentials grant.
        Args:
            client_id (str): API client ID
            client_secret (str): API client secret
            user_id (str): Degreed company ID
            user_password (str): Degreed user password
            scope (str): Must be one of the scopes Degreed expects:
                        - `CONTENT_PROVIDER_SCOPE`
                        - `COMPLETION_PROVIDER_SCOPE`
        Returns:
            tuple: Tuple containing access token string and expiration datetime.
        Raises:
            HTTPError: If we received a failure response code from Degreed.
            ClientError: If an unexpected response format was received that we could not parse.
        """
        response = requests.post(
            urljoin(self.enterprise_configuration.degreed_base_url, self.global_degreed_config.oauth_api_path),
            data={
                'grant_type': 'password',
                'username': user_id,
                'password': user_password,
                'scope': scope,
            },
            auth=(client_id, client_secret),
            headers={'Content-Type': 'application/x-www-form-urlencoded'}
        )
        try:
            data = response.json()
            # Convert the relative 'expires_in' (seconds) to an absolute
            # UTC datetime for comparison in _create_session.
            expires_at = data['expires_in'] + int(time.time())
            return data['access_token'], datetime.datetime.utcfromtimestamp(expires_at)
        except (KeyError, ValueError) as error:
            # Missing token fields or a non-JSON body.
            raise ClientError(response.text, response.status_code) from error
| agpl-3.0 |
JohnWinter/ThinkStats2 | code/survival.py | 65 | 17881 | """This file contains code for use with "Think Stats",
by Allen B. Downey, available from greenteapress.com
Copyright 2014 Allen B. Downey
License: GNU GPLv3 http://www.gnu.org/licenses/gpl.html
"""
from __future__ import print_function, division
import numpy as np
import pandas
import nsfg
import thinkstats2
import thinkplot
"""
Outcome codes from http://www.icpsr.umich.edu/nsfg6/Controller?
displayPage=labelDetails&fileCode=PREG§ion=&subSec=8016&srtLabel=611932
1 LIVE BIRTH 9148
2 INDUCED ABORTION 1862
3 STILLBIRTH 120
4 MISCARRIAGE 1921
5 ECTOPIC PREGNANCY 190
6 CURRENT PREGNANCY 352
"""
# File formats for every figure saved via thinkplot.Save.
FORMATS = ['pdf', 'eps', 'png']
class SurvivalFunction(object):
    """Represents a survival function, S(t).

    Wraps a Cdf of lifetimes; S(t) = 1 - CDF(t).
    """
    def __init__(self, cdf, label=''):
        self.cdf = cdf
        self.label = label or cdf.label
    @property
    def ts(self):
        # Sorted lifetimes (x-values of the underlying Cdf).
        return self.cdf.xs
    @property
    def ss(self):
        # Survival probabilities corresponding to ts.
        return 1 - self.cdf.ps
    def __getitem__(self, t):
        return self.Prob(t)
    def Prob(self, t):
        """Returns S(t), the probability that corresponds to value t.
        t: time
        returns: float probability
        """
        return 1 - self.cdf.Prob(t)
    def Probs(self, xs):
        """Gets probabilities for a sequence of values."""
        return [self.Prob(x) for x in xs]
    def Mean(self):
        """Mean survival time."""
        return self.cdf.Mean()
    def Items(self):
        """Sorted list of (t, s) pairs."""
        return zip(self.ts, self.ss)
    def Render(self):
        """Generates a sequence of points suitable for plotting.
        returns: tuple of (sorted times, survival function)
        """
        return self.ts, self.ss
    def MakeHazard(self, label=''):
        """Computes the hazard function.
        returns: HazardFunction that maps times to hazard rates
        """
        ss = self.ss
        lams = {}
        for i, t in enumerate(self.ts[:-1]):
            # lambda(t): fraction of those surviving to t whose lifetime
            # ends in the interval starting at t.
            hazard = (ss[i] - ss[i+1]) / ss[i]
            lams[t] = hazard
        return HazardFunction(lams, label=label)
    def MakePmf(self, filler=None):
        """Makes a PMF of lifetimes.
        filler: value to replace missing values
        returns: Pmf
        """
        pmf = thinkstats2.Pmf()
        for val, prob in self.cdf.Items():
            pmf.Set(val, prob)
        # Probability mass never observed to end (the censored tail);
        # optionally assigned to the 'filler' value.
        cutoff = self.cdf.ps[-1]
        if filler is not None:
            pmf[filler] = 1-cutoff
        return pmf
    def RemainingLifetime(self, filler=None, func=thinkstats2.Pmf.Mean):
        """Computes remaining lifetime as a function of age.
        filler: value used for censored lifetimes (see MakePmf)
        func: function from conditional Pmf to expected liftime
        returns: Series that maps from age to remaining lifetime
        """
        pmf = self.MakePmf(filler=filler)
        d = {}
        # Progressively condition on surviving past each t by zeroing the
        # mass at t and renormalizing; note this mutates pmf in place, so
        # the iteration order (ascending t) is essential.
        for t in sorted(pmf.Values())[:-1]:
            pmf[t] = 0
            pmf.Normalize()
            d[t] = func(pmf) - t
            #print(t, d[t])
        return pandas.Series(d)
class HazardFunction(object):
    """Represents a hazard function, lambda(t)."""
    def __init__(self, d, label=''):
        """Initializes the hazard function.

        d: dictionary (or anything that can initialize a Series)
        label: string
        """
        self.series = pandas.Series(d)
        self.label = label
    def __getitem__(self, t):
        """Looks up the hazard rate at time t."""
        return self.series[t]
    def Render(self):
        """Generates a sequence of points suitable for plotting.

        returns: tuple of (sorted times, hazard rates)
        """
        times = self.series.index
        rates = self.series.values
        return times, rates
    def MakeSurvival(self, label=''):
        """Makes the survival function implied by this hazard function.

        returns: SurvivalFunction
        """
        times = self.series.index
        # S(t) is the running product of the per-interval survival
        # probabilities (1 - lambda(t)).
        survival = (1 - self.series).cumprod()
        cdf = thinkstats2.Cdf(times, 1 - survival)
        return SurvivalFunction(cdf, label=label)
    def Extend(self, other):
        """Extends this hazard function by copying the tail from another.

        other: HazardFunction
        """
        cutoff = self.series.index[-1]
        tail = other.series[other.series.index > cutoff]
        self.series = pandas.concat([self.series, tail])
def ConditionalSurvival(pmf, t0):
    """Computes the conditional survival function.

    Probability that duration exceeds t0+t, given that duration >= t0.
    The returned function is over remaining time (t - t0); the mass is
    deliberately left unnormalized, as in the accompanying text.

    pmf: Pmf of durations
    t0: minimum time
    returns: SurvivalFunction
    """
    conditional = thinkstats2.Pmf()
    for duration, prob in pmf.Items():
        if duration < t0:
            continue
        conditional.Set(duration - t0, prob)
    return SurvivalFunction(thinkstats2.Cdf(conditional))
def PlotConditionalSurvival(durations):
    """Plots conditional survival curves for a range of t0.

    durations: list of durations
    """
    pmf = thinkstats2.Pmf(durations)
    # Condition on having already lasted 8, 16, 24 and 32 weeks.
    times = [8, 16, 24, 32]
    thinkplot.PrePlot(len(times))
    for t0 in times:
        sf = ConditionalSurvival(pmf, t0)
        label = 't0=%d' % t0
        thinkplot.Plot(sf, label=label)
    thinkplot.Show()
def PlotSurvival(complete):
    """Plots survival and hazard curves (two stacked subplots).

    complete: list of complete lifetimes
    """
    thinkplot.PrePlot(3, rows=2)
    cdf = thinkstats2.Cdf(complete, label='cdf')
    sf = SurvivalFunction(cdf, label='survival')
    # Debug output: CDF and S at t=13 weeks.
    print(cdf[13])
    print(sf[13])
    # Top subplot: survival function over the (faint) CDF.
    thinkplot.Plot(sf)
    thinkplot.Cdf(cdf, alpha=0.2)
    thinkplot.Config()
    # Bottom subplot: the derived hazard function.
    thinkplot.SubPlot(2)
    hf = sf.MakeHazard(label='hazard')
    print(hf[39])
    thinkplot.Plot(hf)
    thinkplot.Config(ylim=[0, 0.75])
def PlotHazard(complete, ongoing):
    """Plots the hazard function and survival function.

    Contrasts the naive S(t) computed from complete cases only with the
    Kaplan-Meier estimate that also uses censored (ongoing) cases.

    complete: list of complete lifetimes
    ongoing: list of ongoing lifetimes
    """
    # plot S(t) based on only complete pregnancies
    cdf = thinkstats2.Cdf(complete)
    sf = SurvivalFunction(cdf)
    thinkplot.Plot(sf, label='old S(t)', alpha=0.1)
    thinkplot.PrePlot(2)
    # plot the hazard function
    hf = EstimateHazardFunction(complete, ongoing)
    thinkplot.Plot(hf, label='lams(t)', alpha=0.5)
    # plot the survival function
    sf = hf.MakeSurvival()
    thinkplot.Plot(sf, label='S(t)')
    thinkplot.Show(xlabel='t (weeks)')
def EstimateHazardFunction(complete, ongoing, label='', shift=1e-7):
    """Estimates the hazard function by Kaplan-Meier.

    http://en.wikipedia.org/wiki/Kaplan%E2%80%93Meier_estimator

    complete: list of complete lifetimes
    ongoing: list of ongoing lifetimes
    label: string
    shift: presumed additional survival of ongoing

    returns: HazardFunction
    """
    # pmf and sf of complete lifetimes
    n = len(complete)
    hist_complete = thinkstats2.Hist(complete)
    sf_complete = SurvivalFunction(thinkstats2.Cdf(complete))
    # sf for ongoing lifetimes
    # The shift is a regrettable hack needed to deal with simultaneity.
    # If a case is complete at some t and another case is ongoing
    # at t, we presume that the ongoing case exceeds t+shift.
    m = len(ongoing)
    cdf = thinkstats2.Cdf(ongoing).Shift(shift)
    sf_ongoing = SurvivalFunction(cdf)
    lams = {}
    # For each observed completion time t, the hazard is the number that
    # ended at t divided by the number still at risk at t (those ending
    # at t plus complete and censored cases surviving past t).
    for t, ended in sorted(hist_complete.Items()):
        at_risk = ended + n * sf_complete[t] + m * sf_ongoing[t]
        lams[t] = ended / at_risk
        #print(t, ended, n * sf_complete[t], m * sf_ongoing[t], at_risk)
    return HazardFunction(lams, label=label)
def CleanData(resp):
    """Cleans a respondent DataFrame in place.

    Adds three derived columns:
      agemarry: age at first marriage in years (NaN if never married)
      age: age at time of interview in years
      decade: decade of birth, encoded as (year - 1900) // 10

    resp: DataFrame of respondents; must have cmmarrhx, cmbirth and
        cmintvw columns (century-month encoded)
    """
    # Recode the NSFG sentinel codes for "no first marriage" as NaN.
    # Assigning the replaced column back — instead of
    # resp.cmmarrhx.replace(..., inplace=True) — avoids mutating through a
    # column accessor, which is a chained operation that is unreliable
    # under pandas copy-on-write and raises FutureWarnings in new pandas.
    resp['cmmarrhx'] = resp.cmmarrhx.replace([9997, 9998, 9999], np.nan)
    # Century-months count months from December 1899; divide by 12 for years.
    resp['agemarry'] = (resp.cmmarrhx - resp.cmbirth) / 12.0
    resp['age'] = (resp.cmintvw - resp.cmbirth) / 12.0
    month0 = pandas.to_datetime('1899-12-15')
    dates = [month0 + pandas.DateOffset(months=cm)
             for cm in resp.cmbirth]
    resp['decade'] = (pandas.DatetimeIndex(dates).year - 1900) // 10
def AddLabelsByDecade(groups, **options):
    """Draws invisible placeholder points so decade labels reach the legend.

    groups: GroupBy object keyed by decade
    """
    thinkplot.PrePlot(len(groups))
    for decade, _ in groups:
        # A single throwaway point gives the legend an entry per decade.
        thinkplot.Plot([15], [1], label='%d0s' % decade, **options)
def EstimateSurvivalByDecade(groups, **options):
    """Plots one estimated survival curve per decade group.

    groups: GroupBy object
    """
    thinkplot.PrePlot(len(groups))
    for _, members in groups:
        survival = EstimateSurvival(members)[1]
        thinkplot.Plot(survival, **options)
def PlotPredictionsByDecade(groups, **options):
    """Groups respondents by decade and plots predicted survival curves.

    Each cohort's hazard function is extended with the tail of the
    previous (older) cohort's, so predictions cascade from oldest to
    youngest.

    groups: GroupBy object
    """
    hfs = []
    for _, group in groups:
        hf, sf = EstimateSurvival(group)
        hfs.append(hf)
    thinkplot.PrePlot(len(hfs))
    for i, hf in enumerate(hfs):
        if i > 0:
            # Extend mutates hf in place; since hfs[i-1] was extended on
            # the previous iteration, the tails accumulate across cohorts.
            hf.Extend(hfs[i-1])
        sf = hf.MakeSurvival()
        thinkplot.Plot(sf, **options)
def ResampleSurvival(resp, iters=101):
    """Resamples respondents and estimates the survival function.

    Plots the point estimate plus a 90% CI band from weighted resampling,
    and saves the figure as survival3.

    resp: DataFrame of respondents
    iters: number of resamples
    """
    _, sf = EstimateSurvival(resp)
    thinkplot.Plot(sf)
    # Evaluate every resampled curve on a common monthly grid.
    low, high = resp.agemarry.min(), resp.agemarry.max()
    ts = np.arange(low, high, 1/12.0)
    ss_seq = []
    for _ in range(iters):
        sample = thinkstats2.ResampleRowsWeighted(resp)
        _, sf = EstimateSurvival(sample)
        ss_seq.append(sf.Probs(ts))
    # 5th and 95th percentiles across resamples form the 90% CI band.
    low, high = thinkstats2.PercentileRows(ss_seq, [5, 95])
    thinkplot.FillBetween(ts, low, high, color='gray', label='90% CI')
    thinkplot.Save(root='survival3',
                   xlabel='age (years)',
                   ylabel='prob unmarried',
                   xlim=[12, 46],
                   ylim=[0, 1],
                   formats=FORMATS)
def EstimateSurvival(resp):
    """Estimates the marriage survival curve for a group of respondents.

    resp: DataFrame of respondents

    returns: pair of HazardFunction, SurvivalFunction
    """
    # Ever-married respondents contribute complete durations (age at
    # first marriage); never-married ones are censored at current age.
    complete = resp.loc[resp.evrmarry == 1, 'agemarry']
    ongoing = resp.loc[resp.evrmarry == 0, 'age']
    hf = EstimateHazardFunction(complete, ongoing)
    return hf, hf.MakeSurvival()
def PlotMarriageData(resp):
    """Plots hazard and survival functions for age at first marriage.

    Saves the figure as survival2.

    resp: DataFrame of respondents

    returns: SurvivalFunction
    """
    hf, sf = EstimateSurvival(resp)
    # Top subplot: hazard; bottom subplot: survival.
    thinkplot.PrePlot(rows=2)
    thinkplot.Plot(hf)
    thinkplot.Config(legend=False)
    thinkplot.SubPlot(2)
    thinkplot.Plot(sf)
    thinkplot.Save(root='survival2',
                   xlabel='age (years)',
                   ylabel='prob unmarried',
                   ylim=[0, 1],
                   legend=False,
                   formats=FORMATS)
    return sf
def PlotPregnancyData(preg):
    """Plots survival and hazard curves based on pregnancy lengths.

    Saves the figure as survival1.

    preg: DataFrame of pregnancies

    returns: SurvivalFunction
    """
    # Outcomes 1, 3, 4 (live birth, stillbirth, miscarriage) are complete;
    # outcome 6 (current pregnancy) is censored.
    complete = preg.query('outcome in [1, 3, 4]').prglngth
    print('Number of complete pregnancies', len(complete))
    ongoing = preg[preg.outcome == 6].prglngth
    print('Number of ongoing pregnancies', len(ongoing))
    PlotSurvival(complete)
    thinkplot.Save(root='survival1',
                   xlabel='t (weeks)',
                   formats=FORMATS)
    hf = EstimateHazardFunction(complete, ongoing)
    sf = hf.MakeSurvival()
    return sf
def PlotRemainingLifetime(sf1, sf2):
    """Plots remaining lifetimes for pregnancy and age at first marriage.

    Saves the figure as survival6.

    sf1: SurvivalFunction for pregnancy length
    sf2: SurvivalFunction for age at first marriage
    """
    thinkplot.PrePlot(cols=2)
    # Left panel: mean remaining pregnancy length.
    rem_life1 = sf1.RemainingLifetime()
    thinkplot.Plot(rem_life1)
    thinkplot.Config(title='pregnancy length',
                     xlabel='weeks',
                     ylabel='mean remaining weeks')
    # Right panel: median remaining years to first marriage; the median is
    # used (and censored cases filled with inf) because the mean is
    # undefined when some respondents never marry.
    thinkplot.SubPlot(2)
    func = lambda pmf: pmf.Percentile(50)
    rem_life2 = sf2.RemainingLifetime(filler=np.inf, func=func)
    thinkplot.Plot(rem_life2)
    thinkplot.Config(title='age at first marriage',
                     ylim=[0, 15],
                     xlim=[11, 31],
                     xlabel='age (years)',
                     ylabel='median remaining years')
    thinkplot.Save(root='survival6',
                   formats=FORMATS)
def ReadFemResp(dct_file='2002FemResp.dct',
                dat_file='2002FemResp.dat.gz',
                **options):
    """Reads the NSFG respondent data.

    dct_file: string file name of the Stata dictionary
    dat_file: string file name of the gzipped fixed-width data file
    options: passed through to ReadFixedWidth (e.g. usecols)

    returns: cleaned DataFrame
    """
    dct = thinkstats2.ReadStataDct(dct_file, encoding='iso-8859-1')
    resp = dct.ReadFixedWidth(dat_file, compression='gzip', **options)
    CleanData(resp)
    return resp
def ReadFemResp2002():
    """Reads respondent data from NSFG Cycle 6 (2002).

    returns: DataFrame
    """
    usecols = ['cmmarrhx', 'cmdivorcx', 'cmbirth', 'cmintvw',
               'evrmarry', 'finalwgt']
    resp = ReadFemResp(usecols=usecols)
    # ReadFemResp already runs CleanData on the frame it returns, so the
    # second CleanData call that used to be here was redundant (CleanData
    # is idempotent, but there is no reason to do the work twice).
    return resp
def ReadFemResp2010():
    """Reads respondent data from NSFG Cycle 7 (2006-2010).

    returns: DataFrame
    """
    usecols = ['cmmarrhx', 'cmdivorcx', 'cmbirth', 'cmintvw',
               'evrmarry', 'wgtq1q16']
    resp = ReadFemResp('2006_2010_FemRespSetup.dct',
                       '2006_2010_FemResp.dat.gz',
                       usecols=usecols)
    # Map the cycle-specific weight column to the common name.
    resp['finalwgt'] = resp.wgtq1q16
    # NOTE(review): ReadFemResp already calls CleanData, so this second
    # call is redundant (though harmless — CleanData is idempotent).
    CleanData(resp)
    return resp
def ReadFemResp2013():
    """Reads respondent data from NSFG Cycle 8 (2011-2013).

    returns: DataFrame
    """
    usecols = ['cmmarrhx', 'cmdivorcx', 'cmbirth', 'cmintvw',
               'evrmarry', 'wgt2011_2013']
    resp = ReadFemResp('2011_2013_FemRespSetup.dct',
                       '2011_2013_FemRespData.dat.gz',
                       usecols=usecols)
    # Map the cycle-specific weight column to the common name.
    resp['finalwgt'] = resp.wgt2011_2013
    # NOTE(review): ReadFemResp already calls CleanData, so this second
    # call is redundant (though harmless — CleanData is idempotent).
    CleanData(resp)
    return resp
def ReadFemResp1995():
    """Reads respondent data from NSFG Cycle 5 (1995).

    This file has no Stata dictionary, so column positions are hard-coded
    and the variables are renamed to match the other cycles.

    returns: DataFrame
    """
    dat_file = '1995FemRespData.dat.gz'
    names = ['a_doi', 'timesmar', 'mardat01', 'bdaycenm', 'post_wt']
    # (start, end) byte offsets of each field in the fixed-width records,
    # in the same order as `names`.
    colspecs = [(12359, 12363),
                (3538, 3540),
                (11758, 11762),
                (13, 16),
                (12349, 12359)]
    df = pandas.read_fwf(dat_file,
                         compression='gzip',
                         colspecs=colspecs,
                         names=names)
    # Rename to the common column names used by CleanData.
    df['cmmarrhx'] = df.mardat01
    df['cmbirth'] = df.bdaycenm
    df['cmintvw'] = df.a_doi
    df['finalwgt'] = df.post_wt
    # 98/99 are "refused"/"don't know" sentinels for times married.
    df.timesmar.replace([98, 99], np.nan, inplace=True)
    df['evrmarry'] = (df.timesmar > 0).astype(int)
    CleanData(df)
    return df
def ReadFemResp1982():
    """Reads respondent data from the 1982 NSFG.

    NOTE(review): this docstring previously said "Cycle 4", but the file
    read here is the 1982 survey (the 1988 survey is handled by
    ReadFemResp1988) — confirm the correct cycle number.

    returns: DataFrame
    """
    dat_file = '1982NSFGData.dat.gz'
    names = ['cmmarrhx', 'MARNO', 'cmintvw', 'cmbirth', 'finalwgt']
    #actual = ['MARIMO', 'MARNO', 'TL', 'TL', 'W5']
    # (start, end) byte offsets of each field, in the same order as `names`.
    colspecs = [(1028, 1031),
                (1258, 1259),
                (841, 844),
                (12, 15),
                (976, 982)]
    df = pandas.read_fwf(dat_file, compression='gzip', colspecs=colspecs, names=names)
    # 98/99 are "refused"/"don't know" sentinels for number of marriages.
    df.MARNO.replace([98, 99], np.nan, inplace=True)
    df['evrmarry'] = (df.MARNO > 0).astype(int)
    CleanData(df)
    # Only the first 7969 rows are respondent records.
    return df[:7969]
def ReadFemResp1988():
    """Reads respondent data from the 1988 NSFG (Cycle 4).

    Only the ever-married flag is extracted; the date columns (and the
    CleanData call) are commented out, so the returned frame has just
    F_13 and evrmarry.

    returns: DataFrame
    """
    dat_file = '1988FemRespData.dat.gz'
    names = ['F_13'] #['CMOIMO', 'F_13', 'F19M1MO', 'A_3']
    # colspecs = [(799, 803)],
    colspecs = [(20, 22)]#,
                # (1538, 1542),
                # (26, 30),
                # (2568, 2574)]
    df = pandas.read_fwf(dat_file, compression='gzip', colspecs=colspecs, names=names)
    # df['cmmarrhx'] = df.F19M1MO
    # df['cmbirth'] = df.A_3
    # df['cmintvw'] = df.CMOIMO
    # df['finalwgt'] = df.W5
    # 98/99 are "refused"/"don't know" sentinels.
    df.F_13.replace([98, 99], np.nan, inplace=True)
    df['evrmarry'] = (df.F_13 > 0).astype(int)
    # CleanData(df)
    return df
def PlotResampledByDecade(resps, iters=11, predict_flag=False, omit=None):
    """Plots survival curves for weighted resamples of the data.

    resps: list of DataFrames
    iters: number of resampling iterations to plot
    predict_flag: whether to also plot predictions
    omit: optional collection of decade labels to leave out
    """
    # TODO: refactor this to collect resampled estimates and plot
    # shaded areas instead of overplotting translucent curves.
    for iteration in range(iters):
        resampled = pandas.concat(
            [thinkstats2.ResampleRowsWeighted(resp) for resp in resps],
            ignore_index=True)

        groups = resampled.groupby('decade')
        if omit:
            groups = [(name, group) for name, group in groups
                      if name not in omit]

        # Label only once so the legend is not repeated per iteration.
        if iteration == 0:
            AddLabelsByDecade(groups, alpha=0.7)

        if predict_flag:
            PlotPredictionsByDecade(groups, alpha=0.1)
            EstimateSurvivalByDecade(groups, alpha=0.1)
        else:
            EstimateSurvivalByDecade(groups, alpha=0.2)
def main():
    """Run the full survival analysis and save the figures.

    The steps are order-dependent: the pregnancy and Cycle-6 plots
    produce the survival functions reused by PlotRemainingLifetime.
    """
    thinkstats2.RandomSeed(17)

    preg = nsfg.ReadFemPreg()
    sf1 = PlotPregnancyData(preg)

    # make the plots based on Cycle 6
    resp6 = ReadFemResp2002()

    sf2 = PlotMarriageData(resp6)
    ResampleSurvival(resp6)

    PlotRemainingLifetime(sf1, sf2)

    # read Cycles 5 and 7
    resp5 = ReadFemResp1995()
    resp7 = ReadFemResp2010()

    # plot resampled survival functions by decade
    resps = [resp5, resp6, resp7]
    PlotResampledByDecade(resps)
    thinkplot.Save(root='survival4',
                   xlabel='age (years)',
                   ylabel='prob unmarried',
                   xlim=[13, 45],
                   ylim=[0, 1],
                   formats=FORMATS)

    # plot resampled survival functions by decade, with predictions
    # omit=[5] leaves one decade group out of the prediction plot --
    # presumably the cohort labelled 5; confirm against AddLabelsByDecade.
    PlotResampledByDecade(resps, predict_flag=True, omit=[5])
    thinkplot.Save(root='survival5',
                   xlabel='age (years)',
                   ylabel='prob unmarried',
                   xlim=[13, 45],
                   ylim=[0, 1],
                   formats=FORMATS)


if __name__ == '__main__':
    main()
| gpl-3.0 |
momm3/WelcomeBot | welcomebot/Lib/site-packages/urllib3/util/ssl_.py | 42 | 12073 | from __future__ import absolute_import
import errno
import warnings
import hmac
from binascii import hexlify, unhexlify
from hashlib import md5, sha1, sha256
from ..exceptions import SSLError, InsecurePlatformWarning, SNIMissingWarning
# Module-level defaults; presumably replaced elsewhere (e.g. by an
# alternative TLS backend patching this module) -- these are only the
# values used when nothing better is available.
SSLContext = None
HAS_SNI = False
IS_PYOPENSSL = False
IS_SECURETRANSPORT = False

# Maps the length of a digest to a possible hash function producing this digest.
# Lengths are counted in hex characters: md5 -> 32, sha1 -> 40, sha256 -> 64.
HASHFUNC_MAP = {
    32: md5,
    40: sha1,
    64: sha256,
}
def _const_compare_digest_backport(a, b):
"""
Compare two digests of equal length in constant time.
The digests must be of type str/bytes.
Returns True if the digests match, and False otherwise.
"""
result = abs(len(a) - len(b))
for l, r in zip(bytearray(a), bytearray(b)):
result |= l ^ r
return result == 0
# Prefer the stdlib constant-time comparison when the running Python
# provides it; otherwise fall back to the pure-Python backport above.
_const_compare_digest = getattr(hmac, 'compare_digest',
                                _const_compare_digest_backport)
try: # Test for SSL features
import ssl
from ssl import wrap_socket, CERT_NONE, PROTOCOL_SSLv23
from ssl import HAS_SNI # Has SNI?
except ImportError:
pass
try:
from ssl import OP_NO_SSLv2, OP_NO_SSLv3, OP_NO_COMPRESSION
except ImportError:
OP_NO_SSLv2, OP_NO_SSLv3 = 0x1000000, 0x2000000
OP_NO_COMPRESSION = 0x20000
# A secure default.
# Sources for more information on TLS ciphers:
#
# - https://wiki.mozilla.org/Security/Server_Side_TLS
# - https://www.ssllabs.com/projects/best-practices/index.html
# - https://hynek.me/articles/hardening-your-web-servers-ssl-ciphers/
#
# The general intent is:
# - Prefer cipher suites that offer perfect forward secrecy (DHE/ECDHE),
# - prefer ECDHE over DHE for better performance,
# - prefer any AES-GCM and ChaCha20 over any AES-CBC for better performance and
#   security,
# - prefer AES-GCM over ChaCha20 because hardware-accelerated AES is common,
# - disable NULL authentication, MD5 MACs and DSS for security reasons.
DEFAULT_CIPHERS = ':'.join([
    'ECDH+AESGCM',
    'ECDH+CHACHA20',
    'DH+AESGCM',
    'DH+CHACHA20',
    'ECDH+AES256',
    'DH+AES256',
    'ECDH+AES128',
    'DH+AES',
    'RSA+AESGCM',
    'RSA+AES',
    '!aNULL',  # exclude anonymous (unauthenticated) key exchange
    '!eNULL',  # exclude NULL (plaintext) "encryption"
    '!MD5',    # exclude MD5-based MACs
])
try:
    from ssl import SSLContext  # Modern SSL?
except ImportError:
    import sys

    class SSLContext(object):  # Platform-specific: Python 2 & 3.1
        """Minimal stand-in for :class:`ssl.SSLContext` on old Pythons.

        It only records the TLS configuration; the settings are replayed
        through :func:`ssl.wrap_socket` when ``wrap_socket`` is called.
        """

        # ssl.wrap_socket grew the `ciphers` kwarg in 2.7/3.2, so cipher
        # selection is only possible there.
        supports_set_ciphers = ((2, 7) <= sys.version_info < (3,) or
                                (3, 2) <= sys.version_info)

        def __init__(self, protocol_version):
            self.protocol = protocol_version
            # Use default values from a real SSLContext
            self.check_hostname = False
            self.verify_mode = ssl.CERT_NONE
            self.ca_certs = None
            self.options = 0
            self.certfile = None
            self.keyfile = None
            self.ciphers = None

        def load_cert_chain(self, certfile, keyfile):
            """Remember the certificate/key paths for wrap_socket."""
            self.certfile = certfile
            self.keyfile = keyfile

        def load_verify_locations(self, cafile=None, capath=None):
            """Remember the CA bundle path; CA directories are unsupported."""
            self.ca_certs = cafile

            if capath is not None:
                raise SSLError("CA directories not supported in older Pythons")

        def set_ciphers(self, cipher_suite):
            """Remember the cipher string (Python 2.7+/3.2+ only)."""
            if not self.supports_set_ciphers:
                raise TypeError(
                    'Your version of Python does not support setting '
                    'a custom cipher suite. Please upgrade to Python '
                    '2.7, 3.2, or later if you need this functionality.'
                )
            self.ciphers = cipher_suite

        def wrap_socket(self, socket, server_hostname=None, server_side=False):
            """Wrap `socket` with ssl.wrap_socket using the stored settings.

            Note: `server_hostname` is accepted for API compatibility but
            ignored -- SNI is unavailable without a real SSLContext, which
            is exactly what the warning below tells the user.
            """
            warnings.warn(
                'A true SSLContext object is not available. This prevents '
                'urllib3 from configuring SSL appropriately and may cause '
                'certain SSL connections to fail. You can upgrade to a newer '
                'version of Python to solve this. For more information, see '
                'https://urllib3.readthedocs.io/en/latest/advanced-usage.html'
                '#ssl-warnings',
                InsecurePlatformWarning
            )
            kwargs = {
                'keyfile': self.keyfile,
                'certfile': self.certfile,
                'ca_certs': self.ca_certs,
                'cert_reqs': self.verify_mode,
                'ssl_version': self.protocol,
                'server_side': server_side,
            }
            if self.supports_set_ciphers:  # Platform-specific: Python 2.7+
                return wrap_socket(socket, ciphers=self.ciphers, **kwargs)
            else:  # Platform-specific: Python 2.6
                return wrap_socket(socket, **kwargs)
def assert_fingerprint(cert, fingerprint):
    """
    Checks if given fingerprint matches the supplied certificate.

    :param cert:
        Certificate as bytes object.
    :param fingerprint:
        Fingerprint as string of hexdigits, can be interspersed by colons.
    :raises SSLError:
        If the fingerprint has an unknown length or does not match.
    """
    # Normalize: strip the optional colon separators and lower-case.
    fingerprint = fingerprint.replace(':', '').lower()

    # The hex-digit count selects the digest algorithm (see HASHFUNC_MAP).
    hashfunc = HASHFUNC_MAP.get(len(fingerprint))
    if hashfunc is None:
        raise SSLError(
            'Fingerprint of invalid length: {0}'.format(fingerprint))

    # We need encode() here for py32; works on py2 and p33.
    expected_digest = unhexlify(fingerprint.encode())
    cert_digest = hashfunc(cert).digest()

    # Constant-time comparison so timing does not leak digest bytes.
    if not _const_compare_digest(cert_digest, expected_digest):
        raise SSLError('Fingerprints did not match. Expected "{0}", got "{1}".'
                       .format(fingerprint, hexlify(cert_digest)))
def resolve_cert_reqs(candidate):
    """
    Resolves the argument to a numeric constant, which can be passed to
    the wrap_socket function/method from the ssl module.
    Defaults to :data:`ssl.CERT_NONE`.
    If given a string it is assumed to be the name of the constant in the
    :mod:`ssl` module or its abbreviation.
    (So you can specify `REQUIRED` instead of `CERT_REQUIRED`.)
    If it's neither `None` nor a string we assume it is already the numeric
    constant which can directly be passed to wrap_socket.
    """
    if candidate is None:
        return CERT_NONE

    if not isinstance(candidate, str):
        # Already a numeric constant; pass it through untouched.
        return candidate

    # Try the exact attribute name first, then the 'CERT_'-prefixed form.
    value = getattr(ssl, candidate, None)
    if value is None:
        value = getattr(ssl, 'CERT_' + candidate)
    return value
def resolve_ssl_version(candidate):
    """
    Resolve `candidate` to a numeric ssl protocol constant.

    Works like :func:`resolve_cert_reqs`: ``None`` maps to
    ``PROTOCOL_SSLv23``, strings are looked up on the :mod:`ssl` module
    (with or without the ``PROTOCOL_`` prefix), and anything else is
    assumed to already be a protocol constant.
    """
    if candidate is None:
        return PROTOCOL_SSLv23

    if not isinstance(candidate, str):
        return candidate

    value = getattr(ssl, candidate, None)
    return value if value is not None else getattr(ssl, 'PROTOCOL_' + candidate)
def create_urllib3_context(ssl_version=None, cert_reqs=None,
                           options=None, ciphers=None):
    """All arguments have the same meaning as ``ssl_wrap_socket``.

    By default, this function does a lot of the same work that
    ``ssl.create_default_context`` does on Python 3.4+. It:

    - Disables SSLv2, SSLv3, and compression
    - Sets a restricted set of server ciphers

    If you wish to enable SSLv3, you can do::

        from urllib3.util import ssl_
        context = ssl_.create_urllib3_context()
        context.options &= ~ssl_.OP_NO_SSLv3

    You can do the same to enable compression (substituting ``COMPRESSION``
    for ``SSLv3`` in the last line above).

    :param ssl_version:
        The desired protocol version to use. This will default to
        PROTOCOL_SSLv23 which will negotiate the highest protocol that both
        the server and your installation of OpenSSL support.
    :param cert_reqs:
        Whether to require the certificate verification. This defaults to
        ``ssl.CERT_REQUIRED``.
    :param options:
        Specific OpenSSL options. These default to ``ssl.OP_NO_SSLv2``,
        ``ssl.OP_NO_SSLv3``, ``ssl.OP_NO_COMPRESSION``.
    :param ciphers:
        Which cipher suites to allow the server to select.
    :returns:
        Constructed SSLContext object with specified options
    :rtype: SSLContext
    """
    context = SSLContext(ssl_version or ssl.PROTOCOL_SSLv23)

    # Setting the default here, as we may have no ssl module on import
    cert_reqs = ssl.CERT_REQUIRED if cert_reqs is None else cert_reqs

    # Secure defaults are only applied when the caller passed no options
    # at all; an explicit `options` value is trusted as-is.
    if options is None:
        options = 0
        # SSLv2 is easily broken and is considered harmful and dangerous
        options |= OP_NO_SSLv2
        # SSLv3 has several problems and is now dangerous
        options |= OP_NO_SSLv3
        # Disable compression to prevent CRIME attacks for OpenSSL 1.0+
        # (issue #309)
        options |= OP_NO_COMPRESSION

    context.options |= options

    # The fallback SSLContext above cannot always set ciphers, hence the
    # feature check; a real ssl.SSLContext has no such attribute, so the
    # getattr default of True makes it take this branch.
    if getattr(context, 'supports_set_ciphers', True):  # Platform-specific: Python 2.6
        context.set_ciphers(ciphers or DEFAULT_CIPHERS)

    context.verify_mode = cert_reqs
    if getattr(context, 'check_hostname', None) is not None:  # Platform-specific: Python 3.2
        # We do our own verification, including fingerprints and alternative
        # hostnames. So disable it here
        context.check_hostname = False
    return context
def ssl_wrap_socket(sock, keyfile=None, certfile=None, cert_reqs=None,
                    ca_certs=None, server_hostname=None,
                    ssl_version=None, ciphers=None, ssl_context=None,
                    ca_cert_dir=None):
    """
    All arguments except for server_hostname, ssl_context, and ca_cert_dir have
    the same meaning as they do when using :func:`ssl.wrap_socket`.

    :param server_hostname:
        When SNI is supported, the expected hostname of the certificate
    :param ssl_context:
        A pre-made :class:`SSLContext` object. If none is provided, one will
        be created using :func:`create_urllib3_context`.
    :param ciphers:
        A string of ciphers we wish the client to support. This is not
        supported on Python 2.6 as the ssl module does not support it.
    :param ca_cert_dir:
        A directory containing CA certificates in multiple separate files, as
        supported by OpenSSL's -CApath flag or the capath argument to
        SSLContext.load_verify_locations().
    """
    context = ssl_context
    if context is None:
        # Note: This branch of code and all the variables in it are no longer
        # used by urllib3 itself. We should consider deprecating and removing
        # this code.
        context = create_urllib3_context(ssl_version, cert_reqs,
                                         ciphers=ciphers)

    if ca_certs or ca_cert_dir:
        try:
            context.load_verify_locations(ca_certs, ca_cert_dir)
        except IOError as e:  # Platform-specific: Python 2.6, 2.7, 3.2
            raise SSLError(e)
        # Py33 raises FileNotFoundError which subclasses OSError
        # These are not equivalent unless we check the errno attribute
        except OSError as e:  # Platform-specific: Python 3.3 and beyond
            if e.errno == errno.ENOENT:
                raise SSLError(e)
            raise
    elif getattr(context, 'load_default_certs', None) is not None:
        # try to load OS default certs; works well on Windows (require Python3.4+)
        context.load_default_certs()

    if certfile:
        context.load_cert_chain(certfile, keyfile)
    if HAS_SNI:  # Platform-specific: OpenSSL with enabled SNI
        return context.wrap_socket(sock, server_hostname=server_hostname)

    # Without SNI the server cannot pick a certificate per hostname, so
    # warn loudly before wrapping without a server_hostname.
    warnings.warn(
        'An HTTPS request has been made, but the SNI (Subject Name '
        'Indication) extension to TLS is not available on this platform. '
        'This may cause the server to present an incorrect TLS '
        'certificate, which can cause validation failures. You can upgrade to '
        'a newer version of Python to solve this. For more information, see '
        'https://urllib3.readthedocs.io/en/latest/advanced-usage.html'
        '#ssl-warnings',
        SNIMissingWarning
    )
    return context.wrap_socket(sock)
| mit |
nipunbatra/bayespy | bayespy/inference/vmp/nodes/categorical.py | 2 | 6193 | ######################################################################
# Copyright (C) 2011,2012,2014 Jaakko Luttinen
#
# This file is licensed under Version 3.0 of the GNU General Public
# License. See LICENSE for a text of the license.
######################################################################
######################################################################
# This file is part of BayesPy.
#
# BayesPy is free software: you can redistribute it and/or modify it
# under the terms of the GNU General Public License version 3 as
# published by the Free Software Foundation.
#
# BayesPy is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with BayesPy. If not, see <http://www.gnu.org/licenses/>.
######################################################################
"""
Module for the categorical distribution node.
"""
import numpy as np
#from .expfamily import ExponentialFamily
#from .expfamily import ExponentialFamilyDistribution
from .expfamily import useconstructor
from .multinomial import (MultinomialMoments,
MultinomialDistribution,
Multinomial)
#from .dirichlet import Dirichlet, DirichletMoments
from .node import ensureparents
from bayespy.utils import random
from bayespy.utils import utils
class CategoricalMoments(MultinomialMoments):
    """
    Class for the moments of categorical variables.
    """

    def __init__(self, categories):
        """
        Create moments object for categorical variables.

        `categories` is the number of distinct category labels D.
        """
        self.D = categories
        super().__init__()

    def compute_fixed_moments(self, x):
        """
        Compute the moments for a fixed value.

        `x` holds integer category indices in [0, D); the result is the
        matching one-hot encoding with a trailing axis of length D.
        """
        # Validate before encoding.
        x = np.asanyarray(x)
        if not utils.isinteger(x):
            raise ValueError("Values must be integers")
        if np.any(x < 0) or np.any(x >= self.D):
            raise ValueError("Invalid category index")

        # One-hot encode: flatten, place a single 1 per row, then restore
        # the original shape with the category axis appended.
        one_hot = np.zeros((np.size(x), self.D))
        one_hot[[np.arange(np.size(x)), np.ravel(x)]] = 1
        return [np.reshape(one_hot, np.shape(x) + (self.D,))]

    def compute_dims_from_values(self, x):
        """
        Return the shape of the moments for a fixed value.

        The observations are scalar, so the only moment axis is the
        category axis of length D.
        """
        return ( (self.D,), )
class CategoricalDistribution(MultinomialDistribution):
    """
    Class for the VMP formulas of categorical variables.

    A categorical variable is a multinomial with a single trial, so the
    actual formulas are delegated to :class:`MultinomialDistribution`.
    """

    def __init__(self, categories):
        """
        Create VMP formula node for a categorical variable.

        `categories` is the total number of categories.
        """
        if not isinstance(categories, int):
            raise ValueError("Number of categories must be integer")
        if categories < 0:
            # Fixed message typo ("categoriess" -> "categories").
            raise ValueError("Number of categories must be non-negative")
        self.D = categories
        # One trial per observation turns the multinomial categorical.
        super().__init__(1)

    def compute_message_to_parent(self, parent, index, u, u_p):
        """
        Compute the message to a parent node.
        """
        return super().compute_message_to_parent(parent, index, u, u_p)

    def compute_phi_from_parents(self, u_p, mask=True):
        """
        Compute the natural parameter vector given parent moments.
        """
        return super().compute_phi_from_parents(u_p, mask=mask)

    def compute_moments_and_cgf(self, phi, mask=True):
        r"""
        Compute the moments and :math:`g(\phi)`.
        """
        # Raw docstring above avoids the invalid \p escape sequence.
        return super().compute_moments_and_cgf(phi, mask=mask)

    def compute_cgf_from_parents(self, u_p):
        r"""
        Compute :math:`\mathrm{E}_{q(p)}[g(p)]`
        """
        return super().compute_cgf_from_parents(u_p)

    def compute_fixed_moments_and_f(self, x, mask=True):
        """
        Compute the moments and :math:`f(x)` for a fixed value.
        """
        # Check the validity of x
        x = np.asanyarray(x)
        if not utils.isinteger(x):
            raise ValueError("Values must be integers")
        if np.any(x < 0) or np.any(x >= self.D):
            raise ValueError("Invalid category index")

        # Form a binary matrix with only one non-zero (1) in the last axis
        u0 = np.zeros((np.size(x), self.D))
        u0[[np.arange(np.size(x)), np.ravel(x)]] = 1
        u0 = np.reshape(u0, np.shape(x) + (self.D,))
        u = [u0]

        # f(x) is zero for the categorical distribution
        f = 0

        return (u, f)
class Categorical(Multinomial):
    """
    Node for categorical random variables.
    """

    @classmethod
    @ensureparents
    def _constructor(cls, p, **kwargs):
        """
        Constructs distribution and moments objects.

        This method is called if useconstructor decorator is used for __init__.

        Because the distribution and moments object depend on the number of
        categories, that is, they depend on the parent node, this method can be
        used to construct those objects.
        """
        # Get the number of categories
        D = p.dims[0][0]

        parents = [p]
        moments = CategoricalMoments(D)
        distribution = CategoricalDistribution(D)

        # Tuple layout expected by the useconstructor machinery:
        # (parents, kwargs, dims, total plates, distribution, moments,
        #  parent moments).
        return (parents,
                kwargs,
                ( (D,), ),
                cls._total_plates(kwargs.get('plates'),
                                  distribution.plates_from_parent(0, p.plates)),
                distribution,
                moments,
                cls._parent_moments)

    def random(self):
        """
        Draw a random sample from the distribution.
        """
        logp = self.phi[0]
        # Shift by the per-row maximum before exponentiating for numerical
        # stability.  NOTE(review): the augmented assignment mutates
        # self.phi[0] in place -- confirm that is intentional.
        logp -= np.amax(logp, axis=-1, keepdims=True)
        p = np.exp(logp)
        return random.categorical(p, size=self.plates)

    def show(self):
        """
        Print the distribution using standard parameterization.
        """
        p = self.u[0]
        print("%s ~ Categorical(p)" % self.name)
        print("  p = ")
        print(p)
| gpl-3.0 |
jhawkesworth/ansible | test/units/modules/network/eos/test_eos_system.py | 59 | 4643 | # (c) 2016 Red Hat Inc.
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
from units.compat.mock import patch
from ansible.modules.network.eos import eos_system
from units.modules.utils import set_module_args
from .eos_module import TestEosModule, load_fixture
class TestEosSystemModule(TestEosModule):
    """Unit tests for the eos_system Ansible module.

    get_config/load_config are patched so no device connection is made;
    the "running config" comes from the eos_system_config.cfg fixture and
    each test asserts the exact CLI commands the module would push.
    """

    module = eos_system

    def setUp(self):
        super(TestEosSystemModule, self).setUp()

        # Patch the module's config I/O so tests never reach a device.
        self.mock_get_config = patch('ansible.modules.network.eos.eos_system.get_config')
        self.get_config = self.mock_get_config.start()

        self.mock_load_config = patch('ansible.modules.network.eos.eos_system.load_config')
        self.load_config = self.mock_load_config.start()

    def tearDown(self):
        super(TestEosSystemModule, self).tearDown()

        self.mock_get_config.stop()
        self.mock_load_config.stop()

    def load_fixtures(self, commands=None, transport='cli'):
        # Canned running-config and a no-diff load result for every test.
        self.get_config.return_value = load_fixture('eos_system_config.cfg')
        self.load_config.return_value = dict(diff=None, session='session')

    def test_eos_system_hostname_changed(self):
        set_module_args(dict(hostname='foo'))
        commands = ['hostname foo']
        self.execute_module(changed=True, commands=commands)

    def test_eos_system_domain_name(self):
        set_module_args(dict(domain_name='test.com'))
        commands = ['ip domain-name test.com']
        self.execute_module(changed=True, commands=commands)

    def test_eos_system_domain_list(self):
        # The fixture already contains ops.ansible.com, hence the removal.
        set_module_args(dict(domain_list=['ansible.com', 'redhat.com']))
        commands = ['no ip domain-list ops.ansible.com',
                    'ip domain-list redhat.com']
        self.execute_module(changed=True, commands=commands)

    def test_eos_system_lookup_source(self):
        set_module_args(dict(lookup_source=['Ethernet1']))
        commands = ['no ip domain lookup source-interface Management1',
                    'ip domain lookup source-interface Ethernet1']
        self.execute_module(changed=True, commands=commands)

    def test_eos_system_lookup_source_complex(self):
        # Entries may be plain interfaces or dicts carrying a vrf.
        lookup_source = [{'interface': 'Management1', 'vrf': 'mgmt'},
                         {'interface': 'Ethernet1'}]
        set_module_args(dict(lookup_source=lookup_source))
        commands = ['no ip domain lookup source-interface Management1',
                    'ip domain lookup vrf mgmt source-interface Management1',
                    'ip domain lookup source-interface Ethernet1']
        self.execute_module(changed=True, commands=commands)

    # NOTE(review): the name-server cases below are disabled (and the
    # second one is misspelled "rest_..."); confirm the intended command
    # set before re-enabling them.
    # def test_eos_system_name_servers(self):
    #    name_servers = ['8.8.8.8', '8.8.4.4']
    #    set_module_args(dict(name_servers=name_servers))
    #    commands = ['ip name-server 8.8.4.4',
    #                'no ip name-server vrf mgmt 8.8.4.4']
    #    self.execute_module(changed=True, commands=commands)

    # def rest_eos_system_name_servers_complex(self):
    #    name_servers = dict(server='8.8.8.8', vrf='test')
    #    set_module_args(dict(name_servers=name_servers))
    #    commands = ['ip name-server vrf test 8.8.8.8',
    #                'no ip name-server vrf default 8.8.8.8',
    #                'no ip name-server vrf mgmt 8.8.4.4']
    #    self.execute_module(changed=True, commands=commands)

    def test_eos_system_state_absent(self):
        set_module_args(dict(state='absent'))
        commands = ['no ip domain-name', 'no hostname']
        self.execute_module(changed=True, commands=commands)

    def test_eos_system_no_change(self):
        # Matches the fixture exactly, so no commands should be generated.
        set_module_args(dict(hostname='switch01', domain_name='eng.ansible.com'))
        commands = []
        self.execute_module(commands=commands)

    def test_eos_system_missing_vrf(self):
        # The vrf does not exist in the fixture, so the module must fail.
        name_servers = dict(server='8.8.8.8', vrf='missing')
        set_module_args(dict(name_servers=name_servers))
        result = self.execute_module(failed=True)
| gpl-3.0 |
vdrhtc/InLaTeXbot | src/InlineQueryResponseDispatcher.py | 1 | 5981 | from multiprocessing import Process, Lock, Event
from threading import Thread
import re
from telegram import InlineQueryResultArticle, InputTextMessageContent, \
InlineQueryResultCachedPhoto, TelegramError, ParseMode
from src.LoggingServer import LoggingServer
class InlineQueryResponseDispatcher():
    """Answers Telegram inline queries with rendered LaTeX images.

    Every query is handled in its own process.  A per-user Event is set
    when a newer query from the same user arrives, letting an in-flight
    render skip answering with a stale result.
    """

    logger = LoggingServer.getInstance()

    def __init__(self, bot, latexConverter, resourceManager, userOptionsManager, devnullChatId):
        # bot: Telegram bot used both to upload images and answer queries.
        # latexConverter: renders a LaTeX expression to a PNG stream.
        # resourceManager: provides localized message strings.
        # userOptionsManager: per-user settings (e.g. code-in-caption).
        # devnullChatId: chat used only as a scratch target for uploads.
        self._bot = bot
        self._latexConverter = latexConverter
        self._resourceManager = resourceManager
        self._userOptionsManager = userOptionsManager
        self._devnullChatId = devnullChatId
        # Maps user id -> Event set when that user's next query arrives.
        self._nextQueryArrivedEvents = {}

    def dispatchInlineQueryResponse(self, inline_query):
        """Spawn a background process that answers `inline_query`.

        Signals the previous Event for this user (so an older in-flight
        answer is abandoned) and registers a fresh Event for this query.
        """
        self.logger.debug("Received inline query: " + inline_query.query + \
                          ", id: " + str(inline_query.id) + ", from user: " + str(inline_query.from_user.id))

        try:
            self._nextQueryArrivedEvents[inline_query.from_user.id].set()
            self._nextQueryArrivedEvents[inline_query.from_user.id] = Event()
        except KeyError:
            # First query from this user: nothing to cancel yet.
            self._nextQueryArrivedEvents[inline_query.from_user.id] = Event()

        responder = Process(target=self.respondToInlineQuery, args=[inline_query,
                                                                    self._nextQueryArrivedEvents[
                                                                        inline_query.from_user.id]])
        responder.start()
        # Reap the child process on a helper thread without blocking here.
        Thread(target=self.joinProcess, args=[responder]).start()

    def joinProcess(self, process):
        """Block until `process` exits (runs on a helper thread)."""
        process.join()

    def respondToInlineQuery(self, inline_query, nextQueryArrivedEvent):
        """Render the query's LaTeX and answer it (child-process entry point).

        nextQueryArrivedEvent: Event set when the same user sends a newer
        query; checked before answering so stale results are dropped.
        """
        senderId = inline_query.from_user.id
        queryId = inline_query.id
        expression = inline_query.query

        expression = self.processMultilineComments(senderId, expression)
        caption = self.generateCaption(senderId, expression)

        result = None
        try:
            expressionPngFileStream = self._latexConverter.convertExpression(expression, senderId,
                                                                             str(queryId) + "_" + str(senderId))
            # Skip the upload if the user has already moved on.
            if not nextQueryArrivedEvent.is_set():
                result = self.uploadImage(expressionPngFileStream, expression, caption,
                                          self._userOptionsManager.getCodeInCaptionOption(senderId))
        except ValueError as err:
            # Raised by the converter on LaTeX syntax errors.
            result = self.getWrongSyntaxResult(expression, err.args[0])
        except TelegramError as err:
            errorMessage = self._resourceManager.getString("telegram_error") + str(err)
            self.logger.warn(errorMessage)
            result = InlineQueryResultArticle(0, errorMessage, InputTextMessageContent(expression))
        finally:
            if not self.skipForNewerQuery(nextQueryArrivedEvent, senderId, expression):
                self._bot.answerInlineQuery(queryId, [result], cache_time=0)
                self.logger.debug("Answered to inline query from %d, expression: %s", senderId, expression)

    def skipForNewerQuery(self, nextQueryArrivedEvent, senderId, expression):
        """Return True (and log) if a newer query superseded this one."""
        if nextQueryArrivedEvent.is_set():
            self.logger.debug("Skipped answering query from %d, expression: %s; newer query arrived", senderId,
                              expression)
            return True
        return False

    def getWrongSyntaxResult(self, query, latexError):
        """Build an inline "article" result describing a conversion error."""
        if len(query) >= 250:
            self.logger.debug("Query may be too long")
            errorMessage = self._resourceManager.getString("inline_query_too_long")
        else:
            self.logger.debug("Wrong syntax in the query")
            errorMessage = self._resourceManager.getString("latex_syntax_error")

        return InlineQueryResultArticle(0, errorMessage, InputTextMessageContent(query), description=latexError)

    def uploadImage(self, image, expression, caption, code_in_caption):
        """Upload `image` to the devnull chat and wrap it as a cached-photo result.

        Retries the upload up to three times; on persistent failure an
        article result carrying the error message is returned instead.
        """
        attempts = 0
        errorMessage = None
        while attempts < 3:
            try:
                latex_picture_id = self._bot.sendPhoto(self._devnullChatId, image).photo[0].file_id
                self.logger.debug("Image successfully uploaded for %s", expression)
                # Markdown parsing is disabled when the raw code is the
                # caption, so LaTeX characters are not mangled.
                return InlineQueryResultCachedPhoto(0, photo_file_id=latex_picture_id,
                                                    caption=caption,
                                                    parse_mode=ParseMode.MARKDOWN if not code_in_caption else None)
            except TelegramError as err:
                errorMessage = self._resourceManager.getString("telegram_error") + str(err)
                self.logger.warn(errorMessage)
                attempts += 1

        return InlineQueryResultArticle(0, errorMessage, InputTextMessageContent(expression))

    def processMultilineComments(self, senderId, expression):
        """Convert %* ... *% comment markers into \\iffalse/\\fi guards.

        Skipped entirely when the user shows the code in the caption.
        """
        if self._userOptionsManager.getCodeInCaptionOption(senderId) is True:
            return expression
        else:
            regex = r"^%\*"
            expression = re.sub(regex, r"\\iffalse inlatexbot", expression, flags=re.MULTILINE)
            regex = r"\*%"
            return re.sub(regex, r"inlatexbot \\fi", expression, flags=re.MULTILINE)

    def generateCaption(self, senderId, expression):
        """Derive the photo caption for `expression`, capped at 200 chars.

        With code-in-caption enabled the raw expression is the caption;
        otherwise only comment text (%-lines and the \\iffalse guards
        produced above) is collected.
        """
        if self._userOptionsManager.getCodeInCaptionOption(senderId) is True:
            return expression[:200]  # no comments, return everything (max 200 symbols)
        else:
            regex = r"^%( *\S+.*?)$|\\iffalse inlatexbot\n(.+?)inlatexbot \\fi"  # searching for comments, which are then only included
            groups = re.findall(regex, expression, re.MULTILINE | re.DOTALL)
            if len(groups) == 0:
                return ""
            else:
                caption = ""
                for group in groups:
                    caption += "".join(group) + "\n"
                return caption[:-1]
| gpl-3.0 |
UB-info/estructura-datos | RafaelArqueroGimeno_S4/benchmark.py | 1 | 1333 | import ColaInterface
import PColaInterface
import random
from time import time
def benchmark(module):
    '''Return (add_ms, search_ms): average per-operation cost for the module.'''
    ADD_RUNS = 10
    SEARCH_RUNS = 100

    parser = module.initparser('LastFM_big.dat')
    users = module.emptyType()

    t0 = time()
    for _ in xrange(ADD_RUNS):
        users = module.add(users, parser)
    t1 = time()

    for _ in xrange(SEARCH_RUNS):
        low = random.uniform(0.0, 1.0)
        high = random.uniform(low, 1.0)  # high is always >= low
        #print low, high#debug
        module.search(users, low, high)
    t2 = time()

    # Convert seconds to milliseconds and average over the run counts.
    return (1000.0 * (t1 - t0) / ADD_RUNS,
            1000.0 * (t2 - t1) / SEARCH_RUNS)
if __name__ == '__main__':
    # Template for reporting one data structure's timings (Python 2 script).
    pattern = 'In {0}, the average time spent in adding is {1:.4}ms and {2:.4}ms in searching.'
    qcosts = benchmark(ColaInterface)
    print pattern.format('Queue', *qcosts)
    pqcosts = benchmark(PColaInterface)
    print pattern.format('PriorityQueue', *pqcosts)
    # Relative factors: plain-queue time divided by priority-queue time.
    addfactor = qcosts[0] / pqcosts[0]
    searchfactor = qcosts[1] / pqcosts[1]
    print 'Conclusion: add in PriorityQueue is {0:.4} times slower but search is {1:.4} times faster.'.format(1/addfactor, searchfactor)
| mit |
entomb/CouchPotatoServer | couchpotato/core/providers/torrent/torrentbytes/__init__.py | 5 | 1737 | from .main import TorrentBytes
def start():
    """Return a new TorrentBytes provider instance."""
    return TorrentBytes()
# Settings-UI description of the TorrentBytes provider: one group under
# the torrent-providers list on the searcher tab.
config = [{
    'name': 'torrentbytes',
    'groups': [
        {
            'tab': 'searcher',
            'list': 'torrent_providers',
            'name': 'TorrentBytes',
            'description': 'See <a href="http://torrentbytes.net">TorrentBytes</a>',
            'wizard': True,
            'options': [
                {
                    # Master on/off switch for the provider.
                    'name': 'enabled',
                    'type': 'enabler',
                    'default': False,
                },
                {
                    'name': 'username',
                    'default': '',
                },
                {
                    'name': 'password',
                    'default': '',
                    'type': 'password',
                },
                {
                    'name': 'seed_ratio',
                    'label': 'Seed ratio',
                    'type': 'float',
                    'default': 1,
                    'description': 'Will not be (re)moved until this seed ratio is met.',
                },
                {
                    'name': 'seed_time',
                    'label': 'Seed time',
                    'type': 'int',
                    'default': 40,
                    'description': 'Will not be (re)moved until this seed time (in hours) is met.',
                },
                {
                    'name': 'extra_score',
                    'advanced': True,
                    'label': 'Extra Score',
                    'type': 'int',
                    'default': 20,
                    'description': 'Starting score for each release found via this provider.',
                }
            ],
        },
    ],
}]
| gpl-3.0 |
Sapphirine/Yelp_Recommendation | Review_functions.py | 1 | 2119 | __author__ = 'Simons'
from main import*
from nltk.corpus import stopwords
from collections import Counter
from nltk import*
import types
import tfidf
def preProcessReviews(list):
    """Tokenize, stop-word-filter and stem the review text of each row.

    Each row is expected to carry its raw review text at index 8.  The
    stemmed token list replaces row[8] and is also appended as row[9].
    Rows are modified in place; the same list object is returned.

    (The parameter keeps its original name `list` -- which shadows the
    builtin -- to preserve the function's interface.)
    """
    porter = nltk.PorterStemmer()
    # A set makes the per-token stop-word membership test O(1) instead
    # of a linear scan of the stop-word list for every token.
    stop = set(stopwords.words("english"))

    for row in list:
        tokens = nltk.word_tokenize(row[8])
        # Drop stop words, then re-tokenize the joined text (joining and
        # re-tokenizing can re-split punctuation) before stemming.
        kept = ' '.join([a for a in tokens if a not in stop])
        stems = [porter.stem(t) for t in nltk.word_tokenize(kept)]
        row[8] = stems
        row.append(stems)

    for row in list:
        print(row[9])  # debug output of the processed tokens

    return list
def findStoreId(store_name):
    """Return the id (column 1) of the store whose name (column 6) matches.

    Scans the whole global `store` table (presumably provided by
    ``from main import *``), so a later match overrides an earlier one.
    Prints a message and returns None when no store matches.
    """
    matched_id = 'null'
    for row in store:
        if row[6] == store_name:
            matched_id = row[1]  # keep scanning: last match wins
    if matched_id == 'null':
        print('Restaurant not found!')
        return None
    return matched_id
def findStoreName(store_id):
    """Return the name (column 6) of the store whose id (column 1) matches.

    Scans the whole global `store` table (presumably provided by
    ``from main import *``), so a later match overrides an earlier one.
    Prints a message and returns None when no store matches.
    """
    matched_name = 'null'
    for row in store:
        if row[1] == store_id:
            matched_name = row[6]  # keep scanning: last match wins
    if matched_name == 'null':
        print('Restaurant not found!')
        return None
    return matched_name
def findUserReviews(user_id):
    """Collect the review texts (column 8) of all reviews by `user_id`.

    Reads the global `reviews` table (user id is column 1); presumably
    provided by ``from main import *``.
    """
    return [row[8] for row in reviews if row[1] == user_id]
def findStoreReviews(store_id):
    """Collect the review texts (column 8) of all reviews for `store_id`.

    Reads the global `reviews` table (store id is column 2); presumably
    provided by ``from main import *``.
    """
    # Bug fix: the accumulator was misspelled (`store_revies`), so any
    # matching review raised NameError on `store_reviews.append`.
    store_reviews = []
    for row in reviews:
        if store_id == row[2]:
            store_reviews.append(row[8])
    return store_reviews
def normalize(list):
    """Scale the values in place so the maximum becomes 1.0.

    Returns the same list object.  Robustness fix: when no value is
    positive (empty, all-zero or all-negative input) the peak stays 0
    and the original code divided by zero; the list is now returned
    unchanged in that case.

    (The parameter keeps its original name `list` -- which shadows the
    builtin -- to preserve the function's interface.)
    """
    peak = 0
    for value in list:
        if value > peak:
            peak = value
    if peak == 0:
        # Nothing positive to scale by; avoid ZeroDivisionError.
        return list
    for i in range(len(list)):
        list[i] = list[i] / peak
    return list
def compare (list1,list2,list3):
    # NOTE(review): this function looks unfinished/broken -- confirm intent:
    #   - `list1.rows[8]` / `list2.rows[8]` are attribute accesses on plain
    #     lists and will raise AttributeError; presumably the loop
    #     variables' field 8 was meant.
    #   - both nested loops bind the same name `rows`, so the inner loop
    #     shadows the outer one.
    #   - `rownumber` and `getKey` are not defined in this module; unless
    #     `from main import *` provides them this raises NameError.
    #   - `tfidf` is imported above as a module, so calling it directly
    #     would fail unless that module object is callable.
    g = []
    for rows in list1:
        for rows in list2:
            g.extend(tfidf(list1.rows[8],list2.rows[8],list3[rownumber]))
    g = sorted(g,key=getKey)
    g = normalize(g)
    return g
| mit |
sebrandon1/nova | nova/tests/unit/api/openstack/compute/test_auth.py | 15 | 3211 | # Copyright 2013 IBM Corp.
# Copyright 2010 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import testscenarios
from nova.api import openstack as openstack_api
from nova.api.openstack import auth
from nova.api.openstack import compute
from nova.api.openstack import urlmap
from nova import test
from nova.tests.unit.api.openstack import fakes
class TestNoAuthMiddleware(testscenarios.WithScenarios, test.NoDBTestCase):
    """Scenario-driven tests for the no-auth paste middleware.

    Each scenario wires a different middleware class into the WSGI stack
    and asserts the X-Server-Management-Url it advertises (with or
    without the project id appended).
    """

    scenarios = [
        ('project_id', {
            'expected_url': 'http://localhost/v2.1/user1_project',
            'auth_middleware': auth.NoAuthMiddleware}),
        ('no_project_id', {
            'expected_url': 'http://localhost/v2.1',
            'auth_middleware': auth.NoAuthMiddlewareV2_18}),
    ]

    def setUp(self):
        super(TestNoAuthMiddleware, self).setUp()
        fakes.stub_out_networking(self)
        api_v21 = openstack_api.FaultWrapper(
            self.auth_middleware(
                compute.APIRouterV21()
            )
        )
        self.wsgi_app = urlmap.URLMap()
        self.wsgi_app['/v2.1'] = api_v21
        self.req_url = '/v2.1'

    def _get_response(self, url):
        """Issue a fake request with the standard auth headers.

        Extracted to remove the request-building boilerplate that was
        repeated verbatim in every test method.
        """
        req = fakes.HTTPRequest.blank(url, base_url='')
        req.headers['X-Auth-User'] = 'user1'
        req.headers['X-Auth-Key'] = 'user1_key'
        req.headers['X-Auth-Project-Id'] = 'user1_project'
        return req.get_response(self.wsgi_app)

    def test_authorize_user(self):
        result = self._get_response(self.req_url)
        self.assertEqual(result.status, '204 No Content')
        self.assertEqual(result.headers['X-Server-Management-Url'],
                         self.expected_url)

    def test_authorize_user_trailing_slash(self):
        # make sure it works with trailing slash on the request
        result = self._get_response(self.req_url + '/')
        self.assertEqual(result.status, '204 No Content')
        self.assertEqual(result.headers['X-Server-Management-Url'],
                         self.expected_url)

    def test_auth_token_no_empty_headers(self):
        result = self._get_response(self.req_url)
        self.assertEqual(result.status, '204 No Content')
        # The no-auth middleware must not emit these legacy headers.
        self.assertNotIn('X-CDN-Management-Url', result.headers)
        self.assertNotIn('X-Storage-Url', result.headers)
| apache-2.0 |
bowang/tensorflow | tensorflow/python/kernel_tests/distributions/identity_bijector_test.py | 72 | 1798 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Identity Tests."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.ops.distributions import bijector_test_util
from tensorflow.python.ops.distributions import identity_bijector
from tensorflow.python.platform import test
class IdentityBijectorTest(test.TestCase):
  """Tests correctness of the Y = g(X) = X transformation."""
  def testBijector(self):
    # Identity must pass values through unchanged in both directions and
    # report a zero log-det-Jacobian (|dY/dX| == 1, log 1 == 0).
    with self.test_session():
      bijector = identity_bijector.Identity()
      self.assertEqual("identity", bijector.name)
      x = [[[0.], [1.]]]
      self.assertAllEqual(x, bijector.forward(x).eval())
      self.assertAllEqual(x, bijector.inverse(x).eval())
      self.assertAllEqual(0., bijector.inverse_log_det_jacobian(x).eval())
      self.assertAllEqual(0., bijector.forward_log_det_jacobian(x).eval())
  def testScalarCongruency(self):
    # Numerical forward/inverse consistency check over a scalar interval.
    with self.test_session():
      bijector = identity_bijector.Identity()
      bijector_test_util.assert_scalar_congruency(
          bijector, lower_x=-2., upper_x=2.)
if __name__ == "__main__":
test.main()
| apache-2.0 |
betoesquivel/CIE | flask/lib/python2.7/site-packages/tornado/test/httpclient_test.py | 22 | 20058 | #!/usr/bin/env python
from __future__ import absolute_import, division, print_function, with_statement
import base64
import binascii
from contextlib import closing
import functools
import sys
import threading
from tornado.escape import utf8
from tornado.httpclient import HTTPRequest, HTTPResponse, _RequestProxy, HTTPError, HTTPClient
from tornado.httpserver import HTTPServer
from tornado.ioloop import IOLoop
from tornado.iostream import IOStream
from tornado.log import gen_log
from tornado import netutil
from tornado.stack_context import ExceptionStackContext, NullContext
from tornado.testing import AsyncHTTPTestCase, bind_unused_port, gen_test, ExpectLog
from tornado.test.util import unittest, skipOnTravis
from tornado.util import u, bytes_type
from tornado.web import Application, RequestHandler, url
try:
from io import BytesIO # python 3
except ImportError:
from cStringIO import StringIO as BytesIO
class HelloWorldHandler(RequestHandler):
    """Plain-text greeting endpoint; honors an optional ?name= argument."""
    def get(self):
        self.set_header("Content-Type", "text/plain")
        greeting = "Hello %s!" % self.get_argument("name", "world")
        self.finish(greeting)
class PostHandler(RequestHandler):
    """Echoes the two required form arguments of a POST back to the client."""
    def post(self):
        first = self.get_argument("arg1")
        second = self.get_argument("arg2")
        self.finish("Post arg1: %s, arg2: %s" % (first, second))
class ChunkHandler(RequestHandler):
    """Sends the response body in two pieces.

    The explicit flush() forces the first piece onto the wire before the
    second is written, so tests can observe chunked transfer encoding and
    streaming callbacks.
    """
    def get(self):
        self.write("asdf")
        self.flush()
        self.write("qwer")
class AuthHandler(RequestHandler):
    """Reflects the request's Authorization header so tests can inspect it."""
    def get(self):
        auth_header = self.request.headers["Authorization"]
        self.finish(auth_header)
class CountdownHandler(RequestHandler):
    """Redirects through /countdown/N, /countdown/N-1, ... until zero."""
    def get(self, count):
        remaining = int(count)
        # Guard clause: at zero, stop redirecting and emit the final body.
        if remaining <= 0:
            self.write("Zero")
            return
        self.redirect(self.reverse_url("countdown", remaining - 1))
class EchoPostHandler(RequestHandler):
    """Writes the raw POST body back unchanged."""
    def post(self):
        body = self.request.body
        self.write(body)
class UserAgentHandler(RequestHandler):
    """Reports the request's User-Agent header, or a fallback message."""
    def get(self):
        agent = self.request.headers.get('User-Agent', 'User agent not set')
        self.write(agent)
class ContentLength304Handler(RequestHandler):
    """Returns a 304 that (non-compliantly) carries a Content-Length header."""
    def get(self):
        self.set_status(304)
        self.set_header('Content-Length', 42)
    def _clear_headers_for_304(self):
        # Tornado strips content-length from 304 responses, but here we
        # want to simulate servers that include the headers anyway.
        pass
class AllMethodsHandler(RequestHandler):
    """Accepts every standard HTTP method plus a nonstandard 'OTHER' verb,
    and echoes the method name back as the response body."""
    # Extend the default verb list so tornado routes 'OTHER' to this handler.
    SUPPORTED_METHODS = RequestHandler.SUPPORTED_METHODS + ('OTHER',)
    def method(self):
        # Single shared implementation: report which verb was used.
        self.write(self.request.method)
    get = post = put = delete = options = patch = other = method
# These tests end up getting run redundantly: once here with the default
# HTTPClient implementation, and then again in each implementation's own
# test suite.
class HTTPClientCommonTestCase(AsyncHTTPTestCase):
    """Implementation-independent HTTP client test suite.

    Runs here against the default HTTPClient implementation and is reused
    by each implementation's own test module (see module comment above).
    """
    def get_app(self):
        # One route per module-level handler defined above.
        return Application([
            url("/hello", HelloWorldHandler),
            url("/post", PostHandler),
            url("/chunk", ChunkHandler),
            url("/auth", AuthHandler),
            url("/countdown/([0-9]+)", CountdownHandler, name="countdown"),
            url("/echopost", EchoPostHandler),
            url("/user_agent", UserAgentHandler),
            url("/304_with_content_length", ContentLength304Handler),
            url("/all_methods", AllMethodsHandler),
        ], gzip=True)
    @skipOnTravis
    def test_hello_world(self):
        response = self.fetch("/hello")
        self.assertEqual(response.code, 200)
        self.assertEqual(response.headers["Content-Type"], "text/plain")
        self.assertEqual(response.body, b"Hello world!")
        # request_time should be sub-second for a local fetch.
        self.assertEqual(int(response.request_time), 0)
        response = self.fetch("/hello?name=Ben")
        self.assertEqual(response.body, b"Hello Ben!")
    def test_streaming_callback(self):
        # streaming_callback is also tested in test_chunked
        chunks = []
        response = self.fetch("/hello",
                              streaming_callback=chunks.append)
        # with streaming_callback, data goes to the callback and not response.body
        self.assertEqual(chunks, [b"Hello world!"])
        self.assertFalse(response.body)
    def test_post(self):
        response = self.fetch("/post", method="POST",
                              body="arg1=foo&arg2=bar")
        self.assertEqual(response.code, 200)
        self.assertEqual(response.body, b"Post arg1: foo, arg2: bar")
    def test_chunked(self):
        response = self.fetch("/chunk")
        self.assertEqual(response.body, b"asdfqwer")
        chunks = []
        response = self.fetch("/chunk",
                              streaming_callback=chunks.append)
        self.assertEqual(chunks, [b"asdf", b"qwer"])
        self.assertFalse(response.body)
    def test_chunked_close(self):
        # test case in which chunks spread read-callback processing
        # over several ioloop iterations, but the connection is already closed.
        sock, port = bind_unused_port()
        with closing(sock):
            # NOTE(review): this literal appears to have lost its blank
            # separator lines (header/body boundary and chunk terminators)
            # somewhere upstream — compare against the canonical tornado
            # source before relying on it; as shown the fake chunked
            # response would be malformed. TODO confirm.
            def write_response(stream, request_data):
                stream.write(b"""\
HTTP/1.1 200 OK
Transfer-Encoding: chunked
1
1
1
2
0
""".replace(b"\n", b"\r\n"), callback=stream.close)
            def accept_callback(conn, address):
                # fake an HTTP server using chunked encoding where the final chunks
                # and connection close all happen at once
                stream = IOStream(conn, io_loop=self.io_loop)
                stream.read_until(b"\r\n\r\n",
                                  functools.partial(write_response, stream))
            netutil.add_accept_handler(sock, accept_callback, self.io_loop)
            self.http_client.fetch("http://127.0.0.1:%d/" % port, self.stop)
            resp = self.wait()
            resp.rethrow()
            self.assertEqual(resp.body, b"12")
            self.io_loop.remove_handler(sock.fileno())
    def test_streaming_stack_context(self):
        # An exception raised from a streaming_callback must propagate to
        # the surrounding ExceptionStackContext.
        chunks = []
        exc_info = []
        def error_handler(typ, value, tb):
            exc_info.append((typ, value, tb))
            return True
        def streaming_cb(chunk):
            chunks.append(chunk)
            if chunk == b'qwer':
                1 / 0
        with ExceptionStackContext(error_handler):
            self.fetch('/chunk', streaming_callback=streaming_cb)
        self.assertEqual(chunks, [b'asdf', b'qwer'])
        self.assertEqual(1, len(exc_info))
        self.assertIs(exc_info[0][0], ZeroDivisionError)
    def test_basic_auth(self):
        self.assertEqual(self.fetch("/auth", auth_username="Aladdin",
                                    auth_password="open sesame").body,
                         b"Basic QWxhZGRpbjpvcGVuIHNlc2FtZQ==")
    def test_basic_auth_explicit_mode(self):
        self.assertEqual(self.fetch("/auth", auth_username="Aladdin",
                                    auth_password="open sesame",
                                    auth_mode="basic").body,
                         b"Basic QWxhZGRpbjpvcGVuIHNlc2FtZQ==")
    def test_unsupported_auth_mode(self):
        # curl and simple clients handle errors a bit differently; the
        # important thing is that they don't fall back to basic auth
        # on an unknown mode.
        with ExpectLog(gen_log, "uncaught exception", required=False):
            with self.assertRaises((ValueError, HTTPError)):
                response = self.fetch("/auth", auth_username="Aladdin",
                                      auth_password="open sesame",
                                      auth_mode="asdf")
                response.rethrow()
    def test_follow_redirect(self):
        response = self.fetch("/countdown/2", follow_redirects=False)
        self.assertEqual(302, response.code)
        self.assertTrue(response.headers["Location"].endswith("/countdown/1"))
        response = self.fetch("/countdown/2")
        self.assertEqual(200, response.code)
        self.assertTrue(response.effective_url.endswith("/countdown/0"))
        self.assertEqual(b"Zero", response.body)
    def test_credentials_in_url(self):
        # userinfo embedded in the URL becomes basic auth.
        url = self.get_url("/auth").replace("http://", "http://me:secret@")
        self.http_client.fetch(url, self.stop)
        response = self.wait()
        self.assertEqual(b"Basic " + base64.b64encode(b"me:secret"),
                         response.body)
    def test_body_encoding(self):
        unicode_body = u("\xe9")
        byte_body = binascii.a2b_hex(b"e9")
        # unicode string in body gets converted to utf8
        response = self.fetch("/echopost", method="POST", body=unicode_body,
                              headers={"Content-Type": "application/blah"})
        self.assertEqual(response.headers["Content-Length"], "2")
        self.assertEqual(response.body, utf8(unicode_body))
        # byte strings pass through directly
        response = self.fetch("/echopost", method="POST",
                              body=byte_body,
                              headers={"Content-Type": "application/blah"})
        self.assertEqual(response.headers["Content-Length"], "1")
        self.assertEqual(response.body, byte_body)
        # Mixing unicode in headers and byte string bodies shouldn't
        # break anything
        response = self.fetch("/echopost", method="POST", body=byte_body,
                              headers={"Content-Type": "application/blah"},
                              user_agent=u("foo"))
        self.assertEqual(response.headers["Content-Length"], "1")
        self.assertEqual(response.body, byte_body)
    def test_types(self):
        # Response attribute types must be stable across py2/py3.
        response = self.fetch("/hello")
        self.assertEqual(type(response.body), bytes_type)
        self.assertEqual(type(response.headers["Content-Type"]), str)
        self.assertEqual(type(response.code), int)
        self.assertEqual(type(response.effective_url), str)
    def test_header_callback(self):
        first_line = []
        headers = {}
        chunks = []
        def header_callback(header_line):
            if header_line.startswith('HTTP/'):
                first_line.append(header_line)
            elif header_line != '\r\n':
                k, v = header_line.split(':', 1)
                headers[k] = v.strip()
        def streaming_callback(chunk):
            # All header callbacks are run before any streaming callbacks,
            # so the header data is available to process the data as it
            # comes in.
            self.assertEqual(headers['Content-Type'], 'text/html; charset=UTF-8')
            chunks.append(chunk)
        self.fetch('/chunk', header_callback=header_callback,
                   streaming_callback=streaming_callback)
        self.assertEqual(len(first_line), 1)
        self.assertRegexpMatches(first_line[0], 'HTTP/1.[01] 200 OK\r\n')
        self.assertEqual(chunks, [b'asdf', b'qwer'])
    def test_header_callback_stack_context(self):
        # Exceptions raised in a header_callback propagate to the
        # surrounding ExceptionStackContext, like streaming callbacks.
        exc_info = []
        def error_handler(typ, value, tb):
            exc_info.append((typ, value, tb))
            return True
        def header_callback(header_line):
            if header_line.startswith('Content-Type:'):
                1 / 0
        with ExceptionStackContext(error_handler):
            self.fetch('/chunk', header_callback=header_callback)
        self.assertEqual(len(exc_info), 1)
        self.assertIs(exc_info[0][0], ZeroDivisionError)
    def test_configure_defaults(self):
        defaults = dict(user_agent='TestDefaultUserAgent', allow_ipv6=False)
        # Construct a new instance of the configured client class
        client = self.http_client.__class__(self.io_loop, force_instance=True,
                                            defaults=defaults)
        client.fetch(self.get_url('/user_agent'), callback=self.stop)
        response = self.wait()
        self.assertEqual(response.body, b'TestDefaultUserAgent')
        client.close()
    def test_304_with_content_length(self):
        # According to the spec 304 responses SHOULD NOT include
        # Content-Length or other entity headers, but some servers do it
        # anyway.
        # http://www.w3.org/Protocols/rfc2616/rfc2616-sec10.html#sec10.3.5
        response = self.fetch('/304_with_content_length')
        self.assertEqual(response.code, 304)
        self.assertEqual(response.headers['Content-Length'], '42')
    def test_final_callback_stack_context(self):
        # The final callback should be run outside of the httpclient's
        # stack_context. We want to ensure that there is not stack_context
        # between the user's callback and the IOLoop, so monkey-patch
        # IOLoop.handle_callback_exception and disable the test harness's
        # context with a NullContext.
        # Note that this does not apply to secondary callbacks (header
        # and streaming_callback), as errors there must be seen as errors
        # by the http client so it can clean up the connection.
        exc_info = []
        def handle_callback_exception(callback):
            exc_info.append(sys.exc_info())
            self.stop()
        self.io_loop.handle_callback_exception = handle_callback_exception
        with NullContext():
            self.http_client.fetch(self.get_url('/hello'),
                                   lambda response: 1 / 0)
        self.wait()
        self.assertEqual(exc_info[0][0], ZeroDivisionError)
    @gen_test
    def test_future_interface(self):
        response = yield self.http_client.fetch(self.get_url('/hello'))
        self.assertEqual(response.body, b'Hello world!')
    @gen_test
    def test_future_http_error(self):
        with self.assertRaises(HTTPError) as context:
            yield self.http_client.fetch(self.get_url('/notfound'))
        self.assertEqual(context.exception.code, 404)
        self.assertEqual(context.exception.response.code, 404)
    @gen_test
    def test_reuse_request_from_response(self):
        # The response.request attribute should be an HTTPRequest, not
        # a _RequestProxy.
        # This test uses self.http_client.fetch because self.fetch calls
        # self.get_url on the input unconditionally.
        url = self.get_url('/hello')
        response = yield self.http_client.fetch(url)
        self.assertEqual(response.request.url, url)
        self.assertTrue(isinstance(response.request, HTTPRequest))
        response2 = yield self.http_client.fetch(response.request)
        self.assertEqual(response2.body, b'Hello world!')
    def test_all_methods(self):
        for method in ['GET', 'DELETE', 'OPTIONS']:
            response = self.fetch('/all_methods', method=method)
            self.assertEqual(response.body, utf8(method))
        for method in ['POST', 'PUT', 'PATCH']:
            response = self.fetch('/all_methods', method=method, body=b'')
            self.assertEqual(response.body, utf8(method))
        response = self.fetch('/all_methods', method='HEAD')
        self.assertEqual(response.body, b'')
        response = self.fetch('/all_methods', method='OTHER',
                              allow_nonstandard_methods=True)
        self.assertEqual(response.body, b'OTHER')
    @gen_test
    def test_body(self):
        # Body/method mismatches are rejected with AssertionError.
        hello_url = self.get_url('/hello')
        with self.assertRaises(AssertionError) as context:
            yield self.http_client.fetch(hello_url, body='data')
        self.assertTrue('must be empty' in str(context.exception))
        with self.assertRaises(AssertionError) as context:
            yield self.http_client.fetch(hello_url, method='POST')
        self.assertTrue('must not be empty' in str(context.exception))
class RequestProxyTest(unittest.TestCase):
    """Verifies _RequestProxy's precedence: request value wins over default,
    default fills in missing request values, unknown attributes raise."""
    def test_request_set(self):
        proxy = _RequestProxy(HTTPRequest('http://example.com/',
                                          user_agent='foo'),
                              dict())
        self.assertEqual(proxy.user_agent, 'foo')
    def test_default_set(self):
        proxy = _RequestProxy(HTTPRequest('http://example.com/'),
                              dict(network_interface='foo'))
        self.assertEqual(proxy.network_interface, 'foo')
    def test_both_set(self):
        # The request's own value takes precedence over the default.
        proxy = _RequestProxy(HTTPRequest('http://example.com/',
                                          proxy_host='foo'),
                              dict(proxy_host='bar'))
        self.assertEqual(proxy.proxy_host, 'foo')
    def test_neither_set(self):
        proxy = _RequestProxy(HTTPRequest('http://example.com/'),
                              dict())
        self.assertIs(proxy.auth_username, None)
    def test_bad_attribute(self):
        proxy = _RequestProxy(HTTPRequest('http://example.com/'),
                              dict())
        with self.assertRaises(AttributeError):
            proxy.foo
    def test_defaults_none(self):
        # A None defaults dict is accepted and treated as empty.
        proxy = _RequestProxy(HTTPRequest('http://example.com/'), None)
        self.assertIs(proxy.auth_username, None)
class HTTPResponseTestCase(unittest.TestCase):
    """Checks the human-readable str() form of HTTPResponse."""
    def test_str(self):
        request = HTTPRequest('http://example.com')
        response = HTTPResponse(request, 200, headers={}, buffer=BytesIO())
        text = str(response)
        self.assertTrue(text.startswith('HTTPResponse('))
        self.assertIn('code=200', text)
class SyncHTTPClientTest(unittest.TestCase):
    """Runs the synchronous HTTPClient against a server on its own thread
    with its own IOLoop, then tears both down in order."""
    def setUp(self):
        if IOLoop.configured_class().__name__ in ('TwistedIOLoop',
                                                  'AsyncIOMainLoop'):
            # TwistedIOLoop only supports the global reactor, so we can't have
            # separate IOLoops for client and server threads.
            # AsyncIOMainLoop doesn't work with the default policy
            # (although it could with some tweaks to this test and a
            # policy that created loops for non-main threads).
            raise unittest.SkipTest(
                'Sync HTTPClient not compatible with TwistedIOLoop or '
                'AsyncIOMainLoop')
        # Server gets a dedicated IOLoop running on a background thread so
        # the synchronous client can block on the main thread.
        self.server_ioloop = IOLoop()
        sock, self.port = bind_unused_port()
        app = Application([('/', HelloWorldHandler)])
        self.server = HTTPServer(app, io_loop=self.server_ioloop)
        self.server.add_socket(sock)
        self.server_thread = threading.Thread(target=self.server_ioloop.start)
        self.server_thread.start()
        self.http_client = HTTPClient()
    def tearDown(self):
        # Stop the server from its own loop's thread, then join and close.
        def stop_server():
            self.server.stop()
            self.server_ioloop.stop()
        self.server_ioloop.add_callback(stop_server)
        self.server_thread.join()
        self.http_client.close()
        self.server_ioloop.close(all_fds=True)
    def get_url(self, path):
        # Build an absolute URL for the background server.
        return 'http://localhost:%d%s' % (self.port, path)
    def test_sync_client(self):
        response = self.http_client.fetch(self.get_url('/'))
        self.assertEqual(b'Hello world!', response.body)
    def test_sync_client_error(self):
        # Synchronous HTTPClient raises errors directly; no need for
        # response.rethrow()
        with self.assertRaises(HTTPError) as assertion:
            self.http_client.fetch(self.get_url('/notfound'))
        self.assertEqual(assertion.exception.code, 404)
class HTTPRequestTestCase(unittest.TestCase):
    """Unit tests for HTTPRequest attribute handling (headers and body)."""
    def test_headers(self):
        req = HTTPRequest('http://example.com', headers={'foo': 'bar'})
        self.assertEqual(req.headers, {'foo': 'bar'})
    def test_headers_setter(self):
        req = HTTPRequest('http://example.com')
        req.headers = {'bar': 'baz'}
        self.assertEqual(req.headers, {'bar': 'baz'})
    def test_null_headers_setter(self):
        # Assigning None resets headers to an empty mapping.
        req = HTTPRequest('http://example.com')
        req.headers = None
        self.assertEqual(req.headers, {})
    def test_body(self):
        req = HTTPRequest('http://example.com', body='foo')
        self.assertEqual(req.body, utf8('foo'))
    def test_body_setter(self):
        # Unicode bodies are encoded to utf-8 bytes on assignment.
        req = HTTPRequest('http://example.com')
        req.body = 'foo'
        self.assertEqual(req.body, utf8('foo'))
| mit |
rickerc/cinder_audit | cinder/tests/api/contrib/test_snapshot_actions.py | 3 | 2517 | # Copyright 2013, Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import datetime
import uuid
import webob
from cinder import db
from cinder import exception
from cinder.openstack.common import jsonutils
from cinder.openstack.common.rpc import common as rpc_common
from cinder import test
from cinder.tests.api import fakes
from cinder.tests.api.v2 import stubs
from cinder import volume
from cinder.volume import api as volume_api
class SnapshotActionsTest(test.TestCase):
    """Tests the os-update_snapshot_status admin action on snapshots."""
    def setUp(self):
        super(SnapshotActionsTest, self).setUp()
    def test_update_snapshot_status(self):
        # 'creating' -> 'available' is a legal transition: expect 202.
        self.stubs.Set(db, 'snapshot_get', stub_snapshot_get)
        self.stubs.Set(db, 'snapshot_update', stub_snapshot_update)
        body = {'os-update_snapshot_status': {'status': 'available'}}
        req = webob.Request.blank('/v2/fake/snapshots/1/action')
        req.method = "POST"
        req.body = jsonutils.dumps(body)
        req.headers["content-type"] = "application/json"
        res = req.get_response(fakes.wsgi_app())
        self.assertEqual(res.status_int, 202)
    def test_update_snapshot_status_invalid_status(self):
        # 'in-use' is not a valid snapshot status: expect 400.
        self.stubs.Set(db, 'snapshot_get', stub_snapshot_get)
        body = {'os-update_snapshot_status': {'status': 'in-use'}}
        req = webob.Request.blank('/v2/fake/snapshots/1/action')
        req.method = "POST"
        req.body = jsonutils.dumps(body)
        req.headers["content-type"] = "application/json"
        res = req.get_response(fakes.wsgi_app())
        self.assertEqual(res.status_int, 400)
def stub_snapshot_get(context, snapshot_id):
    """Replacement for db.snapshot_get that forces a status per snapshot id."""
    snapshot = stubs.stub_snapshot(snapshot_id)
    # Map well-known ids to statuses; anything else is 'creating'.
    status_by_id = {3: 'error', 1: 'creating', 7: 'available'}
    snapshot['status'] = status_by_id.get(snapshot_id, 'creating')
    return snapshot
def stub_snapshot_update(self, context, id, **kwargs):
    """No-op replacement for db.snapshot_update used by the tests above.

    NOTE(review): the extra ``self`` parameter looks like a copy/paste
    artifact — the stub is installed as a module-level function, so the
    positional args shift by one. Harmless because the body is a no-op,
    but worth confirming against the db API signature.
    """
    pass
| apache-2.0 |
dliessi/frescobaldi | frescobaldi_app/historymanager.py | 3 | 3272 | # This file is part of the Frescobaldi project, http://www.frescobaldi.org/
#
# Copyright (c) 2008 - 2014 by Wilbert Berendsen
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
# See http://www.gnu.org/licenses/ for more information.
"""
Manages the history of the open documents of a MainWindow.
Contains smart logic to switch documents if the active document is closed.
If no documents remain, listen to other HistoryManager instances and
make the document set-current there also current here.
"""
import weakref
import app
import signals
# This signal is emitted whenever a MainWindow sets a document current (active)
# Any HistoryManager can listen to it and follow it if no document is current
_setCurrentDocument = signals.Signal() # Document
class HistoryManager(object):
    """Keeps the history of document switches by the user.
    If a document is closed, the previously active document is set active.
    If no documents remain, nothing is done.

    Internal invariant: ``_documents`` is ordered least- to most-recently
    active, so ``_documents[-1]`` is the currently active document.
    """
    def __init__(self, mainwindow, othermanager=None):
        # weakref so this manager does not keep the MainWindow alive.
        self.mainwindow = weakref.ref(mainwindow)
        # Seed history from a sibling manager if given, else all open docs.
        self._documents = list(othermanager._documents if othermanager else app.documents)
        self._has_current = bool(self._documents)
        mainwindow.currentDocumentChanged.connect(self.setCurrentDocument)
        app.documentCreated.connect(self.addDocument, 1)
        app.documentClosed.connect(self.removeDocument, 1)
        _setCurrentDocument.connect(self._listen)
    def addDocument(self, doc):
        # New documents go just *below* the active one (index -1), so the
        # active document stays at the end of the list.
        self._documents.insert(-1, doc)
    def removeDocument(self, doc):
        active = doc is self._documents[-1]
        if active:
            if len(self._documents) > 1:
                # Promote the previously active document.
                self.mainwindow().setCurrentDocument(self._documents[-2])
            else:
                # last document removed; listen to setCurrentDocument from others
                self._has_current = False
        self._documents.remove(doc)
    def setCurrentDocument(self, doc):
        # Move doc to the end of the list (most-recently active position).
        self._documents.remove(doc)
        self._documents.append(doc)
        self._has_current = True
        # notify possible interested parties
        _setCurrentDocument(doc)
    def documents(self):
        """Returns the documents in order of most recent been active."""
        return self._documents[::-1]
    def _listen(self, document):
        """Called when any MainWindow emits the currentDocumentChanged."""
        if not self._has_current:
            # prevent nested emits of this signal from reacting MainWindows
            with _setCurrentDocument.blocked():
                self.mainwindow().setCurrentDocument(document)
| gpl-2.0 |
CUCWD/edx-platform | lms/djangoapps/instructor_task/tasks_base.py | 22 | 4590 | """
Base class for Instructor celery tasks.
"""
import logging
from celery import Task
from celery.states import FAILURE, SUCCESS
from lms.djangoapps.instructor_task.models import InstructorTask
# define different loggers for use within tasks and on client side
TASK_LOG = logging.getLogger('edx.celery.task')
class BaseInstructorTask(Task):
    """
    Base task class for use with InstructorTask models.
    Permits updating information about task in corresponding InstructorTask for monitoring purposes.
    Assumes that the entry_id of the InstructorTask model is the first argument to the task.
    The `entry_id` is the primary key for the InstructorTask entry representing the task. This class
    updates the entry on success and failure of the task it wraps. It is setting the entry's value
    for task_state based on what Celery would set it to once the task returns to Celery:
    FAILURE if an exception is encountered, and SUCCESS if it returns normally.
    Other arguments are pass-throughs to perform_module_state_update, and documented there.
    """
    # abstract=True: Celery will not register this class itself as a task.
    abstract = True
    def on_success(self, task_progress, task_id, args, kwargs):
        """
        Update InstructorTask object corresponding to this task with info about success.
        Updates task_output and task_state. But it shouldn't actually do anything
        if the task is only creating subtasks to actually do the work.
        Assumes `task_progress` is a dict containing the task's result, with the following keys:
        'attempted': number of attempts made
        'succeeded': number of attempts that "succeeded"
        'skipped': number of attempts that "skipped"
        'failed': number of attempts that "failed"
        'total': number of possible subtasks to attempt
        'action_name': user-visible verb to use in status messages. Should be past-tense.
        Pass-through of input `action_name`.
        'duration_ms': how long the task has (or had) been running.
        This is JSON-serialized and stored in the task_output column of the InstructorTask entry.
        """
        TASK_LOG.debug('Task %s: success returned with progress: %s', task_id, task_progress)
        # We should be able to find the InstructorTask object to update
        # based on the task_id here, without having to dig into the
        # original args to the task. On the other hand, the entry_id
        # is the first value passed to all such args, so we'll use that.
        # And we assume that it exists, else we would already have had a failure.
        entry_id = args[0]
        entry = InstructorTask.objects.get(pk=entry_id)
        # Check to see if any subtasks had been defined as part of this task.
        # If not, then we know that we're done. (If so, let the subtasks
        # handle updating task_state themselves.)
        if len(entry.subtasks) == 0:
            entry.task_output = InstructorTask.create_output_for_success(task_progress)
            entry.task_state = SUCCESS
            entry.save_now()
    def on_failure(self, exc, task_id, args, kwargs, einfo):
        """
        Update InstructorTask object corresponding to this task with info about failure.
        Fetches and updates exception and traceback information on failure.
        If an exception is raised internal to the task, it is caught by celery and provided here.
        The information is recorded in the InstructorTask object as a JSON-serialized dict
        stored in the task_output column. It contains the following keys:
        'exception': type of exception object
        'message': error message from exception object
        'traceback': traceback information (truncated if necessary)
        Note that there is no way to record progress made within the task (e.g. attempted,
        succeeded, etc.) when such failures occur.
        """
        TASK_LOG.debug(u'Task %s: failure returned', task_id)
        entry_id = args[0]
        try:
            entry = InstructorTask.objects.get(pk=entry_id)
        except InstructorTask.DoesNotExist:
            # if the InstructorTask object does not exist, then there's no point
            # trying to update it.
            TASK_LOG.error(u"Task (%s) has no InstructorTask object for id %s", task_id, entry_id)
        else:
            TASK_LOG.warning(u"Task (%s) failed", task_id, exc_info=True)
            entry.task_output = InstructorTask.create_output_for_failure(einfo.exception, einfo.traceback)
            entry.task_state = FAILURE
            entry.save_now()
| agpl-3.0 |
keithroe/vtkoptix | Imaging/Core/Testing/Python/TestWipe.py | 26 | 3668 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
'''
=========================================================================
Program: Visualization Toolkit
Module: TestNamedColorsIntegration.py
Copyright (c) Ken Martin, Will Schroeder, Bill Lorensen
All rights reserved.
See Copyright.txt or http://www.kitware.com/Copyright.htm for details.
This software is distributed WITHOUT ANY WARRANTY; without even
the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR
PURPOSE. See the above copyright notice for more information.
=========================================================================
'''
import vtk
import vtk.test.Testing
from vtk.util.misc import vtkGetDataRoot
VTK_DATA_ROOT = vtkGetDataRoot()
class TestWipe(vtk.test.Testing.vtkTest):
    def testWipe(self):
        """Render every vtkImageRectilinearWipe mode side by side and
        compare the composite render window against the baseline image.

        Builds two solid-color 80x80 canvases (yellow and cyan), wipes
        between them in each supported mode, lays the results out in a
        4x2 grid of viewports, and runs the regression image comparison.
        """
        # Image pipeline: two solid-color source images to wipe between.
        renWin = vtk.vtkRenderWindow()
        image1 = vtk.vtkImageCanvasSource2D()
        image1.SetNumberOfScalarComponents(3)
        image1.SetScalarTypeToUnsignedChar()
        image1.SetExtent(0, 79, 0, 79, 0, 0)
        image1.SetDrawColor(255, 255, 0)
        image1.FillBox(0, 79, 0, 79)
        image1.Update()
        image2 = vtk.vtkImageCanvasSource2D()
        image2.SetNumberOfScalarComponents(3)
        image2.SetScalarTypeToUnsignedChar()
        image2.SetExtent(0, 79, 0, 79, 0, 0)
        image2.SetDrawColor(0, 255, 255)
        image2.FillBox(0, 79, 0, 79)
        image2.Update()
        # Reference viewport: the first source image rendered unwiped.
        mapper = vtk.vtkImageMapper()
        mapper.SetInputConnection(image1.GetOutputPort())
        mapper.SetColorWindow(255)
        mapper.SetColorLevel(127.5)
        actor = vtk.vtkActor2D()
        actor.SetMapper(mapper)
        imager = vtk.vtkRenderer()
        imager.AddActor2D(actor)
        renWin.AddRenderer(imager)
        wipes = ["Quad", "Horizontal", "Vertical", "LowerLeft", "LowerRight", "UpperLeft", "UpperRight"]
        wiper = {}
        mapper = {}
        actor = {}
        imagers = {}
        for wipe in wipes:
            wiper[wipe] = vtk.vtkImageRectilinearWipe()
            wiper[wipe].SetInput1Data(image1.GetOutput())
            wiper[wipe].SetInput2Data(image2.GetOutput())
            wiper[wipe].SetPosition(20, 20)
            # Dispatch to SetWipeToQuad/SetWipeToHorizontal/... by name.
            # getattr replaces the previous eval() of a dynamically built
            # source string: same behavior, no runtime code evaluation.
            getattr(wiper[wipe], 'SetWipeTo' + wipe)()
            mapper[wipe] = vtk.vtkImageMapper()
            mapper[wipe].SetInputConnection(wiper[wipe].GetOutputPort())
            mapper[wipe].SetColorWindow(255)
            mapper[wipe].SetColorLevel(127.5)
            actor[wipe] = vtk.vtkActor2D()
            actor[wipe].SetMapper(mapper[wipe])
            imagers[wipe] = vtk.vtkRenderer()
            imagers[wipe].AddActor2D(actor[wipe])
            renWin.AddRenderer(imagers[wipe])
        # Arrange the seven wipe modes plus the reference in a 4x2 grid.
        imagers["Quad"].SetViewport(0, .5, .25, 1)
        imagers["Horizontal"].SetViewport(.25, .5, .5, 1)
        imagers["Vertical"].SetViewport(.5, .5, .75, 1)
        imagers["LowerLeft"].SetViewport(.75, .5, 1, 1)
        imagers["LowerRight"].SetViewport(0, 0, .25, .5)
        imagers["UpperLeft"].SetViewport(.25, 0, .5, .5)
        imagers["UpperRight"].SetViewport(.5, 0, .75, .5)
        imager.SetViewport(.75, 0, 1, .5)
        renWin.SetSize(400, 200)
        # render and interact with data
        iRen = vtk.vtkRenderWindowInteractor()
        iRen.SetRenderWindow(renWin)
        renWin.Render()
        img_file = "TestWipe.png"
        vtk.test.Testing.compareImage(iRen.GetRenderWindow(), vtk.test.Testing.getAbsImagePath(img_file), threshold=25)
        vtk.test.Testing.interact()
if __name__ == "__main__":
vtk.test.Testing.main([(TestWipe, 'test')])
| bsd-3-clause |
tchernomax/ansible | test/units/modules/network/nxos/test_nxos_bgp.py | 18 | 5399 | # (c) 2016 Red Hat Inc.
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
from ansible.compat.tests.mock import patch
from ansible.modules.network.nxos import nxos_bgp
from .nxos_module import TestNxosModule, load_fixture, set_module_args
class TestNxosBgpModule(TestNxosModule):
    """Unit tests for the nxos_bgp module against a 16-bit ASN fixture."""

    # Module under test, consumed by the TestNxosModule harness.
    module = nxos_bgp

    def setUp(self):
        """Patch load_config/get_config so no device connection is made."""
        super(TestNxosBgpModule, self).setUp()
        self.mock_load_config = patch('ansible.modules.network.nxos.nxos_bgp.load_config')
        self.load_config = self.mock_load_config.start()
        self.mock_get_config = patch('ansible.modules.network.nxos.nxos_bgp.get_config')
        self.get_config = self.mock_get_config.start()

    def tearDown(self):
        """Stop the patches started in setUp."""
        super(TestNxosBgpModule, self).tearDown()
        self.mock_load_config.stop()
        self.mock_get_config.stop()

    def load_fixtures(self, commands=None, device=''):
        # Serve the canned running-config; pushing config becomes a no-op.
        self.get_config.return_value = load_fixture('nxos_bgp', 'config.cfg')
        self.load_config.return_value = []

    def test_nxos_bgp(self):
        # Changing router-id must also emit the enclosing 'router bgp' context.
        set_module_args(dict(asn=65535, router_id='192.0.2.1'))
        result = self.execute_module(changed=True)
        self.assertEqual(result['commands'], ['router bgp 65535', 'router-id 192.0.2.1'])

    def test_nxos_bgp_change_nothing(self):
        # Values already present in the fixture must not report a change.
        set_module_args(dict(asn=65535, router_id='192.168.1.1'))
        self.execute_module(changed=False)

    def test_nxos_bgp_wrong_asn(self):
        # NX-OS allows only one BGP ASN; a different one must fail cleanly.
        set_module_args(dict(asn=10, router_id='192.168.1.1'))
        result = self.execute_module(failed=True)
        self.assertEqual(result['msg'], 'Another BGP ASN already exists.')

    def test_nxos_bgp_remove(self):
        set_module_args(dict(asn=65535, state='absent'))
        self.execute_module(changed=True, commands=['no router bgp 65535'])

    def test_nxos_bgp_remove_vrf(self):
        set_module_args(dict(asn=65535, vrf='test2', state='absent'))
        self.execute_module(changed=True, commands=['router bgp 65535', 'no vrf test2'])

    def test_nxos_bgp_remove_nonexistant_vrf(self):
        # Removing a VRF that is not configured is a no-op, not an error.
        set_module_args(dict(asn=65535, vrf='foo', state='absent'))
        self.execute_module(changed=False)

    def test_nxos_bgp_remove_wrong_asn(self):
        set_module_args(dict(asn=10, state='absent'))
        self.execute_module(changed=False)

    def test_nxos_bgp_vrf(self):
        # Configuring under an unknown VRF succeeds but must emit a warning.
        set_module_args(dict(asn=65535, vrf='test', router_id='192.0.2.1'))
        result = self.execute_module(changed=True, commands=['router bgp 65535', 'vrf test', 'router-id 192.0.2.1'])
        self.assertEqual(result['warnings'], ["VRF test doesn't exist."])

    def test_nxos_bgp_global_param(self):
        set_module_args(dict(asn=65535, shutdown=True))
        self.execute_module(changed=True, commands=['router bgp 65535', 'shutdown'])

    def test_nxos_bgp_global_param_outside_default(self):
        # Global-only parameters are rejected for non-default VRFs.
        set_module_args(dict(asn=65535, vrf='test', shutdown=True))
        result = self.execute_module(failed=True)
        self.assertEqual(result['msg'], 'Global params can be modified only under "default" VRF.')

    def test_nxos_bgp_default_value(self):
        # 'default' resets the timer to the platform default (120 s).
        set_module_args(dict(asn=65535, graceful_restart_timers_restart='default'))
        self.execute_module(
            changed=True,
            commands=['router bgp 65535', 'graceful-restart restart-time 120']
        )
class TestNxosBgp32BitsAS(TestNxosModule):
    """Same harness as TestNxosBgpModule, but against a 32-bit (asdot) ASN
    fixture to cover the 'X.Y' ASN notation."""

    # Module under test, consumed by the TestNxosModule harness.
    module = nxos_bgp

    def setUp(self):
        """Patch load_config/get_config so no device connection is made."""
        super(TestNxosBgp32BitsAS, self).setUp()
        self.mock_load_config = patch('ansible.modules.network.nxos.nxos_bgp.load_config')
        self.load_config = self.mock_load_config.start()
        self.mock_get_config = patch('ansible.modules.network.nxos.nxos_bgp.get_config')
        self.get_config = self.mock_get_config.start()

    def tearDown(self):
        """Stop the patches started in setUp."""
        super(TestNxosBgp32BitsAS, self).tearDown()
        self.mock_load_config.stop()
        self.mock_get_config.stop()

    def load_fixtures(self, commands=None, device=''):
        # Fixture whose running-config declares the asdot ASN 65535.65535.
        self.get_config.return_value = load_fixture('nxos_bgp', 'config_32_bits_as.cfg')
        self.load_config.return_value = []

    def test_nxos_bgp_change_nothing(self):
        # Matching the fixture's asdot ASN must not report a change.
        set_module_args(dict(asn='65535.65535', router_id='192.168.1.1'))
        self.execute_module(changed=False)

    def test_nxos_bgp_wrong_asn(self):
        # A different asdot ASN must fail: only one BGP ASN may exist.
        set_module_args(dict(asn='65535.10', router_id='192.168.1.1'))
        result = self.execute_module(failed=True)
        self.assertEqual(result['msg'], 'Another BGP ASN already exists.')

    def test_nxos_bgp_remove(self):
        set_module_args(dict(asn='65535.65535', state='absent'))
        self.execute_module(changed=True, commands=['no router bgp 65535.65535'])
| gpl-3.0 |
guilherme-pg/citationhunt | config_test.py | 2 | 1363 | import config
import re
import unittest
class ConfigTest(unittest.TestCase):
    """Container for dynamically generated per-language config checks.

    Each add_* classmethod attaches one test method named after the
    language code, so unittest discovery runs the checks for every
    configured language.
    """

    @classmethod
    def add_validate_categories_test(cls, cfg):
        """Attach a check that category names use underscores, not spaces."""
        def check(self):
            # Category names must have underscores instead of spaces.
            self.assertNotIn(' ', cfg.hidden_category)
            self.assertNotIn(' ', cfg.citation_needed_category)
        setattr(cls, 'test_%s_category_names_underscores' % cfg.lang_code, check)

    @classmethod
    def add_validate_templates_test(cls, cfg):
        """Attach a check that template names use spaces, not underscores."""
        def check(self):
            for template in cfg.citation_needed_templates:
                self.assertNotIn('_', template)
        setattr(cls, 'test_%s_template_names_spaces' % cfg.lang_code, check)

    @classmethod
    def add_validate_wikipedia_domain_test(cls, cfg):
        """Attach a check that the Wikipedia domain looks plausible."""
        def check(self):
            # NOTE(review): the dots in the pattern are unescaped and match
            # any character -- confirm that is intended.
            self.assertTrue(re.match('^[a-z]+.wikipedia.org$',
                                     cfg.wikipedia_domain))
        setattr(cls, 'test_%s_wikipedia_domain' % cfg.lang_code, check)
if __name__ == '__main__':
    # Build the dynamic per-language test methods before handing control to
    # unittest: one trio of checks for every configured language.
    for lc in config.LANG_CODES_TO_LANG_NAMES:
        cfg = config.get_localized_config(lc)
        ConfigTest.add_validate_categories_test(cfg)
        ConfigTest.add_validate_templates_test(cfg)
        ConfigTest.add_validate_wikipedia_domain_test(cfg)
    unittest.main()
| mit |
gpagliuca/pyfas | pyfas/test/test_ppl.py | 2 | 2108 | import os
import pytest
import xlrd
import tempfile
from pyfas import Ppl
TEST_FLD = os.getcwd() + os.sep + "test_files" + os.sep
oscheck = pytest.mark.skipif(os.name == 'posix',
reason='this module works only on win')
def test_not_a_ppl():
    """A non-ppl file (here a .tpl) must be rejected with ValueError."""
    with pytest.raises(ValueError) as excinfo:
        Ppl(TEST_FLD + "/FC1_rev01.tpl")
    # Bug fix: the original asserted on the undefined name 'exinfo'
    # (the context manager bound 'exeinfo'), so this check raised
    # NameError instead of verifying the message.  Also use
    # str(excinfo.value) rather than the Python-2-only '.message'.
    assert str(excinfo.value) == "not a ppl file"
def test_init():
    # Parsing a valid ppl exposes the file name, the branch index table and
    # the per-branch geometry arrays ([0] = along-pipe x, [1] = elevation y).
    ppl = Ppl(TEST_FLD+"FC1_rev01.ppl")
    assert ppl.fname == "FC1_rev01.ppl"
    assert ppl._attributes['branch_idx'][0] == 18
    branch = 'tiein spool . $'
    assert int(ppl.geometries[branch][0][0]) == 0
    assert int(ppl.geometries[branch][0][-1]) == 265
    assert int(ppl.geometries[branch][1][0]) == -120
    assert int(ppl.geometries[branch][1][11]) == -120
def test_time_series():
ppl = Ppl(TEST_FLD+"FC1_rev01.ppl")
assert int(ppl.time[0]) == 0
assert int(ppl.time[-1]) == 1.8e5
def test_attributes():
ppl = Ppl(TEST_FLD+"FC1_rev01.ppl")
assert ppl._attributes['CATALOG'] == 331
assert ppl._attributes['data_idx'] == 381
assert 'GG' in ppl.profiles[1]
assert ppl._attributes['nvar'] == 48
def test_extraction():
ppl = Ppl(TEST_FLD+"FC1_rev01.ppl")
ppl.extract(4)
assert ppl.data[4][1][0][0] == 9.962770e6
assert ppl.data[4][1][-1][0] == 1.276020e7
def test_filter():
    """filter_data('PT') keeps only PT entries; profiles stay available."""
    ppl = Ppl(TEST_FLD + "FC1_rev01.ppl")
    pts = ppl.filter_data('PT')
    assert 'PT' in pts[4]
    assert 'old_offshore' in pts[4]
    # Bug fix: removed the bare 'ppl.profiles' expression statement -- it was
    # a no-op; the assertion below performs the same attribute access anyway.
    assert 'GG' in ppl.profiles[1]
@oscheck
def test_to_excel():
ppl = Ppl(TEST_FLD+"FC1_rev01.ppl")
ppl.to_excel()
assert "FC1_rev01_ppl.xlsx" in os.listdir()
xl = xlrd.open_workbook("FC1_rev01_ppl.xlsx")
sh = xl.sheet_by_index(14)
assert sh.cell_value(2, 2) == 1.654940e1
os.remove("FC1_rev01_ppl.xlsx")
temp_folder = tempfile.gettempdir()
ppl.to_excel(temp_folder)
assert "FC1_rev01_ppl.xlsx" in os.listdir(temp_folder)
os.remove(temp_folder+os.sep+"FC1_rev01_ppl.xlsx")
| gpl-3.0 |
Integral-Technology-Solutions/ConfigNOW-4.3 | Lib/xml/dom/ext/Printer.py | 3 | 13729 | ########################################################################
#
# File Name: Printer.py
#
# Documentation: http://docs.4suite.com/4DOM/Printer.py.html
#
"""
The printing sub-system.
WWW: http://4suite.com/4DOM e-mail: support@4suite.com
Copyright (c) 2000 Fourthought Inc, USA. All Rights Reserved.
See http://4suite.com/COPYRIGHT for license and copyright information
"""
import string, re
from xml.dom import Node
from xml.dom.ext.Visitor import Visitor, WalkerInterface
from xml.dom import ext, XMLNS_NAMESPACE, XML_NAMESPACE, XHTML_NAMESPACE
from xml.dom.html import TranslateHtmlCdata
from xml.dom.html import HTML_4_TRANSITIONAL_INLINE
from xml.dom.html import HTML_FORBIDDEN_END
from xml.dom.html import HTML_BOOLEAN_ATTRS
ILLEGAL_LOW_CHARS = '[\x01-\x08\x0B-\x0C\x0E-\x1F]'
SURROGATE_BLOCK = '[\xF0-\xF7][\x80-\xBF][\x80-\xBF][\x80-\xBF]'
ILLEGAL_HIGH_CHARS = '\xEF\xBF[\xBE\xBF]'
#Note: Prolly fuzzy on this, but it looks as if characters from the surrogate block are allowed if in scalar form, which is encoded in UTF8 the same was as in surrogate block form
XML_ILLEGAL_CHAR_PATTERN = re.compile('%s|%s'%(ILLEGAL_LOW_CHARS, ILLEGAL_HIGH_CHARS))
g_utf8TwoBytePattern = re.compile('([\xC0-\xC3])([\x80-\xBF])')
g_cdataCharPattern = re.compile('[&<]|]]>')
g_charToEntity = {
'&': '&',
'<': '<',
']]>': ']]>',
}
try:
#The following stanza courtesy Martin von Loewis
import codecs # Python 1.6+ only
from types import UnicodeType
def utf8_to_code(text, encoding):
encoder = codecs.lookup(encoding)[0] # encode,decode,reader,writer
if type(text) is not UnicodeType:
text = unicode(text, "utf-8")
return encoder(text)[0] # result,size
def strobj_to_utf8str(text, encoding):
if string.upper(encoding) not in ["UTF-8", "ISO-8859-1", "LATIN-1"]:
raise ValueError("Invalid encoding: %s"%encoding)
encoder = codecs.lookup(encoding)[0] # encode,decode,reader,writer
if type(text) is not UnicodeType:
text = unicode(text, "utf-8")
#FIXME
return str(encoder(text)[0])
except ImportError:
def utf8_to_code(text, encoding):
encoding = string.upper(encoding)
if encoding == 'UTF-8':
return text
from xml.unicode.iso8859 import wstring
wstring.install_alias('ISO-8859-1', 'ISO_8859-1:1987')
#Note: Pass through to wstrop. This means we don't play nice and
#Escape characters that are not in the target encoding.
ws = wstring.from_utf8(text)
text = ws.encode(encoding)
#This version would skip all untranslatable chars: see wstrop.c
#text = ws.encode(encoding, 1)
return text
strobj_to_utf8str = utf8_to_code
def TranslateCdataAttr(characters):
    """Escape an attribute value and pick a quote character for it.

    Returns a (text, delimiter) pair.  If the value contains an apostrophe
    the attribute is double-quoted and embedded double quotes are escaped;
    otherwise it is single-quoted and embedded apostrophes are escaped.
    Literal newlines are normalized to the &#10; character reference.
    """
    if not characters:
        return '', "'"
    if "'" in characters:
        delimiter = '"'
        escaped = characters.replace('"', '&quot;')
    else:
        delimiter = "'"
        escaped = characters.replace("'", '&apos;')
    # Attribute-value normalization: a raw newline would be collapsed to a
    # space by the parser, so emit it as a character entity instead.
    if "\n" in characters:
        escaped = escaped.replace('\n', '&#10;')
    return escaped, delimiter
#Note: Unicode object only for now
def TranslateCdata(characters, encoding='UTF-8', prev_chars='', markupSafe=0,
                   charsetHandler=utf8_to_code):
    """Escape character data for XML output and encode it.

    charsetHandler is a function that takes a string or unicode object as
    the first argument, representing the string to be processed, and an
    encoding specifier as the second argument.  It must return a string or
    unicode object.  prev_chars carries the tail of the previously written
    text so a ']]>' split across two calls is still caught.
    """
    if not characters:
        return ''
    if not markupSafe:
        # Escape '&', '<' and ']]>' via the g_charToEntity table.
        if g_cdataCharPattern.search(characters):
            new_string = g_cdataCharPattern.subn(
                lambda m, d=g_charToEntity: d[m.group()],
                characters)[0]
        else:
            new_string = characters
        # ']]>' straddling the previous write: escape the leading '>'.
        if prev_chars[-2:] == ']]' and characters[0] == '>':
            new_string = '&gt;' + new_string[1:]
    else:
        # Caller guarantees the text is already markup-safe.
        new_string = characters
    #Note: use decimal char entity rep because some browsers are broken
    #FIXME: This will bomb for high characters.  Should, for instance, detect
    #The UTF-8 for 0xFFFE and put out &#xFFFE;
    if XML_ILLEGAL_CHAR_PATTERN.search(new_string):
        new_string = XML_ILLEGAL_CHAR_PATTERN.subn(
            lambda m: '&#%i;' % ord(m.group()),
            new_string)[0]
    # Finally transcode from UTF-8 to the requested output encoding.
    new_string = charsetHandler(new_string, encoding)
    return new_string
class PrintVisitor(Visitor):
def __init__(self, stream, encoding, indent='', plainElements=None,
nsHints=None, isXhtml=0, force8bit=0):
self.stream = stream
self.encoding = encoding
# Namespaces
self._namespaces = [{}]
self._nsHints = nsHints or {}
# PrettyPrint
self._indent = indent
self._depth = 0
self._inText = 0
self._plainElements = plainElements or []
# HTML support
self._html = None
self._isXhtml = isXhtml
self.force8bit = force8bit
return
def _write(self, text):
if self.force8bit:
obj = strobj_to_utf8str(text, self.encoding)
else:
obj = utf8_to_code(text, self.encoding)
self.stream.write(obj)
return
def _tryIndent(self):
if not self._inText and self._indent:
self._write('\n' + self._indent*self._depth)
return
def visit(self, node):
    """Dispatch *node* to the visit method matching its nodeType.

    Raises for node types this printer does not know how to serialize.
    """
    if self._html is None:
        # Set HTMLDocument flag here for speed: only HTML DOM documents
        # expose getElementsByName, and the answer cannot change mid-walk.
        self._html = hasattr(node.ownerDocument, 'getElementsByName')
    # Fix: the original assigned nodeType and then ignored it, re-reading
    # node.nodeType in every comparison; use the local consistently.
    nodeType = node.nodeType
    if nodeType == Node.ELEMENT_NODE:
        return self.visitElement(node)
    elif nodeType == Node.ATTRIBUTE_NODE:
        return self.visitAttr(node)
    elif nodeType == Node.TEXT_NODE:
        return self.visitText(node)
    elif nodeType == Node.CDATA_SECTION_NODE:
        return self.visitCDATASection(node)
    elif nodeType == Node.ENTITY_REFERENCE_NODE:
        return self.visitEntityReference(node)
    elif nodeType == Node.ENTITY_NODE:
        return self.visitEntity(node)
    elif nodeType == Node.PROCESSING_INSTRUCTION_NODE:
        return self.visitProcessingInstruction(node)
    elif nodeType == Node.COMMENT_NODE:
        return self.visitComment(node)
    elif nodeType == Node.DOCUMENT_NODE:
        return self.visitDocument(node)
    elif nodeType == Node.DOCUMENT_TYPE_NODE:
        return self.visitDocumentType(node)
    elif nodeType == Node.DOCUMENT_FRAGMENT_NODE:
        return self.visitDocumentFragment(node)
    elif nodeType == Node.NOTATION_NODE:
        return self.visitNotation(node)
    # It has a node type, but we don't know how to handle it
    raise Exception("Unknown node type: %s" % repr(node))
def visitNodeList(self, node, exclude=None):
for curr in node:
curr is not exclude and self.visit(curr)
return
def visitNamedNodeMap(self, node):
for item in node.values():
self.visit(item)
return
def visitAttr(self, node):
if node.namespaceURI == XMLNS_NAMESPACE:
# Skip namespace declarations
return
self._write(' ' + node.name)
value = node.value
if value or not self._html:
text = TranslateCdata(value, self.encoding)
text, delimiter = TranslateCdataAttr(text)
self._write("=%s%s%s" % (delimiter, text, delimiter))
return
def visitProlog(self):
self._write("<?xml version='1.0' encoding='%s'?>" % (
self.encoding or 'utf-8'
))
self._inText = 0
return
def visitDocument(self, node):
not self._html and self.visitProlog()
node.doctype and self.visitDocumentType(node.doctype)
self.visitNodeList(node.childNodes, exclude=node.doctype)
return
def visitDocumentFragment(self, node):
self.visitNodeList(node.childNodes)
return
def visitElement(self, node):
self._namespaces.append(self._namespaces[-1].copy())
inline = node.tagName in self._plainElements
not inline and self._tryIndent()
self._write('<%s' % node.tagName)
if self._isXhtml or not self._html:
namespaces = ''
if self._isXhtml:
nss = {'xml': XML_NAMESPACE, '': XHTML_NAMESPACE}
else:
nss = ext.GetAllNs(node)
if self._nsHints:
self._nsHints.update(nss)
nss = self._nsHints
self._nsHints = {}
del nss['xml']
for prefix in nss.keys():
if not self._namespaces[-1].has_key(prefix) or self._namespaces[-1][prefix] != nss[prefix]:
if prefix:
xmlns = " xmlns:%s='%s'" % (prefix, nss[prefix])
else:
xmlns = " xmlns='%s'" % nss[prefix]
namespaces = namespaces + xmlns
self._namespaces[-1][prefix] = nss[prefix]
self._write(namespaces)
for attr in node.attributes.values():
self.visitAttr(attr)
if len(node.childNodes):
self._write('>')
self._depth = self._depth + 1
self.visitNodeList(node.childNodes)
self._depth = self._depth - 1
if not self._html or (node.tagName not in HTML_FORBIDDEN_END):
not (self._inText and inline) and self._tryIndent()
self._write('</%s>' % node.tagName)
elif not self._html:
self._write('/>')
elif node.tagName not in HTML_FORBIDDEN_END:
self._write('></%s>' % node.tagName)
else:
self._write('>')
del self._namespaces[-1]
self._inText = 0
return
def visitText(self, node):
text = node.data
if self._indent:
text = string.strip(text) and text
if text:
if self._html:
text = TranslateHtmlCdata(text, self.encoding)
else:
text = TranslateCdata(text, self.encoding)
self.stream.write(text)
self._inText = 1
return
def visitDocumentType(self, doctype):
self._tryIndent()
self._write('<!DOCTYPE %s' % doctype.name)
if doctype.systemId and '"' in doctype.systemId:
system = "'%s'" % doctype.systemId
else:
system = '"%s"' % doctype.systemId
if doctype.publicId and '"' in doctype.publicId:
# We should probably throw an error
# Valid characters: <space> | <newline> | <linefeed> |
# [a-zA-Z0-9] | [-'()+,./:=?;!*#@$_%]
public = "'%s'" % doctype.publicId
else:
public = '"%s"' % doctype.publicId
if doctype.publicId and doctype.systemId:
self._write(' PUBLIC %s %s' % (public, system))
elif doctype.systemId:
self._write(' SYSTEM %s' % system)
if doctype.entities or doctype.notations:
self._write(' [')
self._depth = self._depth + 1
self.visitNamedNodeMap(doctype.entities)
self.visitNamedNodeMap(doctype.notations)
self._depth = self._depth - 1
self._tryIndent()
self._write(']>')
else:
self._write('>')
self._inText = 0
return
def visitEntity(self, node):
"""Visited from a NamedNodeMap in DocumentType"""
self._tryIndent()
self._write('<!ENTITY %s' % (node.nodeName))
node.publicId and self._write(' PUBLIC %s' % node.publicId)
node.systemId and self._write(' SYSTEM %s' % node.systemId)
node.notationName and self._write(' NDATA %s' % node.notationName)
self._write('>')
return
def visitNotation(self, node):
"""Visited from a NamedNodeMap in DocumentType"""
self._tryIndent()
self._write('<!NOTATION %s' % node.nodeName)
node.publicId and self._write(' PUBLIC %s' % node.publicId)
node.systemId and self._write(' SYSTEM %s' % node.systemId)
self._write('>')
return
def visitCDATASection(self, node):
self._tryIndent()
self._write('<![CDATA[%s]]>' % (node.data))
self._inText = 0
return
def visitComment(self, node):
self._tryIndent()
self._write('<!--%s-->' % (node.data))
self._inText = 0
return
def visitEntityReference(self, node):
self._write('&%s;' % node.nodeName)
self._inText = 1
return
def visitProcessingInstruction(self, node):
self._tryIndent()
self._write('<?%s %s?>' % (node.target, node.data))
self._inText = 0
return
class PrintWalker(WalkerInterface):
    """Walker that serializes an entire subtree in a single step."""

    def __init__(self, visitor, startNode):
        # visitor: a PrintVisitor (or compatible) that performs the writing.
        # startNode: root of the subtree to print.
        WalkerInterface.__init__(self, visitor)
        self.start_node = startNode
        return

    def step(self):
        """There is really no step to printing.  It prints the whole thing"""
        self.visitor.visit(self.start_node)
        return

    def run(self):
        # Printing is not incremental, so running is a single step.
        return self.step()
| mit |
OniOniOn-/MCEdit-Unified | pymclevel/run_regression_test.py | 13 | 5917 | # !/usr/bin/env python
import tempfile
import sys
import subprocess
import shutil
import os
import hashlib
import contextlib
import gzip
import fnmatch
import tarfile
import zipfile
def generate_file_list(directory):
    """Yield the full path of every file found beneath *directory*."""
    for root, _subdirs, filenames in os.walk(directory):
        for entry in filenames:
            yield os.path.join(root, entry)
def sha1_file(name, checksum=None):
    """Feed the contents of file *name* into a SHA-1 checksum.

    Files matching *.dat are gzip-compressed and are hashed in
    decompressed form, so logically-equal data compares equal.  Pass an
    existing hashlib object via *checksum* to accumulate over several
    files; a fresh one is created otherwise.  Returns the checksum object.
    """
    chunk_size = 1024
    if checksum is None:
        checksum = hashlib.sha1()
    opener = gzip.open if fnmatch.fnmatch(name, "*.dat") else open
    with contextlib.closing(opener(name, 'rb')) as stream:
        while True:
            block = stream.read(chunk_size)
            checksum.update(block)
            if len(block) < chunk_size:
                # A short (or empty) read means end of file.
                break
    return checksum
def calculate_result(directory):
    # Aggregate a single SHA-1 over every file in the tree, in sorted-path
    # order so the digest is deterministic across runs and platforms.
    checksum = hashlib.sha1()
    for filename in sorted(generate_file_list(directory)):
        # session.lock changes on every world open; it must not affect
        # the regression result.
        if filename.endswith("session.lock"):
            continue
        sha1_file(filename, checksum)
    return checksum.hexdigest()
@contextlib.contextmanager
def temporary_directory(prefix='regr'):
    """Context manager: create a temporary directory, remove it on exit.

    Bug fix: tempfile.mkdtemp's first positional parameter is *suffix*,
    so the original call used the requested prefix as a suffix; pass it
    by keyword so it actually prefixes the directory name.
    """
    name = tempfile.mkdtemp(prefix=prefix)
    try:
        yield name
    finally:
        # Always clean up, even if the body raised.
        shutil.rmtree(name)
@contextlib.contextmanager
def directory_clone(src):
    # Copy *src* into a scratch directory so tests can mutate it freely;
    # the clone disappears when the context exits.
    with temporary_directory('regr') as name:
        # copytree requires a non-existent target, hence the subdirectory.
        subdir = os.path.join(name, "subdir")
        shutil.copytree(src, subdir)
        yield subdir
def launch_subprocess(directory, arguments, env=None):
#my python breaks with an empty environ, i think it wants PATH
#if sys.platform == "win32":
if env is None:
env = {}
newenv = {}
newenv.update(os.environ)
newenv.update(env)
proc = subprocess.Popen((["python.exe"] if sys.platform == "win32" else []) + [
"./mce.py",
directory] + arguments, stdin=subprocess.PIPE, stdout=subprocess.PIPE, env=newenv)
return proc
class RegressionError(Exception):
    """Raised when a regression run fails to execute or its output/tree
    checksum does not match the expected value."""
    pass
def do_test(test_data, result_check, arguments=()):
"""Run a regression test on the given world.
result_check - sha1 of the recursive tree generated
arguments - arguments to give to mce.py on execution
"""
result_check = result_check.lower()
env = {
'MCE_RANDOM_SEED': '42',
'MCE_LAST_PLAYED': '42',
}
if 'MCE_PROFILE' in os.environ:
env['MCE_PROFILE'] = os.environ['MCE_PROFILE']
with directory_clone(test_data) as directory:
proc = launch_subprocess(directory, arguments, env)
proc.stdin.close()
proc.wait()
if proc.returncode:
raise RegressionError("Program execution failed!")
checksum = calculate_result(directory).lower()
if checksum != result_check.lower():
raise RegressionError("Checksum mismatch: {0!r} != {1!r}".format(checksum, result_check))
print "[OK] (sha1sum of result is {0!r}, as expected)".format(result_check)
def do_test_match_output(test_data, result_check, arguments=()):
result_check = result_check.lower()
env = {
'MCE_RANDOM_SEED': '42',
'MCE_LAST_PLAYED': '42'
}
with directory_clone(test_data) as directory:
proc = launch_subprocess(directory, arguments, env)
proc.stdin.close()
output = proc.stdout.read()
proc.wait()
if proc.returncode:
raise RegressionError("Program execution failed!")
print "Output\n{0}".format(output)
checksum = hashlib.sha1()
checksum.update(output)
checksum = checksum.hexdigest()
if checksum != result_check.lower():
raise RegressionError("Checksum mismatch: {0!r} != {1!r}".format(checksum, result_check))
print "[OK] (sha1sum of result is {0!r}, as expected)".format(result_check)
alpha_tests = [
(do_test, 'baseline', '2bf250ec4e5dd8bfd73b3ccd0a5ff749569763cf', []),
(do_test, 'degrief', '2b7eecd5e660f20415413707b4576b1234debfcb', ['degrief']),
(do_test_match_output, 'analyze', '9cb4aec2ed7a895c3a5d20d6e29e26459e00bd53', ['analyze']),
(do_test, 'relight', 'f3b3445b0abca1fe2b183bc48b24fb734dfca781', ['relight']),
(do_test, 'replace', '4e816038f9851817b0d75df948d058143708d2ec',
['replace', 'Water (active)', 'with', 'Lava (active)']),
(do_test, 'fill', '94566d069edece4ff0cc52ef2d8f877fbe9720ab', ['fill', 'Water (active)']),
(do_test, 'heightmap', '71c20e7d7e335cb64b3eb0e9f6f4c9abaa09b070', ['heightmap', 'regression_test/mars.png']),
]
import optparse
parser = optparse.OptionParser()
parser.add_option("--profile", help="Perform profiling on regression tests", action="store_true")
def main(argv):
options, args = parser.parse_args(argv)
if len(args) <= 1:
do_these_regressions = ['*']
else:
do_these_regressions = args[1:]
with directory_clone("testfiles/AnvilWorld") as directory:
test_data = directory
passes = []
fails = []
for func, name, sha, args in alpha_tests:
print "Starting regression {0} ({1})".format(name, args)
if any(fnmatch.fnmatch(name, x) for x in do_these_regressions):
if options.profile:
print >> sys.stderr, "Starting to profile to %s.profile" % name
os.environ['MCE_PROFILE'] = '%s.profile' % name
try:
func(test_data, sha, args)
except RegressionError, e:
fails.append("Regression {0} failed: {1}".format(name, e))
print fails[-1]
else:
passes.append("Regression {0!r} complete.".format(name))
print passes[-1]
print "{0} tests passed.".format(len(passes))
for line in fails:
print line
if __name__ == '__main__':
sys.exit(main(sys.argv))
| isc |
benoitsteiner/tensorflow | tensorflow/contrib/layers/python/layers/initializers_test.py | 111 | 7640 | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for initializers."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.contrib import layers
from tensorflow.contrib.layers.python.layers import initializers
from tensorflow.contrib.layers.python.layers import regularizers
from tensorflow.python.client import session
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import variable_scope
from tensorflow.python.ops import variables
from tensorflow.python.platform import test
class InitializerTest(test.TestCase):
def test_xavier_wrong_dtype(self):
with self.assertRaisesRegexp(
TypeError, 'Cannot create initializer for non-floating point type.'):
initializers.xavier_initializer(dtype=dtypes.int32)
self.assertIsNone(regularizers.l1_regularizer(0.)(None))
def _test_xavier(self, initializer, shape, variance, uniform):
with session.Session() as sess:
var = variable_scope.get_variable(
name='test',
shape=shape,
dtype=dtypes.float32,
initializer=initializer(
uniform=uniform, seed=1))
sess.run(variables.global_variables_initializer())
values = var.eval()
self.assertAllClose(np.var(values), variance, 1e-3, 1e-3)
def test_xavier_uniform(self):
self._test_xavier(initializers.xavier_initializer, [100, 40],
2. / (100. + 40.), True)
def test_xavier_normal(self):
self._test_xavier(initializers.xavier_initializer, [100, 40],
2. / (100. + 40.), False)
def test_xavier_scalar(self):
self._test_xavier(initializers.xavier_initializer, [], 0.0, True)
def test_xavier_conv2d_uniform(self):
self._test_xavier(layers.xavier_initializer_conv2d, [100, 40, 5, 7],
2. / (100. * 40 * (5 + 7)), True)
def test_xavier_conv2d_normal(self):
self._test_xavier(layers.xavier_initializer_conv2d, [100, 40, 5, 7],
2. / (100. * 40 * (5 + 7)), False)
class VarianceScalingInitializerTest(test.TestCase):
def test_wrong_dtype(self):
with self.assertRaisesRegexp(
TypeError, 'Cannot create initializer for non-floating point type.'):
initializers.variance_scaling_initializer(dtype=dtypes.int32)
initializer = initializers.variance_scaling_initializer()
with self.assertRaisesRegexp(
TypeError, 'Cannot create initializer for non-floating point type.'):
initializer([], dtype=dtypes.int32)
def _test_variance(self, initializer, shape, variance, factor, mode, uniform):
with ops.Graph().as_default() as g:
with self.test_session(graph=g) as sess:
var = variable_scope.get_variable(
name='test',
shape=shape,
dtype=dtypes.float32,
initializer=initializer(
factor=factor, mode=mode, uniform=uniform, seed=1))
sess.run(variables.global_variables_initializer())
values = var.eval()
self.assertAllClose(np.var(values), variance, 1e-3, 1e-3)
def test_fan_in(self):
for uniform in [False, True]:
self._test_variance(
initializers.variance_scaling_initializer,
shape=[100, 40],
variance=2. / 100.,
factor=2.0,
mode='FAN_IN',
uniform=uniform)
def test_fan_out(self):
for uniform in [False, True]:
self._test_variance(
initializers.variance_scaling_initializer,
shape=[100, 40],
variance=2. / 40.,
factor=2.0,
mode='FAN_OUT',
uniform=uniform)
def test_fan_avg(self):
for uniform in [False, True]:
self._test_variance(
initializers.variance_scaling_initializer,
shape=[100, 40],
variance=4. / (100. + 40.),
factor=2.0,
mode='FAN_AVG',
uniform=uniform)
def test_conv2d_fan_in(self):
for uniform in [False, True]:
self._test_variance(
initializers.variance_scaling_initializer,
shape=[100, 40, 5, 7],
variance=2. / (100. * 40. * 5.),
factor=2.0,
mode='FAN_IN',
uniform=uniform)
def test_conv2d_fan_out(self):
for uniform in [False, True]:
self._test_variance(
initializers.variance_scaling_initializer,
shape=[100, 40, 5, 7],
variance=2. / (100. * 40. * 7.),
factor=2.0,
mode='FAN_OUT',
uniform=uniform)
def test_conv2d_fan_avg(self):
for uniform in [False, True]:
self._test_variance(
initializers.variance_scaling_initializer,
shape=[100, 40, 5, 7],
variance=2. / (100. * 40. * (5. + 7.)),
factor=2.0,
mode='FAN_AVG',
uniform=uniform)
def test_xavier_uniform(self):
self._test_variance(
initializers.variance_scaling_initializer,
shape=[100, 40],
variance=2. / (100. + 40.),
factor=1.0,
mode='FAN_AVG',
uniform=True)
def test_xavier_normal(self):
self._test_variance(
initializers.variance_scaling_initializer,
shape=[100, 40],
variance=2. / (100. + 40.),
factor=1.0,
mode='FAN_AVG',
uniform=False)
def test_xavier_scalar(self):
self._test_variance(
initializers.variance_scaling_initializer,
shape=[],
variance=0.0,
factor=1.0,
mode='FAN_AVG',
uniform=False)
def test_xavier_conv2d_uniform(self):
self._test_variance(
initializers.variance_scaling_initializer,
shape=[100, 40, 5, 7],
variance=2. / (100. * 40. * (5. + 7.)),
factor=1.0,
mode='FAN_AVG',
uniform=True)
def test_xavier_conv2d_normal(self):
    # Bug fix: this test claimed to cover the normal (Gaussian) variant but
    # passed uniform=True, making it an exact duplicate of
    # test_xavier_conv2d_uniform and leaving the normal path untested.
    self._test_variance(
        initializers.variance_scaling_initializer,
        shape=[100, 40, 5, 7],
        variance=2. / (100. * 40. * (5. + 7.)),
        factor=1.0,
        mode='FAN_AVG',
        uniform=False)
def test_1d_shape_fan_in(self):
for uniform in [False, True]:
self._test_variance(
initializers.variance_scaling_initializer,
shape=[100],
variance=2. / 100.,
factor=2.0,
mode='FAN_IN',
uniform=uniform)
def test_1d_shape_fan_out(self):
for uniform in [False, True]:
self._test_variance(
initializers.variance_scaling_initializer,
shape=[100],
variance=2. / 100.,
factor=2.0,
mode='FAN_OUT',
uniform=uniform)
def test_1d_shape_fan_avg(self):
for uniform in [False, True]:
self._test_variance(
initializers.variance_scaling_initializer,
shape=[100],
variance=4. / (100. + 100.),
factor=2.0,
mode='FAN_AVG',
uniform=uniform)
if __name__ == '__main__':
test.main()
| apache-2.0 |
dcroc16/skunk_works | google_appengine/lib/python-gflags/gflags.py | 448 | 104236 | #!/usr/bin/env python
#
# Copyright (c) 2002, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# ---
# Author: Chad Lester
# Design and style contributions by:
# Amit Patel, Bogdan Cocosel, Daniel Dulitz, Eric Tiedemann,
# Eric Veach, Laurence Gonsalves, Matthew Springer
# Code reorganized a bit by Craig Silverstein
"""This module is used to define and parse command line flags.
This module defines a *distributed* flag-definition policy: rather than
an application having to define all flags in or near main(), each python
module defines flags that are useful to it. When one python module
imports another, it gains access to the other's flags. (This is
implemented by having all modules share a common, global registry object
containing all the flag information.)
Flags are defined through the use of one of the DEFINE_xxx functions.
The specific function used determines how the flag is parsed, checked,
and optionally type-converted, when it's seen on the command line.
IMPLEMENTATION: DEFINE_* creates a 'Flag' object and registers it with a
'FlagValues' object (typically the global FlagValues FLAGS, defined
here). The 'FlagValues' object can scan the command line arguments and
pass flag arguments to the corresponding 'Flag' objects for
value-checking and type conversion. The converted flag values are
available as attributes of the 'FlagValues' object.
Code can access the flag through a FlagValues object, for instance
gflags.FLAGS.myflag. Typically, the __main__ module passes the command
line arguments to gflags.FLAGS for parsing.
At bottom, this module calls getopt(), so getopt functionality is
supported, including short- and long-style flags, and the use of -- to
terminate flags.
Methods defined by the flag module will throw 'FlagsError' exceptions.
The exception argument will be a human-readable string.
FLAG TYPES: This is a list of the DEFINE_*'s that you can do. All flags
take a name, default value, help-string, and optional 'short' name
(one-letter name). Some flags have other arguments, which are described
with the flag.
DEFINE_string: takes any input, and interprets it as a string.
DEFINE_bool or
DEFINE_boolean: typically does not take an argument: say --myflag to
set FLAGS.myflag to true, or --nomyflag to set
FLAGS.myflag to false. Alternately, you can say
--myflag=true or --myflag=t or --myflag=1 or
--myflag=false or --myflag=f or --myflag=0
DEFINE_float: takes an input and interprets it as a floating point
number. Takes optional args lower_bound and upper_bound;
if the number specified on the command line is out of
range, it will raise a FlagError.
DEFINE_integer: takes an input and interprets it as an integer. Takes
optional args lower_bound and upper_bound as for floats.
DEFINE_enum: takes a list of strings which represents legal values. If
the command-line value is not in this list, raise a flag
error. Otherwise, assign to FLAGS.flag as a string.
DEFINE_list: Takes a comma-separated list of strings on the commandline.
Stores them in a python list object.
DEFINE_spaceseplist: Takes a space-separated list of strings on the
commandline. Stores them in a python list object.
Example: --myspacesepflag "foo bar baz"
DEFINE_multistring: The same as DEFINE_string, except the flag can be
specified more than once on the commandline. The
result is a python list object (list of strings),
even if the flag is only on the command line once.
DEFINE_multi_int: The same as DEFINE_integer, except the flag can be
specified more than once on the commandline. The
result is a python list object (list of ints), even if
the flag is only on the command line once.
SPECIAL FLAGS: There are a few flags that have special meaning:
--help prints a list of all the flags in a human-readable fashion
--helpshort prints a list of all key flags (see below).
--helpxml prints a list of all flags, in XML format. DO NOT parse
the output of --help and --helpshort. Instead, parse
the output of --helpxml. For more info, see
"OUTPUT FOR --helpxml" below.
--flagfile=foo read flags from file foo.
--undefok=f1,f2 ignore unrecognized option errors for f1,f2.
For boolean flags, you should use --undefok=boolflag, and
--boolflag and --noboolflag will be accepted. Do not use
--undefok=noboolflag.
-- as in getopt(), terminates flag-processing
FLAGS VALIDATORS: If your program:
- requires flag X to be specified
- needs flag Y to match a regular expression
- or requires any more general constraint to be satisfied
then validators are for you!
Each validator represents a constraint over one flag, which is enforced
starting from the initial parsing of the flags and until the program
terminates.
Also, lower_bound and upper_bound for numerical flags are enforced using flag
validators.
Howto:
If you want to enforce a constraint over one flag, use
gflags.RegisterValidator(flag_name,
checker,
message='Flag validation failed',
flag_values=FLAGS)
After flag values are initially parsed, and after any change to the specified
flag, method checker(flag_value) will be executed. If constraint is not
satisfied, an IllegalFlagValue exception will be raised. See
RegisterValidator's docstring for a detailed explanation on how to construct
your own checker.
EXAMPLE USAGE:
FLAGS = gflags.FLAGS
gflags.DEFINE_integer('my_version', 0, 'Version number.')
gflags.DEFINE_string('filename', None, 'Input file name', short_name='f')
gflags.RegisterValidator('my_version',
lambda value: value % 2 == 0,
message='--my_version must be divisible by 2')
gflags.MarkFlagAsRequired('filename')
NOTE ON --flagfile:
Flags may be loaded from text files in addition to being specified on
the commandline.
Any flags you don't feel like typing, throw them in a file, one flag per
line, for instance:
--myflag=myvalue
--nomyboolean_flag
You then specify your file with the special flag '--flagfile=somefile'.
You CAN recursively nest flagfile= tokens OR use multiple files on the
command line. Lines beginning with a single hash '#' or a double slash
'//' are comments in your flagfile.
Any flagfile=<file> will be interpreted as having a relative path from
the current working directory rather than from the place the file was
included from:
myPythonScript.py --flagfile=config/somefile.cfg
If somefile.cfg includes further --flagfile= directives, these will be
referenced relative to the original CWD, not from the directory the
including flagfile was found in!
The caveat applies to people who are including a series of nested files
in a different dir than they are executing out of. Relative path names
are always from CWD, not from the directory of the parent include
flagfile. We do now support '~' expanded directory names.
Absolute path names ALWAYS work!
EXAMPLE USAGE:
FLAGS = gflags.FLAGS
# Flag names are globally defined! So in general, we need to be
# careful to pick names that are unlikely to be used by other libraries.
# If there is a conflict, we'll get an error at import time.
gflags.DEFINE_string('name', 'Mr. President', 'your name')
gflags.DEFINE_integer('age', None, 'your age in years', lower_bound=0)
gflags.DEFINE_boolean('debug', False, 'produces debugging output')
gflags.DEFINE_enum('gender', 'male', ['male', 'female'], 'your gender')
def main(argv):
try:
argv = FLAGS(argv) # parse flags
except gflags.FlagsError, e:
print '%s\\nUsage: %s ARGS\\n%s' % (e, sys.argv[0], FLAGS)
sys.exit(1)
if FLAGS.debug: print 'non-flag arguments:', argv
print 'Happy Birthday', FLAGS.name
if FLAGS.age is not None:
print 'You are a %d year old %s' % (FLAGS.age, FLAGS.gender)
if __name__ == '__main__':
main(sys.argv)
KEY FLAGS:
As we already explained, each module gains access to all flags defined
by all the other modules it transitively imports. In the case of
non-trivial scripts, this means a lot of flags ... For documentation
purposes, it is good to identify the flags that are key (i.e., really
important) to a module. Clearly, the concept of "key flag" is a
subjective one. When trying to determine whether a flag is key to a
module or not, assume that you are trying to explain your module to a
potential user: which flags would you really like to mention first?
We'll describe shortly how to declare which flags are key to a module.
For the moment, assume we know the set of key flags for each module.
Then, if you use the app.py module, you can use the --helpshort flag to
print only the help for the flags that are key to the main module, in a
human-readable format.
NOTE: If you need to parse the flag help, do NOT use the output of
--help / --helpshort. That output is meant for human consumption, and
may be changed in the future. Instead, use --helpxml; flags that are
key for the main module are marked there with a <key>yes</key> element.
The set of key flags for a module M is composed of:
1. Flags defined by module M by calling a DEFINE_* function.
2. Flags that module M explictly declares as key by using the function
DECLARE_key_flag(<flag_name>)
3. Key flags of other modules that M specifies by using the function
ADOPT_module_key_flags(<other_module>)
This is a "bulk" declaration of key flags: each flag that is key for
<other_module> becomes key for the current module too.
Notice that if you do not use the functions described at points 2 and 3
above, then --helpshort prints information only about the flags defined
by the main module of our script. In many cases, this behavior is good
enough. But if you move part of the main module code (together with the
related flags) into a different module, then it is nice to use
DECLARE_key_flag / ADOPT_module_key_flags and make sure --helpshort
lists all relevant flags (otherwise, your code refactoring may confuse
your users).
Note: each of DECLARE_key_flag / ADOPT_module_key_flags has its own
pluses and minuses: DECLARE_key_flag is more targeted and may lead a
more focused --helpshort documentation. ADOPT_module_key_flags is good
for cases when an entire module is considered key to the current script.
Also, it does not require updates to client scripts when a new flag is
added to the module.
EXAMPLE USAGE 2 (WITH KEY FLAGS):
Consider an application that contains the following three files (two
auxiliary modules and a main module)
File libfoo.py:
import gflags
gflags.DEFINE_integer('num_replicas', 3, 'Number of replicas to start')
gflags.DEFINE_boolean('rpc2', True, 'Turn on the usage of RPC2.')
... some code ...
File libbar.py:
import gflags
gflags.DEFINE_string('bar_gfs_path', '/gfs/path',
'Path to the GFS files for libbar.')
gflags.DEFINE_string('email_for_bar_errors', 'bar-team@google.com',
'Email address for bug reports about module libbar.')
gflags.DEFINE_boolean('bar_risky_hack', False,
'Turn on an experimental and buggy optimization.')
... some code ...
File myscript.py:
import gflags
import libfoo
import libbar
gflags.DEFINE_integer('num_iterations', 0, 'Number of iterations.')
# Declare that all flags that are key for libfoo are
# key for this module too.
gflags.ADOPT_module_key_flags(libfoo)
# Declare that the flag --bar_gfs_path (defined in libbar) is key
# for this module.
gflags.DECLARE_key_flag('bar_gfs_path')
... some code ...
When myscript is invoked with the flag --helpshort, the resulted help
message lists information about all the key flags for myscript:
--num_iterations, --num_replicas, --rpc2, and --bar_gfs_path.
Of course, myscript uses all the flags declared by it (in this case,
just --num_replicas) or by any of the modules it transitively imports
(e.g., the modules libfoo, libbar). E.g., it can access the value of
FLAGS.bar_risky_hack, even if --bar_risky_hack is not declared as a key
flag for myscript.
OUTPUT FOR --helpxml:
The --helpxml flag generates output with the following structure:
<?xml version="1.0"?>
<AllFlags>
<program>PROGRAM_BASENAME</program>
<usage>MAIN_MODULE_DOCSTRING</usage>
(<flag>
[<key>yes</key>]
<file>DECLARING_MODULE</file>
<name>FLAG_NAME</name>
<meaning>FLAG_HELP_MESSAGE</meaning>
<default>DEFAULT_FLAG_VALUE</default>
<current>CURRENT_FLAG_VALUE</current>
<type>FLAG_TYPE</type>
[OPTIONAL_ELEMENTS]
</flag>)*
</AllFlags>
Notes:
1. The output is intentionally similar to the output generated by the
C++ command-line flag library. The few differences are due to the
Python flags that do not have a C++ equivalent (at least not yet),
e.g., DEFINE_list.
2. New XML elements may be added in the future.
3. DEFAULT_FLAG_VALUE is in serialized form, i.e., the string you can
pass for this flag on the command-line. E.g., for a flag defined
using DEFINE_list, this field may be foo,bar, not ['foo', 'bar'].
4. CURRENT_FLAG_VALUE is produced using str(). This means that the
string 'false' will be represented in the same way as the boolean
False. Using repr() would have removed this ambiguity and simplified
parsing, but would have broken the compatibility with the C++
command-line flags.
5. OPTIONAL_ELEMENTS describe elements relevant for certain kinds of
flags: lower_bound, upper_bound (for flags that specify bounds),
enum_value (for enum flags), list_separator (for flags that consist of
a list of values, separated by a special token).
6. We do not provide any example here: please use --helpxml instead.
This module requires at least python 2.2.1 to run.
"""
import cgi
import getopt
import os
import re
import string
import struct
import sys
# pylint: disable-msg=C6204
try:
import fcntl
except ImportError:
fcntl = None
try:
# Importing termios will fail on non-unix platforms.
import termios
except ImportError:
termios = None
import gflags_validators
# pylint: enable-msg=C6204
# Are we running under pychecker?  When pychecker is active, modules may be
# imported more than once, so duplicate-flag checks are relaxed elsewhere in
# this module (see FlagValues.__setitem__).
_RUNNING_PYCHECKER = 'pychecker.python' in sys.modules
def _GetCallingModuleObjectAndName():
  """Returns the module that's calling into this module.

  We generally use this function to get the name of the module calling a
  DEFINE_foo... function.

  Returns:
    A (module object, module name) pair for the nearest caller whose
    globals differ from this module's.

  Raises:
    AssertionError: if no such module can be found on the stack.
  """
  # Walk outward through the call stack until we leave this module's globals.
  our_globals = globals()
  depth = 1
  while depth < sys.getrecursionlimit():
    frame_globals = sys._getframe(depth).f_globals
    if frame_globals is not our_globals:
      module, module_name = _GetModuleObjectAndName(frame_globals)
      if module_name is not None:
        return module, module_name
    depth += 1
  raise AssertionError("No module was found")
def _GetCallingModule():
  """Returns the name of the module that's calling into this module."""
  _, module_name = _GetCallingModuleObjectAndName()
  return module_name
def _GetThisModuleObjectAndName():
  """Returns: (module object, module name) for this module."""
  this_globals = globals()
  return _GetModuleObjectAndName(this_globals)
# module exceptions:
class FlagsError(Exception):
  """Root of the exception hierarchy for this flags module."""
class DuplicateFlag(FlagsError):
  """Raised when two flags are registered under the same name."""
class CantOpenFlagFileError(FlagsError):
  """Raised when a --flagfile cannot be opened (missing, bad permissions, etc.)."""
class DuplicateFlagCannotPropagateNoneToSwig(DuplicateFlag):
  """Special case of DuplicateFlag -- SWIG flag value can't be set to None.

  Raised when a duplicate flag is created whose new value is None: even
  with allow_override set, we must abort, because it is currently
  impossible to pass a None default value back to SWIG.  See
  FlagValues.SetDefault for details.
  """
class DuplicateFlagError(DuplicateFlag):
  """A DuplicateFlag whose message cites the conflicting definitions.

  Compared to a plain DuplicateFlag, this error also names the two modules
  in which the conflicting definitions of the flag occur.  It is a separate
  class so that external code depending on the existing DuplicateFlag
  interface keeps working.
  """

  def __init__(self, flagname, flag_values, other_flag_values=None):
    """Create a DuplicateFlagError.

    Args:
      flagname: Name of the flag being redefined.
      flag_values: FlagValues object containing the first definition of
          flagname.
      other_flag_values: If this argument is not None, it should be the
          FlagValues object where the second definition of flagname occurs.
          If it is None, we assume that we're being called when attempting
          to create the flag a second time, and we use the module calling
          this one as the source of the second definition.
    """
    self.flagname = flagname
    first_module = flag_values.FindModuleDefiningFlag(
        flagname, default='<unknown>')
    if other_flag_values is not None:
      second_module = other_flag_values.FindModuleDefiningFlag(
          flagname, default='<unknown>')
    else:
      second_module = _GetCallingModule()
    msg = "The flag '%s' is defined twice. First from %s, Second from %s" % (
        self.flagname, first_module, second_module)
    DuplicateFlag.__init__(self, msg)
class IllegalFlagValue(FlagsError):
  """Raised when a command-line flag argument is illegal."""
class UnrecognizedFlag(FlagsError):
  """Raised when an unknown flag is encountered."""
# An UnrecognizedFlagError conveys more information than an UnrecognizedFlag.
# Since there are external modules that create DuplicateFlags, the interface to
# DuplicateFlag shouldn't change. The flagvalue will be assigned the full value
# of the flag and its argument, if any, allowing handling of unrecognized flags
# in an exception handler.
# If flagvalue is the empty string, then this exception is an due to a
# reference to a flag that was not already defined.
class UnrecognizedFlagError(UnrecognizedFlag):
  """An UnrecognizedFlag that also records the offending flag's value.

  flagvalue holds the full value of the flag and its argument, if any,
  allowing handling of unrecognized flags in an exception handler; an
  empty flagvalue means the error came from a reference to a flag that
  was never defined.
  """

  def __init__(self, flagname, flagvalue=''):
    self.flagname = flagname
    self.flagvalue = flagvalue
    message = "Unknown command line flag '%s'" % flagname
    UnrecognizedFlag.__init__(self, message)
# Global variable used by expvar
_exported_flags = {}
# Fallback width of help output, used when the real terminal width cannot be
# determined (see GetHelpWidth).
_help_width = 80  # width of help output
def GetHelpWidth():
  """Returns: an integer, the width of help lines that is used in TextWrap."""
  # Without a tty (or the platform modules needed to query one) we cannot
  # measure the terminal, so fall back to the fixed default width.
  if not sys.stdout.isatty() or termios is None or fcntl is None:
    return _help_width
  try:
    window = fcntl.ioctl(sys.stdout, termios.TIOCGWINSZ, '1234')
    columns = struct.unpack('hh', window)[1]
    # Emacs mode returns 0; treat any value below 40 as unreasonable.
    if columns >= 40:
      return columns
    # Fall back to the COLUMNS environment variable when set.
    # int(int) just returns the int, so the default is fine as-is.
    return int(os.getenv('COLUMNS', _help_width))
  except (TypeError, IOError, struct.error):
    return _help_width
def CutCommonSpacePrefix(text):
  """Removes a common space prefix from the lines of a multiline text.

  If the first line does not start with a space it is left untouched and
  the common prefix is searched for only among the remaining lines.  This
  is especially useful for turning doc strings into help texts, since some
  people start the doc comment right after the opening quotes while others
  put the quotes on a line of their own.

  Trailing empty lines are dropped, and empty lines following the initial
  content line are ignored while computing the common whitespace.

  Args:
    text: text to work on

  Returns:
    the resulting text
  """
  lines = text.splitlines()
  # Discard trailing blank lines.
  while lines and not lines[-1]:
    lines.pop()
  if not lines:
    return ''
  # Keep the first line out of the prefix computation unless it is itself
  # indented (content starting right after the opening quotes).
  if lines[0] and lines[0][0].isspace():
    kept_first = []
  else:
    kept_first = [lines.pop(0)]
  # Length of the whitespace shared by all non-empty content lines.
  prefix = os.path.commonprefix([content for content in lines if content])
  strip_len = len(prefix) - len(prefix.lstrip())
  if strip_len:
    lines = [content[strip_len:] if content else content for content in lines]
  return '\n'.join(kept_first + lines)
def TextWrap(text, length=None, indent='', firstline_indent=None, tabs=' '):
  """Wraps a given text to a maximum line length and returns it.

  We turn lines that only contain whitespace into empty lines.  We keep
  new lines and tabs (e.g., we do not treat tabs as spaces).

  Args:
    text: text to wrap
    length: maximum length of a line, includes indentation
        if this is None then use GetHelpWidth()
    indent: indent for all but first line
    firstline_indent: indent for first line; if None, fall back to indent
    tabs: replacement for tabs

  Returns:
    wrapped text

  Raises:
    FlagsError: if indent not shorter than length
    FlagsError: if firstline_indent not shorter than length
  """
  # Get defaults where callee used None
  if length is None:
    length = GetHelpWidth()
  if indent is None:
    indent = ''
  if len(indent) >= length:
    raise FlagsError('Indent must be shorter than length')
  # In line we will be holding the current line which is to be started
  # with indent (or firstline_indent if available) and then appended
  # with words.
  if firstline_indent is None:
    firstline_indent = ''
    line = indent
  else:
    line = firstline_indent
    if len(firstline_indent) >= length:
      raise FlagsError('First line indent must be shorter than length')
  # If the callee does not care about tabs we simply convert them to
  # spaces If callee wanted tabs to be single space then we do that
  # already here.
  if not tabs or tabs == ' ':
    text = text.replace('\t', ' ')
  else:
    tabs_are_whitespace = not tabs.strip()
  line_regex = re.compile('([ ]*)(\t*)([^ \t]+)', re.MULTILINE)
  # Split the text into lines and the lines with the regex above. The
  # resulting lines are collected in result[]. For each split we get the
  # spaces, the tabs and the next non white space (e.g. next word).
  result = []
  for text_line in text.splitlines():
    # Store result length so we can find out whether processing the next
    # line gave any new content
    old_result_len = len(result)
    # Process next line with line_regex. For optimization we do an rstrip().
    # - process tabs (changes either line or word, see below)
    # - process word (first try to squeeze on line, then wrap or force wrap)
    # Spaces found on the line are ignored, they get added while wrapping as
    # needed.
    for spaces, current_tabs, word in line_regex.findall(text_line.rstrip()):
      # If tabs weren't converted to spaces, handle them now
      if current_tabs:
        # If the last thing we added was a space anyway then drop
        # it. But let's not get rid of the indentation.
        if (((result and line != indent) or
             (not result and line != firstline_indent)) and line[-1] == ' '):
          line = line[:-1]
        # If the tab replacement is pure whitespace, append it directly to
        # the line; the rstrip() in the wrapping code below will trim the
        # line back down if necessary.
        if tabs_are_whitespace:
          line += tabs * len(current_tabs)
        else:
          # if not all tab replacement is whitespace we prepend it to the word
          word = tabs * len(current_tabs) + word
      # Handle the case where word cannot be squeezed onto current last line
      if len(line) + len(word) > length and len(indent) + len(word) <= length:
        result.append(line.rstrip())
        line = indent + word
        word = ''
        # No space left on line or can we append a space?
        if len(line) + 1 >= length:
          result.append(line.rstrip())
          line = indent
        else:
          line += ' '
      # Add word and shorten it up to allowed line length. Restart next
      # line with indent and repeat, or add a space if we're done (word
      # finished) This deals with words that cannot fit on one line
      # (e.g. indent + word longer than allowed line length).
      while len(line) + len(word) >= length:
        line += word
        result.append(line[:length])
        word = line[length:]
        line = indent
      # Default case, simply append the word and a space
      if word:
        line += word + ' '
    # End of input line. If we have content we finish the line. If the
    # current line is just the indent but we had content in during this
    # original line then we need to add an empty line.
    if (result and line != indent) or (not result and line != firstline_indent):
      result.append(line.rstrip())
    elif len(result) == old_result_len:
      result.append('')
    line = indent
  return '\n'.join(result)
def DocToHelp(doc):
  """Takes a __doc__ string and reformats it as help.

  Strips surrounding whitespace, blanks out whitespace-only lines, removes
  the common leading indentation, and joins single line breaks between
  non-whitespace characters into spaces (so paragraphs re-flow) while
  keeping blank-line separators and post-newline indentation intact.

  Args:
    doc: a docstring (or any multiline string) to reformat.

  Returns:
    The reformatted help text.
  """
  # Get rid of starting and ending white space. Using lstrip() or even
  # strip() could drop more than maximum of first line and right space
  # of last line.
  doc = doc.strip()
  # Get rid of all empty lines
  whitespace_only_line = re.compile('^[ \t]+$', re.M)
  doc = whitespace_only_line.sub('', doc)
  # Cut out common space at line beginnings
  doc = CutCommonSpacePrefix(doc)
  # Just like this module's comment, comments tend to be aligned somehow.
  # In other words they all start with the same amount of white space
  # 1) keep double new lines
  # 2) keep ws after new lines if not empty line
  # 3) all other new lines shall be changed to a space
  # Solution: Match new lines between non white space and replace with space.
  # BUG FIX: the original call passed re.M as the fourth positional argument
  # of re.sub(), which is `count` (not `flags`), silently limiting the number
  # of substitutions to re.M == 8.  The pattern does not need MULTILINE, so
  # the argument is simply dropped.
  doc = re.sub(r'(?<=\S)\n(?=\S)', ' ', doc)
  return doc
def _GetModuleObjectAndName(globals_dict):
  """Returns the module that defines a global environment, and its name.

  Args:
    globals_dict: A dictionary that should correspond to an environment
      providing the values of the globals.

  Returns:
    A pair consisting of (1) module object and (2) module name (a
    string). Returns (None, None) if the module could not be
    identified.
  """
  # The use of .items() (instead of .iteritems()) is deliberate: if a
  # parallel thread imports a module while we iterate over .iteritems()
  # (not nice, but possible), we get a RuntimeError.  The snapshot
  # returned by the slightly slower .items() is safe.
  for candidate_name, candidate in sys.modules.items():
    if getattr(candidate, '__dict__', None) is globals_dict:
      if candidate_name == '__main__':
        # Pick a more informative name for the main module.
        candidate_name = sys.argv[0]
      return (candidate, candidate_name)
  return (None, None)
def _GetMainModule():
  """Returns: string, name of the module from which execution started."""
  # Mirror the logic of _GetCallingModuleObjectAndName(): find the globals
  # dict used by the main module.  That is (normally) the globals of the
  # deepest (oldest) frame on the stack.
  frame = sys._getframe(0)
  while frame.f_back is not None:
    frame = frame.f_back
  main_module_name = _GetModuleObjectAndName(frame.f_globals)[1]
  # The strategy above fails in some cases (e.g., tools that compute code
  # coverage by redefining, among other things, the main module).  Fall
  # back to sys.argv[0] when that happens.
  if main_module_name is None:
    main_module_name = sys.argv[0]
  return main_module_name
class FlagValues:
"""Registry of 'Flag' objects.
A 'FlagValues' can then scan command line arguments, passing flag
arguments through to the 'Flag' objects that it owns. It also
provides easy access to the flag values. Typically only one
'FlagValues' object is needed by an application: gflags.FLAGS
This class is heavily overloaded:
'Flag' objects are registered via __setitem__:
FLAGS['longname'] = x # register a new flag
The .value attribute of the registered 'Flag' objects can be accessed
as attributes of this 'FlagValues' object, through __getattr__. Both
the long and short name of the original 'Flag' objects can be used to
access its value:
FLAGS.longname # parsed flag value
FLAGS.x # parsed flag value (short name)
Command line arguments are scanned and passed to the registered 'Flag'
objects through the __call__ method. Unparsed arguments, including
argv[0] (e.g. the program name) are returned.
argv = FLAGS(sys.argv) # scan command line arguments
The original registered Flag objects can be retrieved through the use
of the dictionary-like operator, __getitem__:
x = FLAGS['longname'] # access the registered Flag object
The str() operator of a 'FlagValues' object provides help for all of
the registered 'Flag' objects.
"""
def __init__(self):
# Since everything in this class is so heavily overloaded, the only
# way of defining and using fields is to access __dict__ directly.
# Dictionary: flag name (string) -> Flag object.
self.__dict__['__flags'] = {}
# Dictionary: module name (string) -> list of Flag objects that are defined
# by that module.
self.__dict__['__flags_by_module'] = {}
# Dictionary: module id (int) -> list of Flag objects that are defined by
# that module.
self.__dict__['__flags_by_module_id'] = {}
# Dictionary: module name (string) -> list of Flag objects that are
# key for that module.
self.__dict__['__key_flags_by_module'] = {}
# Set if we should use new style gnu_getopt rather than getopt when parsing
# the args. Only possible with Python 2.3+
self.UseGnuGetOpt(False)
def UseGnuGetOpt(self, use_gnu_getopt=True):
"""Use GNU-style scanning. Allows mixing of flag and non-flag arguments.
See http://docs.python.org/library/getopt.html#getopt.gnu_getopt
Args:
use_gnu_getopt: wether or not to use GNU style scanning.
"""
self.__dict__['__use_gnu_getopt'] = use_gnu_getopt
def IsGnuGetOpt(self):
return self.__dict__['__use_gnu_getopt']
def FlagDict(self):
return self.__dict__['__flags']
def FlagsByModuleDict(self):
"""Returns the dictionary of module_name -> list of defined flags.
Returns:
A dictionary. Its keys are module names (strings). Its values
are lists of Flag objects.
"""
return self.__dict__['__flags_by_module']
def FlagsByModuleIdDict(self):
"""Returns the dictionary of module_id -> list of defined flags.
Returns:
A dictionary. Its keys are module IDs (ints). Its values
are lists of Flag objects.
"""
return self.__dict__['__flags_by_module_id']
def KeyFlagsByModuleDict(self):
"""Returns the dictionary of module_name -> list of key flags.
Returns:
A dictionary. Its keys are module names (strings). Its values
are lists of Flag objects.
"""
return self.__dict__['__key_flags_by_module']
def _RegisterFlagByModule(self, module_name, flag):
"""Records the module that defines a specific flag.
We keep track of which flag is defined by which module so that we
can later sort the flags by module.
Args:
module_name: A string, the name of a Python module.
flag: A Flag object, a flag that is key to the module.
"""
flags_by_module = self.FlagsByModuleDict()
flags_by_module.setdefault(module_name, []).append(flag)
def _RegisterFlagByModuleId(self, module_id, flag):
"""Records the module that defines a specific flag.
Args:
module_id: An int, the ID of the Python module.
flag: A Flag object, a flag that is key to the module.
"""
flags_by_module_id = self.FlagsByModuleIdDict()
flags_by_module_id.setdefault(module_id, []).append(flag)
def _RegisterKeyFlagForModule(self, module_name, flag):
"""Specifies that a flag is a key flag for a module.
Args:
module_name: A string, the name of a Python module.
flag: A Flag object, a flag that is key to the module.
"""
key_flags_by_module = self.KeyFlagsByModuleDict()
# The list of key flags for the module named module_name.
key_flags = key_flags_by_module.setdefault(module_name, [])
# Add flag, but avoid duplicates.
if flag not in key_flags:
key_flags.append(flag)
def _GetFlagsDefinedByModule(self, module):
"""Returns the list of flags defined by a module.
Args:
module: A module object or a module name (a string).
Returns:
A new list of Flag objects. Caller may update this list as he
wishes: none of those changes will affect the internals of this
FlagValue object.
"""
if not isinstance(module, str):
module = module.__name__
return list(self.FlagsByModuleDict().get(module, []))
def _GetKeyFlagsForModule(self, module):
"""Returns the list of key flags for a module.
Args:
module: A module object or a module name (a string)
Returns:
A new list of Flag objects. Caller may update this list as he
wishes: none of those changes will affect the internals of this
FlagValue object.
"""
if not isinstance(module, str):
module = module.__name__
# Any flag is a key flag for the module that defined it. NOTE:
# key_flags is a fresh list: we can update it without affecting the
# internals of this FlagValues object.
key_flags = self._GetFlagsDefinedByModule(module)
# Take into account flags explicitly declared as key for a module.
for flag in self.KeyFlagsByModuleDict().get(module, []):
if flag not in key_flags:
key_flags.append(flag)
return key_flags
def FindModuleDefiningFlag(self, flagname, default=None):
"""Return the name of the module defining this flag, or default.
Args:
flagname: Name of the flag to lookup.
default: Value to return if flagname is not defined. Defaults
to None.
Returns:
The name of the module which registered the flag with this name.
If no such module exists (i.e. no flag with this name exists),
we return default.
"""
for module, flags in self.FlagsByModuleDict().iteritems():
for flag in flags:
if flag.name == flagname or flag.short_name == flagname:
return module
return default
def FindModuleIdDefiningFlag(self, flagname, default=None):
    """Return the ID of the module defining this flag, or default.

    Args:
        flagname: Name of the flag to lookup.
        default: Value to return if flagname is not defined. Defaults
            to None.

    Returns:
        The ID of the module which registered the flag with this name.
        If no such module exists (i.e. no flag with this name exists),
        we return default.
    """
    for module_id, registered in self.FlagsByModuleIdDict().iteritems():
        # A flag matches on either its long or its short name.
        hits = [f for f in registered
                if f.name == flagname or f.short_name == flagname]
        if hits:
            return module_id
    return default
def AppendFlagValues(self, flag_values):
    """Appends flags registered in another FlagValues instance.

    Args:
        flag_values: FlagValues, the registry to copy flags from.
    """
    for flag_name, flag in flag_values.FlagDict().iteritems():
        # A flag with a short name appears twice in FlagDict (once under
        # its normal name and once under the short name).  Register it
        # only under its normal name so we do not trip a
        # DuplicateFlagError on the second occurrence.
        if flag_name != flag.name:
            continue
        try:
            self[flag_name] = flag
        except DuplicateFlagError:
            # Re-raise with the source registry attached for a better
            # error message.
            raise DuplicateFlagError(flag_name, self,
                                     other_flag_values=flag_values)
def RemoveFlagValues(self, flag_values):
    """Remove flags that were previously appended from another FlagValues.

    Args:
        flag_values: FlagValues, the registry containing flags to remove.
    """
    # Deleting by attribute name routes through __delattr__, which also
    # cleans up the per-module bookkeeping.
    for name in flag_values.FlagDict():
        delattr(self, name)
def __setitem__(self, name, flag):
    """Registers a new flag variable.

    The flag is stored under its long name and, when it has one, also
    under its short name (both keys map to the same Flag object).

    Args:
        name: A string, the flag's long name.
        flag: A Flag object.

    Raises:
        IllegalFlagValue: if flag is not a Flag instance.
        FlagsError: if name is not a non-empty string.
        DuplicateFlagError: if name (or the short name) is already taken
            by a flag that does not allow overrides.
    """
    fl = self.FlagDict()
    if not isinstance(flag, Flag):
        raise IllegalFlagValue(flag)
    if not isinstance(name, type("")):
        raise FlagsError("Flag name must be a string")
    if len(name) == 0:
        raise FlagsError("Flag name cannot be empty")
    # If running under pychecker, duplicate keys are likely to be
    # defined. Disable check for duplicate keys when pycheck'ing.
    if (name in fl and not flag.allow_override and
        not fl[name].allow_override and not _RUNNING_PYCHECKER):
        module, module_name = _GetCallingModuleObjectAndName()
        if (self.FindModuleDefiningFlag(name) == module_name and
            id(module) != self.FindModuleIdDefiningFlag(name)):
            # If the flag has already been defined by a module with the same name,
            # but a different ID, we can stop here because it indicates that the
            # module is simply being imported a subsequent time.
            return
        raise DuplicateFlagError(name, self)
    short_name = flag.short_name
    if short_name is not None:
        # The short name is an alias to the very same Flag object.
        if (short_name in fl and not flag.allow_override and
            not fl[short_name].allow_override and not _RUNNING_PYCHECKER):
            raise DuplicateFlagError(short_name, self)
        fl[short_name] = flag
    fl[name] = flag
    global _exported_flags
    _exported_flags[name] = flag
def __getitem__(self, name):
    """Retrieves the Flag object for the flag --name."""
    registry = self.FlagDict()
    return registry[name]
def __getattr__(self, name):
    """Retrieves the 'value' attribute of the flag --name."""
    registry = self.FlagDict()
    try:
        flag = registry[name]
    except KeyError:
        # Unknown flag: report it as a missing attribute, per protocol.
        raise AttributeError(name)
    return flag.value
def __setattr__(self, name, value):
    """Sets the 'value' attribute of the flag --name."""
    registry = self.FlagDict()
    flag = registry[name]
    flag.value = value
    # Re-check every validator attached to this flag after mutation.
    self._AssertValidators(flag.validators)
    return value
def _AssertAllValidators(self):
    """Runs every validator attached to any registered flag."""
    # Collect each validator exactly once, even when it is attached to
    # several flags.
    unique_validators = set()
    for flag in self.FlagDict().itervalues():
        unique_validators.update(flag.validators)
    self._AssertValidators(unique_validators)
def _AssertValidators(self, validators):
    """Assert if all validators in the list are satisfied.

    Asserts validators in the order they were created.

    Args:
        validators: Iterable(gflags_validators.Validator), validators to be
            verified
    Raises:
        AttributeError: if validators work with a non-existing flag.
        IllegalFlagValue: if validation fails for at least one validator
    """
    # Sort by creation order (insertion_index) so failures are reported
    # deterministically, earliest-declared validator first.
    for validator in sorted(
        validators, key=lambda validator: validator.insertion_index):
        try:
            validator.Verify(self)
        except gflags_validators.Error, e:
            # Include the offending flags' current values in the message.
            message = validator.PrintFlagsWithValues(self)
            raise IllegalFlagValue('%s: %s' % (message, str(e)))
def _FlagIsRegistered(self, flag_obj):
    """Checks whether a Flag object is registered under some name.

    Note: this is non trivial: in addition to its normal name, a flag
    may have a short name too.  In self.FlagDict(), both the normal and
    the short name are mapped to the same flag object.  E.g., calling
    only "del FLAGS.short_name" is not unregistering the corresponding
    Flag object (it is still registered under the longer name).

    Args:
        flag_obj: A Flag object.

    Returns:
        A boolean: True iff flag_obj is registered under some name.
    """
    registry = self.FlagDict()
    # A flag can only ever be registered under its long or short name,
    # so two direct lookups suffice; no full scan of the dict values is
    # needed.
    if registry.get(flag_obj.name, None) == flag_obj:
        return True
    short = flag_obj.short_name
    return short is not None and registry.get(short, None) == flag_obj
def __delattr__(self, flag_name):
    """Deletes a previously-defined flag from a flag object.

    This method makes sure we can delete a flag by using

        del flag_values_object.<flag_name>

    E.g.,

        gflags.DEFINE_integer('foo', 1, 'Integer flag.')
        del gflags.FLAGS.foo

    Args:
        flag_name: A string, the name of the flag to be deleted.

    Raises:
        AttributeError: When there is no registered flag named flag_name.
    """
    registry = self.FlagDict()
    try:
        flag_obj = registry.pop(flag_name)
    except KeyError:
        raise AttributeError(flag_name)
    if self._FlagIsRegistered(flag_obj):
        # Still reachable under its other name (short vs. long); keep
        # the per-module bookkeeping intact.
        return
    # The flag is gone for good: scrub it from every internal
    # module-to-flags dictionary as well.
    for module_dict in (self.FlagsByModuleDict(),
                        self.FlagsByModuleIdDict(),
                        self.KeyFlagsByModuleDict()):
        self.__RemoveFlagFromDictByModule(module_dict, flag_obj)
def __RemoveFlagFromDictByModule(self, flags_by_module_dict, flag_obj):
    """Removes every occurrence of a flag from a module -> flags dict.

    Args:
        flags_by_module_dict: A dictionary that maps module names to
            lists of flags.
        flag_obj: The flag object to scrub from every list.
    """
    for unused_key, module_flags in flags_by_module_dict.iteritems():
        # The same flag may appear several times in one module's list;
        # keep removing until no occurrence is left.
        while flag_obj in module_flags:
            module_flags.remove(flag_obj)
def SetDefault(self, name, value):
    """Changes the default value of the named flag object.

    Args:
        name: A string, the name of a registered flag.
        value: The new default value.

    Raises:
        AttributeError: When there is no registered flag named name.
    """
    registry = self.FlagDict()
    try:
        flag = registry[name]
    except KeyError:
        raise AttributeError(name)
    flag.SetDefault(value)
    # The default may have changed the current value; re-validate.
    self._AssertValidators(flag.validators)
def __contains__(self, name):
    """Returns True if name is a value (flag) in the dict."""
    registry = self.FlagDict()
    return name in registry

# Dict-style alias kept for backwards compatibility.
has_key = __contains__  # a synonym for __contains__()
def __iter__(self):
    """Iterates over the names (long and short) of all registered flags."""
    return iter(self.FlagDict())
def __call__(self, argv):
    """Parses flags from argv; stores parsed flags into this FlagValues object.

    All unparsed arguments are returned.  Flags are parsed using the GNU
    Program Argument Syntax Conventions, using getopt:

    http://www.gnu.org/software/libc/manual/html_mono/libc.html#Getopt

    Args:
        argv: argument list. Can be of any type that may be converted to a list.

    Returns:
        The list of arguments not parsed as options, including argv[0]

    Raises:
        FlagsError: on any parsing error
    """
    # Support any sequence type that can be converted to a list
    argv = list(argv)

    shortopts = ""
    longopts = []

    fl = self.FlagDict()

    # This pre parses the argv list for --flagfile=<> options.
    argv = argv[:1] + self.ReadFlagsFromFiles(argv[1:], force_gnu=False)

    # Correct the argv to support the google style of passing boolean
    # parameters.  Boolean parameters may be passed by using --mybool,
    # --nomybool, --mybool=(true|false|1|0).  getopt does not support
    # having options that may or may not have a parameter.  We replace
    # instances of the short form --mybool and --nomybool with their
    # full forms: --mybool=(true|false).
    original_argv = list(argv)  # list() makes a copy
    shortest_matches = None
    for name, flag in fl.items():
        if not flag.boolean:
            continue
        if shortest_matches is None:
            # Determine the smallest allowable prefix for all flag names
            # (computed lazily: only needed if there is a boolean flag).
            shortest_matches = self.ShortestUniquePrefixes(fl)
        no_name = 'no' + name
        prefix = shortest_matches[name]
        no_prefix = shortest_matches[no_name]

        # Replace all occurrences of this boolean with extended forms
        for arg_idx in range(1, len(argv)):
            arg = argv[arg_idx]
            if arg.find('=') >= 0: continue
            if arg.startswith('--'+prefix) and ('--'+name).startswith(arg):
                argv[arg_idx] = ('--%s=true' % name)
            elif arg.startswith('--'+no_prefix) and ('--'+no_name).startswith(arg):
                argv[arg_idx] = ('--%s=false' % name)

    # Loop over all of the flags, building up the lists of short options
    # and long options that will be passed to getopt.  Short options are
    # specified as a string of letters, each letter followed by a colon
    # if it takes an argument.  Long options are stored in an array of
    # strings.  Each string ends with an '=' if it takes an argument.
    for name, flag in fl.items():
        longopts.append(name + "=")
        if len(name) == 1:  # one-letter option: allow short flag type also
            shortopts += name
            if not flag.boolean:
                shortopts += ":"

    longopts.append('undefok=')
    undefok_flags = []

    # In case --undefok is specified, loop to pick up unrecognized
    # options one by one.
    unrecognized_opts = []
    args = argv[1:]
    while True:
        try:
            if self.__dict__['__use_gnu_getopt']:
                optlist, unparsed_args = getopt.gnu_getopt(args, shortopts, longopts)
            else:
                optlist, unparsed_args = getopt.getopt(args, shortopts, longopts)
            break
        except getopt.GetoptError, e:
            if not e.opt or e.opt in fl:
                # Not an unrecognized option, re-raise the exception as a FlagsError
                raise FlagsError(e)
            # Remove offender from args and try again
            for arg_index in range(len(args)):
                if ((args[arg_index] == '--' + e.opt) or
                    (args[arg_index] == '-' + e.opt) or
                    (args[arg_index].startswith('--' + e.opt + '='))):
                    unrecognized_opts.append((e.opt, args[arg_index]))
                    args = args[0:arg_index] + args[arg_index+1:]
                    break
            else:
                # We should have found the option, so we don't expect to get
                # here.  We could assert, but raising the original exception
                # might work better.
                raise FlagsError(e)

    for name, arg in optlist:
        if name == '--undefok':
            flag_names = arg.split(',')
            undefok_flags.extend(flag_names)
            # For boolean flags, if --undefok=boolflag is specified, then we should
            # also accept --noboolflag, in addition to --boolflag.
            # Since we don't know the type of the undefok'd flag, this will affect
            # non-boolean flags as well.
            # NOTE: You shouldn't use --undefok=noboolflag, because then we will
            # accept --nonoboolflag here.  We are choosing not to do the conversion
            # from noboolflag -> boolflag because of the ambiguity that flag names
            # can start with 'no'.
            undefok_flags.extend('no' + name for name in flag_names)
            continue
        if name.startswith('--'):
            # long option
            name = name[2:]
            short_option = 0
        else:
            # short option
            name = name[1:]
            short_option = 1
        if name in fl:
            flag = fl[name]
            # A short boolean option has no argument; force it to 1.
            if flag.boolean and short_option: arg = 1
            flag.Parse(arg)

    # If there were unrecognized options, raise an exception unless
    # the options were named via --undefok.
    for opt, value in unrecognized_opts:
        if opt not in undefok_flags:
            raise UnrecognizedFlagError(opt, value)

    if unparsed_args:
        if self.__dict__['__use_gnu_getopt']:
            # if using gnu_getopt just return the program name + remainder of argv.
            ret_val = argv[:1] + unparsed_args
        else:
            # unparsed_args becomes the first non-flag detected by getopt to
            # the end of argv.  Because argv may have been modified above,
            # return original_argv for this region.
            ret_val = argv[:1] + original_argv[-len(unparsed_args):]
    else:
        ret_val = argv[:1]

    self._AssertAllValidators()
    return ret_val
def Reset(self):
    """Resets the values to the point before FLAGS(argv) was called."""
    # Unparse() restores each flag to its default value.
    for flag in self.FlagDict().values():
        flag.Unparse()
def RegisteredFlags(self):
    """Returns: a list of the names and short names of all registered flags."""
    # FlagDict holds one entry per name, including short-name aliases.
    return [name for name in self.FlagDict()]
def FlagValuesDict(self):
    """Returns: a dictionary that maps flag names to flag values."""
    result = {}
    for flag_name in self.RegisteredFlags():
        result[flag_name] = self.FlagDict()[flag_name].value
    return result
def __str__(self):
    """Generates a help string for all known flags."""
    # Delegates to GetHelp() with its default (empty) prefix.
    return self.GetHelp()
def GetHelp(self, prefix=''):
    """Generates a help string for all known flags.

    Args:
        prefix: A string prepended to each generated help line (only
            used when no per-module flag bookkeeping exists).

    Returns:
        The rendered help text as a single newline-joined string.
    """
    output_lines = []
    flags_by_module = self.FlagsByModuleDict()
    if flags_by_module:
        # Render modules alphabetically, but show the main module first
        # when it registered any flags.
        module_list = sorted(flags_by_module)
        main_module = _GetMainModule()
        if main_module in module_list:
            module_list.remove(main_module)
            module_list.insert(0, main_module)
        for module in module_list:
            self.__RenderOurModuleFlags(module, output_lines)
        # The special flags always render under the 'gflags' heading.
        self.__RenderModuleFlags('gflags',
                                 _SPECIAL_FLAGS.FlagDict().values(),
                                 output_lines)
    else:
        # No per-module bookkeeping: just print one long list of flags.
        self.__RenderFlagList(
            self.FlagDict().values() + _SPECIAL_FLAGS.FlagDict().values(),
            output_lines, prefix)
    return '\n'.join(output_lines)
def __RenderModuleFlags(self, module, flags, output_lines, prefix=""):
    """Generates a help string for a given module.

    Args:
        module: A module object or a module name (a string).
        flags: The list of Flag objects to render for that module.
        output_lines: A list of strings; output is appended here.
        prefix: A string prepended to each generated line.
    """
    module_name = module if isinstance(module, str) else module.__name__
    output_lines.append('\n%s%s:' % (prefix, module_name))
    self.__RenderFlagList(flags, output_lines, prefix + " ")
def __RenderOurModuleFlags(self, module, output_lines, prefix=""):
    """Generates a help string for a given module's own flags.

    Args:
        module: A module object or a module name (a string).
        output_lines: A list of strings; output is appended here.
        prefix: A string prepended to each generated line.
    """
    module_flags = self._GetFlagsDefinedByModule(module)
    # Modules that defined no flags produce no output at all.
    if module_flags:
        self.__RenderModuleFlags(module, module_flags, output_lines, prefix)
def __RenderOurModuleKeyFlags(self, module, output_lines, prefix=""):
    """Generates a help string for the key flags of a given module.

    Args:
        module: A module object or a module name (a string).
        output_lines: A list of strings.  The generated help message
            lines will be appended to this list.
        prefix: A string that is prepended to each generated help line.
    """
    module_key_flags = self._GetKeyFlagsForModule(module)
    # Modules without key flags produce no output at all.
    if module_key_flags:
        self.__RenderModuleFlags(module, module_key_flags, output_lines,
                                 prefix)
def ModuleHelp(self, module):
    """Describe the key flags of a module.

    Args:
        module: A module object or a module name (a string).

    Returns:
        string describing the key flags of a module.
    """
    rendered = []
    self.__RenderOurModuleKeyFlags(module, rendered)
    return '\n'.join(rendered)
def MainModuleHelp(self):
    """Describe the key flags of the main module.

    Returns:
        string describing the key flags of a module.
    """
    # _GetMainModule presumably resolves the program's entry module —
    # see its definition elsewhere in this file.
    return self.ModuleHelp(_GetMainModule())
def __RenderFlagList(self, flaglist, output_lines, prefix=" "):
    """Appends the rendered help text for the given flags to output_lines.

    Args:
        flaglist: A list of Flag objects to render.
        output_lines: A list of strings; rendered help is appended here.
        prefix: A string prepended to each generated line.
    """
    fl = self.FlagDict()
    special_fl = _SPECIAL_FLAGS.FlagDict()
    # Sort by flag name so help output is deterministic.
    flaglist = [(flag.name, flag) for flag in flaglist]
    flaglist.sort()
    flagset = {}
    for (name, flag) in flaglist:
        # It's possible this flag got deleted or overridden since being
        # registered in the per-module flaglist.  Check now against the
        # canonical source of current flag information, the FlagDict.
        if fl.get(name, None) != flag and special_fl.get(name, None) != flag:
            # a different flag is using this name now
            continue
        # only print help once
        if flag in flagset: continue
        flagset[flag] = 1
        flaghelp = ""
        if flag.short_name: flaghelp += "-%s," % flag.short_name
        if flag.boolean:
            # Boolean flags advertise their --[no] negated spelling.
            flaghelp += "--[no]%s" % flag.name + ":"
        else:
            flaghelp += "--%s" % flag.name + ":"
        flaghelp += " "
        if flag.help:
            flaghelp += flag.help
        flaghelp = TextWrap(flaghelp, indent=prefix+" ",
                            firstline_indent=prefix)
        if flag.default_as_str:
            flaghelp += "\n"
            flaghelp += TextWrap("(default: %s)" % flag.default_as_str,
                                 indent=prefix+" ")
        if flag.parser.syntactic_help:
            flaghelp += "\n"
            flaghelp += TextWrap("(%s)" % flag.parser.syntactic_help,
                                 indent=prefix+" ")
        output_lines.append(flaghelp)
def get(self, name, default):
    """Returns the value of a flag (if not None) or a default value.

    Args:
        name: A string, the name of a flag.
        default: Default value to use if the flag value is None.
    """
    value = self.__getattr__(name)
    # Explicit None test: falsy values such as 0 or '' are legitimate
    # flag values and must not fall back to the default.
    if value is None:
        return default
    return value
def ShortestUniquePrefixes(self, fl):
    """Returns: dictionary; maps flag names to their shortest unique prefix."""
    # Collect every spelling a flag answers to on the command line;
    # boolean flags also answer to their negated 'no<name>' form.
    all_names = []
    for name, flag in fl.items():
        all_names.append(name)
        if flag.boolean:
            all_names.append('no%s' % name)
    all_names.sort()
    # After sorting, a name's shortest unique prefix is fixed by its two
    # sorted neighbours: it must diverge from both.  prev_len carries the
    # constraint imposed by the previous neighbour into each iteration.
    shortest = {}
    prev_len = 0
    for idx, curr in enumerate(all_names):
        if idx == len(all_names) - 1:
            nxt = None
        else:
            nxt = all_names[idx + 1]
            nxt_len = len(nxt)
        for pos in range(len(curr)):
            if (nxt is None
                or pos >= nxt_len
                or curr[pos] != nxt[pos]):
                # curr is longer than nxt, or the two diverge at pos.
                shortest[curr] = curr[:max(prev_len, pos) + 1]
                prev_len = pos
                break
        else:
            # curr is a (possibly equal) prefix of nxt: it can only be
            # spelled out in full.
            shortest[curr] = curr
            prev_len = pos + 1  # nxt will need at least one more char
    return shortest
def __IsFlagFileDirective(self, flag_string):
    """Checks whether flag_string contain a --flagfile=<foo> directive.

    Returns:
        1 when flag_string is one of the recognized flagfile spellings,
        0 otherwise (including non-string inputs).
    """
    if not isinstance(flag_string, type("")):
        return 0
    # Both one- and two-dash spellings are accepted; the filename may be
    # inline (after '=') or supplied in the following argument.
    if flag_string in ('--flagfile', '-flagfile'):
        return 1
    if flag_string.startswith(('--flagfile=', '-flagfile=')):
        return 1
    return 0
def ExtractFilename(self, flagfile_str):
    """Returns filename from a flagfile_str of form -[-]flagfile=filename.

    The cases of --flagfile foo and -flagfile foo shouldn't be hitting
    this function, as they are dealt with in the level above this
    function.
    """
    for directive in ('--flagfile=', '-flagfile='):
        if flagfile_str.startswith(directive):
            # Strip surrounding whitespace, then expand a leading '~'.
            return os.path.expanduser(flagfile_str[len(directive):].strip())
    raise FlagsError('Hit illegal --flagfile type: %s' % flagfile_str)
def __GetFlagFileLines(self, filename, parsed_file_list):
"""Returns the useful (!=comments, etc) lines from a file with flags.
Args:
filename: A string, the name of the flag file.
parsed_file_list: A list of the names of the files we have
already read. MUTATED BY THIS FUNCTION.
Returns:
List of strings. See the note below.
NOTE(springer): This function checks for a nested --flagfile=<foo>
tag and handles the lower file recursively. It returns a list of
all the lines that _could_ contain command flags. This is
EVERYTHING except whitespace lines and comments (lines starting
with '#' or '//').
"""
line_list = [] # All line from flagfile.
flag_line_list = [] # Subset of lines w/o comments, blanks, flagfile= tags.
try:
file_obj = open(filename, 'r')
except IOError, e_msg:
raise CantOpenFlagFileError('ERROR:: Unable to open flagfile: %s' % e_msg)
line_list = file_obj.readlines()
file_obj.close()
parsed_file_list.append(filename)
# This is where we check each line in the file we just read.
for line in line_list:
if line.isspace():
pass
# Checks for comment (a line that starts with '#').
elif line.startswith('#') or line.startswith('//'):
pass
# Checks for a nested "--flagfile=<bar>" flag in the current file.
# If we find one, recursively parse down into that file.
elif self.__IsFlagFileDirective(line):
sub_filename = self.ExtractFilename(line)
# We do a little safety check for reparsing a file we've already done.
if not sub_filename in parsed_file_list:
included_flags = self.__GetFlagFileLines(sub_filename,
parsed_file_list)
flag_line_list.extend(included_flags)
else: # Case of hitting a circularly included file.
sys.stderr.write('Warning: Hit circular flagfile dependency: %s\n' %
(sub_filename,))
else:
# Any line that's not a comment or a nested flagfile should get
# copied into 2nd position. This leaves earlier arguments
# further back in the list, thus giving them higher priority.
flag_line_list.append(line.strip())
return flag_line_list
def ReadFlagsFromFiles(self, argv, force_gnu=True):
    """Processes command line args, but also allow args to be read from file.

    Args:
        argv: A list of strings, usually sys.argv[1:], which may contain one or
            more flagfile directives of the form --flagfile="./filename".
            Note that the name of the program (sys.argv[0]) should be omitted.
        force_gnu: If False, --flagfile parsing obeys normal flag semantics.
            If True, --flagfile parsing instead follows gnu_getopt semantics.
            *** WARNING *** force_gnu=False may become the future default!

    Returns:
        A new list which has the original list combined with what we read
        from any flagfile(s).

    References: Global gflags.FLAG class instance.

    This function should be called before the normal FLAGS(argv) call.
    This function scans the input list for a flag that looks like:
    --flagfile=<somefile>. Then it opens <somefile>, reads all valid key
    and value pairs and inserts them into the input list between the
    first item of the list and any subsequent items in the list.

    Note that your application's flags are still defined the usual way
    using gflags DEFINE_flag() type functions.

    Notes (assuming we're getting a commandline of some sort as our input):
    --> Flags from the command line argv _should_ always take precedence!
    --> A further "--flagfile=<otherfile.cfg>" CAN be nested in a flagfile.
        It will be processed after the parent flag file is done.
    --> For duplicate flags, first one we hit should "win".
    --> In a flagfile, a line beginning with # or // is a comment.
    --> Entirely blank lines _should_ be ignored.
    """
    parsed_file_list = []
    rest_of_args = argv
    new_argv = []
    while rest_of_args:
        current_arg = rest_of_args[0]
        rest_of_args = rest_of_args[1:]
        if self.__IsFlagFileDirective(current_arg):
            # This handles the case of -(-)flagfile foo.  In this case the
            # next arg really is part of this one.
            if current_arg == '--flagfile' or current_arg == '-flagfile':
                if not rest_of_args:
                    raise IllegalFlagValue('--flagfile with no argument')
                flag_filename = os.path.expanduser(rest_of_args[0])
                rest_of_args = rest_of_args[1:]
            else:
                # This handles the case of (-)-flagfile=foo.
                flag_filename = self.ExtractFilename(current_arg)
            new_argv.extend(
                self.__GetFlagFileLines(flag_filename, parsed_file_list))
        else:
            new_argv.append(current_arg)
            # Stop parsing after '--', like getopt and gnu_getopt.
            if current_arg == '--':
                break
            # Stop parsing after a non-flag, like getopt.
            if not current_arg.startswith('-'):
                if not force_gnu and not self.__dict__['__use_gnu_getopt']:
                    break

    # Anything left after we stopped scanning passes through untouched.
    if rest_of_args:
        new_argv.extend(rest_of_args)

    return new_argv
def FlagsIntoString(self):
    """Returns a string with the flags assignments from this FlagValues object.

    This function ignores flags whose value is None.  Each flag
    assignment is separated by a newline.

    NOTE: MUST mirror the behavior of the C++ CommandlineFlagsIntoString
    from http://code.google.com/p/google-gflags
    """
    serialized = []
    for flag in self.FlagDict().values():
        # Flags that were never given a value are omitted entirely.
        if flag.value is not None:
            serialized.append(flag.Serialize() + '\n')
    return ''.join(serialized)
def AppendFlagsIntoFile(self, filename):
    """Appends all flags assignments from this FlagInfo object to a file.

    Output will be in the format of a flagfile.

    NOTE: MUST mirror the behavior of the C++ AppendFlagsIntoFile
    from http://code.google.com/p/google-gflags

    Args:
        filename: A string, name of the file to append to.
    """
    out_file = open(filename, 'a')
    # BUGFIX: close the handle even when write() raises (e.g. disk
    # full); previously the descriptor leaked on a write error.
    try:
        out_file.write(self.FlagsIntoString())
    finally:
        out_file.close()
def WriteHelpInXMLFormat(self, outfile=None):
    """Outputs flag documentation in XML format.

    NOTE: We use element names that are consistent with those used by
    the C++ command-line flag library, from
    http://code.google.com/p/google-gflags
    We also use a few new elements (e.g., <key>), but we do not
    interfere / overlap with existing XML elements used by the C++
    library. Please maintain this consistency.

    Args:
        outfile: File object we write to. Default None means sys.stdout.
    """
    outfile = outfile or sys.stdout

    outfile.write('<?xml version=\"1.0\"?>\n')
    outfile.write('<AllFlags>\n')
    indent = ' '
    _WriteSimpleXMLElement(outfile, 'program', os.path.basename(sys.argv[0]),
                           indent)

    # The <usage> element is taken from the main module's docstring when
    # one exists; otherwise a generic usage line is synthesized.
    usage_doc = sys.modules['__main__'].__doc__
    if not usage_doc:
        usage_doc = '\nUSAGE: %s [flags]\n' % sys.argv[0]
    else:
        usage_doc = usage_doc.replace('%s', sys.argv[0])
    _WriteSimpleXMLElement(outfile, 'usage', usage_doc, indent)

    # Get list of key flags for the main module.
    key_flags = self._GetKeyFlagsForModule(_GetMainModule())

    # Sort flags by declaring module name and next by flag name.
    flags_by_module = self.FlagsByModuleDict()
    all_module_names = list(flags_by_module.keys())
    all_module_names.sort()
    for module_name in all_module_names:
        flag_list = [(f.name, f) for f in flags_by_module[module_name]]
        flag_list.sort()
        for unused_flag_name, flag in flag_list:
            is_key = flag in key_flags
            flag.WriteInfoInXMLFormat(outfile, module_name,
                                      is_key=is_key, indent=indent)

    outfile.write('</AllFlags>\n')
    outfile.flush()
def AddValidator(self, validator):
    """Register new flags validator to be checked.

    Args:
        validator: gflags_validators.Validator
    Raises:
        AttributeError: if validators work with a non-existing flag.
    """
    registry = self.FlagDict()
    # Attach the validator to every flag it constrains, so flag updates
    # can re-run it.
    for flag_name in validator.GetFlagsNames():
        registry[flag_name].validators.append(validator)
# end of FlagValues definition


# The global FlagValues instance shared by the whole program.
FLAGS = FlagValues()
def _StrOrUnicode(value):
    """Converts value to a python string or, if necessary, unicode-string."""
    try:
        return str(value)
    except UnicodeEncodeError:
        # str() failed because the value contains non-ASCII characters;
        # fall back to a unicode string (Python 2 code path).
        return unicode(value)
def _MakeXMLSafe(s):
    """Escapes <, >, and & from s, and removes XML 1.0-illegal chars.

    Args:
        s: A string to sanitize for inclusion in an XML 1.0 document.

    Returns:
        An ASCII byte string with markup escaped, illegal control
        characters removed, and non-ASCII characters entity-encoded.
    """
    s = cgi.escape(s)  # Escape <, >, and &
    # Remove characters that cannot appear in an XML 1.0 document
    # (http://www.w3.org/TR/REC-xml/#charsets).
    #
    # NOTE: if there are problems with current solution, one may move to
    # XML 1.1, which allows such chars, if they're entity-escaped (&#xHH;).
    s = re.sub(r'[\x00-\x08\x0b\x0c\x0e-\x1f]', '', s)
    # Convert non-ascii characters to entities.  Note: requires python >=2.3
    s = s.encode('ascii', 'xmlcharrefreplace')   # e.g. u'\u0388' -> '&#904;'
    return s
def _WriteSimpleXMLElement(outfile, name, value, indent):
    """Writes a simple XML element.

    Args:
        outfile: File object we write the XML element to.
        name: A string, the name of XML element.
        value: A Python object, whose string representation will be used
            as the value of the XML element.
        indent: A string, prepended to each line of generated output.
    """
    value_str = _StrOrUnicode(value)
    if isinstance(value, bool):
        # Display boolean values as the C++ flag library does: no caps.
        value_str = value_str.lower()
    safe_value_str = _MakeXMLSafe(value_str)
    outfile.write('%s<%s>%s</%s>\n' % (indent, name, safe_value_str, name))
class Flag:
"""Information about a command-line flag.
'Flag' objects define the following fields:
.name - the name for this flag
.default - the default value for this flag
.default_as_str - default value as repr'd string, e.g., "'true'" (or None)
.value - the most recent parsed value of this flag; set by Parse()
.help - a help string or None if no help is available
.short_name - the single letter alias for this flag (or None)
.boolean - if 'true', this flag does not accept arguments
.present - true if this flag was parsed from command line flags.
.parser - an ArgumentParser object
.serializer - an ArgumentSerializer object
.allow_override - the flag may be redefined without raising an error
The only public method of a 'Flag' object is Parse(), but it is
typically only called by a 'FlagValues' object. The Parse() method is
a thin wrapper around the 'ArgumentParser' Parse() method. The parsed
value is saved in .value, and the .present attribute is updated. If
this flag was already present, a FlagsError is raised.
Parse() is also called during __init__ to parse the default value and
initialize the .value attribute. This enables other python modules to
safely use flags even if the __main__ module neglects to parse the
command line arguments. The .present attribute is cleared after
__init__ parsing. If the default value is set to None, then the
__init__ parsing step is skipped and the .value attribute is
initialized to None.
Note: The default value is also presented to the user in the help
string, so it is important that it be a legal value for this flag.
"""
def __init__(self, parser, serializer, name, default, help_string,
short_name=None, boolean=0, allow_override=0):
self.name = name
if not help_string:
help_string = '(no help available)'
self.help = help_string
self.short_name = short_name
self.boolean = boolean
self.present = 0
self.parser = parser
self.serializer = serializer
self.allow_override = allow_override
self.value = None
self.validators = []
self.SetDefault(default)
def __hash__(self):
return hash(id(self))
def __eq__(self, other):
return self is other
def __lt__(self, other):
if isinstance(other, Flag):
return id(self) < id(other)
return NotImplemented
def __GetParsedValueAsString(self, value):
if value is None:
return None
if self.serializer:
return repr(self.serializer.Serialize(value))
if self.boolean:
if value:
return repr('true')
else:
return repr('false')
return repr(_StrOrUnicode(value))
def Parse(self, argument):
try:
self.value = self.parser.Parse(argument)
except ValueError, e: # recast ValueError as IllegalFlagValue
raise IllegalFlagValue("flag --%s=%s: %s" % (self.name, argument, e))
self.present += 1
def Unparse(self):
if self.default is None:
self.value = None
else:
self.Parse(self.default)
self.present = 0
def Serialize(self):
if self.value is None:
return ''
if self.boolean:
if self.value:
return "--%s" % self.name
else:
return "--no%s" % self.name
else:
if not self.serializer:
raise FlagsError("Serializer not present for flag %s" % self.name)
return "--%s=%s" % (self.name, self.serializer.Serialize(self.value))
def SetDefault(self, value):
"""Changes the default value (and current value too) for this Flag."""
# We can't allow a None override because it may end up not being
# passed to C++ code when we're overriding C++ flags. So we
# cowardly bail out until someone fixes the semantics of trying to
# pass None to a C++ flag. See swig_flags.Init() for details on
# this behavior.
# TODO(olexiy): Users can directly call this method, bypassing all flags
# validators (we don't have FlagValues here, so we can not check
# validators).
# The simplest solution I see is to make this method private.
# Another approach would be to store reference to the corresponding
# FlagValues with each flag, but this seems to be an overkill.
if value is None and self.allow_override:
raise DuplicateFlagCannotPropagateNoneToSwig(self.name)
self.default = value
self.Unparse()
self.default_as_str = self.__GetParsedValueAsString(self.value)
def Type(self):
"""Returns: a string that describes the type of this Flag."""
# NOTE: we use strings, and not the types.*Type constants because
# our flags can have more exotic types, e.g., 'comma separated list
# of strings', 'whitespace separated list of strings', etc.
return self.parser.Type()
def WriteInfoInXMLFormat(self, outfile, module_name, is_key=False, indent=''):
"""Writes common info about this flag, in XML format.
This is information that is relevant to all flags (e.g., name,
meaning, etc.). If you defined a flag that has some other pieces of
info, then please override _WriteCustomInfoInXMLFormat.
Please do NOT override this method.
Args:
outfile: File object we write to.
module_name: A string, the name of the module that defines this flag.
is_key: A boolean, True iff this flag is key for main module.
indent: A string that is prepended to each generated line.
"""
outfile.write(indent + '<flag>\n')
inner_indent = indent + ' '
if is_key:
_WriteSimpleXMLElement(outfile, 'key', 'yes', inner_indent)
_WriteSimpleXMLElement(outfile, 'file', module_name, inner_indent)
# Print flag features that are relevant for all flags.
_WriteSimpleXMLElement(outfile, 'name', self.name, inner_indent)
if self.short_name:
_WriteSimpleXMLElement(outfile, 'short_name', self.short_name,
inner_indent)
if self.help:
_WriteSimpleXMLElement(outfile, 'meaning', self.help, inner_indent)
# The default flag value can either be represented as a string like on the
# command line, or as a Python object. We serialize this value in the
# latter case in order to remain consistent.
if self.serializer and not isinstance(self.default, str):
default_serialized = self.serializer.Serialize(self.default)
else:
default_serialized = self.default
_WriteSimpleXMLElement(outfile, 'default', default_serialized, inner_indent)
_WriteSimpleXMLElement(outfile, 'current', self.value, inner_indent)
_WriteSimpleXMLElement(outfile, 'type', self.Type(), inner_indent)
# Print extra flag features this flag may have.
self._WriteCustomInfoInXMLFormat(outfile, inner_indent)
outfile.write(indent + '</flag>\n')
  def _WriteCustomInfoInXMLFormat(self, outfile, indent):
    """Writes extra info about this flag, in XML format.
    "Extra" means "not already printed by WriteInfoInXMLFormat above."
    Args:
      outfile: File object we write to.
      indent: A string that is prepended to each generated line.
    """
    # Usually, the parser knows the extra details about the flag, so
    # we just forward the call to it.
    self.parser.WriteCustomInfoInXMLFormat(outfile, indent)
# End of Flag definition
class _ArgumentParserCache(type):
"""Metaclass used to cache and share argument parsers among flags."""
_instances = {}
def __call__(mcs, *args, **kwargs):
"""Returns an instance of the argument parser cls.
This method overrides behavior of the __new__ methods in
all subclasses of ArgumentParser (inclusive). If an instance
for mcs with the same set of arguments exists, this instance is
returned, otherwise a new instance is created.
If any keyword arguments are defined, or the values in args
are not hashable, this method always returns a new instance of
cls.
Args:
args: Positional initializer arguments.
kwargs: Initializer keyword arguments.
Returns:
An instance of cls, shared or new.
"""
if kwargs:
return type.__call__(mcs, *args, **kwargs)
else:
instances = mcs._instances
key = (mcs,) + tuple(args)
try:
return instances[key]
except KeyError:
# No cache entry for key exists, create a new one.
return instances.setdefault(key, type.__call__(mcs, *args))
except TypeError:
# An object in args cannot be hashed, always return
# a new instance.
return type.__call__(mcs, *args)
class ArgumentParser(object):
  """Base class used to parse and convert arguments.
  The Parse() method checks to make sure that the string argument is a
  legal value and convert it to a native type.  If the value cannot be
  converted, it should throw a 'ValueError' exception with a human
  readable explanation of why the value is illegal.
  Subclasses should also define a syntactic_help string which may be
  presented to the user to describe the form of the legal values.
  Argument parser classes must be stateless, since instances are cached
  and shared between flags. Initializer arguments are allowed, but all
  member variables must be derived from initializer arguments only.
  """
  # Instances are cached/shared via the metaclass (Python 2 style).
  __metaclass__ = _ArgumentParserCache
  syntactic_help = ""
  def Parse(self, argument):
    """Default implementation: always returns its argument unmodified."""
    return argument
  def Type(self):
    # Human-readable type name reported by Flag.Type().
    return 'string'
  def WriteCustomInfoInXMLFormat(self, outfile, indent):
    # Hook for subclasses with extra XML details (e.g. numeric bounds).
    pass
class ArgumentSerializer:
  """Base class for generating string representations of a flag value."""
  def Serialize(self, value):
    # _StrOrUnicode presumably stringifies non-text values while keeping
    # unicode intact -- verify against its definition earlier in this file.
    return _StrOrUnicode(value)
class ListSerializer(ArgumentSerializer):
  """Serializer that joins the elements of a list with a separator."""
  def __init__(self, list_sep):
    # list_sep: string placed between consecutive serialized elements.
    self.list_sep = list_sep
  def Serialize(self, value):
    # Each element is stringified individually before joining.
    return self.list_sep.join([_StrOrUnicode(x) for x in value])
# Flags validators
def RegisterValidator(flag_name,
                      checker,
                      message='Flag validation failed',
                      flag_values=FLAGS):
  """Adds a constraint, which will be enforced during program execution.
  The constraint is validated when flags are initially parsed, and after each
  change of the corresponding flag's value.
  Args:
    flag_name: string, name of the flag to be checked.
    checker: method to validate the flag.
      input  - value of the corresponding flag (string, boolean, etc.
        This value will be passed to checker by the library). See file's
        docstring for examples.
      output - Boolean.
        Must return True if validator constraint is satisfied.
        If constraint is not satisfied, it should either return False or
        raise gflags_validators.Error(desired_error_message).
    message: error text to be shown to the user if checker returns False.
      If checker raises gflags_validators.Error, message from the raised
      Error will be shown.
    flag_values: FlagValues
  Raises:
    AttributeError: if flag_name is not registered as a valid flag name.
  """
  # SimpleValidator wraps the checker/message pair; the FlagValues object
  # owns the validator and runs it on parse and on each assignment.
  flag_values.AddValidator(gflags_validators.SimpleValidator(flag_name,
                                                             checker,
                                                             message))
def MarkFlagAsRequired(flag_name, flag_values=FLAGS):
  """Ensure that flag is not None during program execution.
  Registers a flag validator, which will follow usual validator
  rules.
  Args:
    flag_name: string, name of the flag
    flag_values: FlagValues
  Raises:
    AttributeError: if flag_name is not registered as a valid flag name.
  """
  # "Required" is implemented as an ordinary validator rejecting None.
  RegisterValidator(flag_name,
                    lambda value: value is not None,
                    message='Flag --%s must be specified.' % flag_name,
                    flag_values=flag_values)
def _RegisterBoundsValidatorIfNeeded(parser, name, flag_values):
  """Enforce lower and upper bounds for numeric flags.

  No validator is registered when the parser declares neither bound.

  Args:
    parser: NumericParser (either FloatParser or IntegerParser). Provides lower
      and upper bounds, and help text to display.
    name: string, name of the flag
    flag_values: FlagValues
  """
  if parser.lower_bound is None and parser.upper_bound is None:
    # Unbounded flag: nothing to validate.
    return

  def Checker(value):
    # None (flag unset) is always acceptable; only real values are checked.
    if value is not None and parser.IsOutsideBounds(value):
      raise gflags_validators.Error('%s is not %s' % (value, parser.syntactic_help))
    return True

  RegisterValidator(name,
                    Checker,
                    flag_values=flag_values)
# The DEFINE functions are explained in mode details in the module doc string.
def DEFINE(parser, name, default, help, flag_values=FLAGS, serializer=None,
           **args):
  """Registers a generic Flag object.
  NOTE: in the docstrings of all DEFINE* functions, "registers" is short
  for "creates a new flag and registers it".
  Auxiliary function: clients should use the specialized DEFINE_<type>
  function instead.
  Args:
    parser: ArgumentParser that is used to parse the flag arguments.
    name: A string, the flag name.
    default: The default value of the flag.
    help: A help string.
    flag_values: FlagValues object the flag will be registered with.
    serializer: ArgumentSerializer that serializes the flag value.
    args: Dictionary with extra keyword args that are passes to the
      Flag __init__.
  """
  # Build the Flag and hand registration off to DEFINE_flag.
  DEFINE_flag(Flag(parser, serializer, name, default, help, **args),
              flag_values)
def DEFINE_flag(flag, flag_values=FLAGS):
  """Registers a 'Flag' object with a 'FlagValues' object.
  By default, the global FLAGS 'FlagValue' object is used.
  Typical users will use one of the more specialized DEFINE_xxx
  functions, such as DEFINE_string or DEFINE_integer.  But developers
  who need to create Flag objects themselves should use this function
  to register their flags.
  """
  # copying the reference to flag_values prevents pychecker warnings
  fv = flag_values
  # Registration happens via FlagValues.__setitem__.
  fv[flag.name] = flag
  # Tell flag_values who's defining the flag.
  if isinstance(flag_values, FlagValues):
    # Regarding the above isinstance test: some users pass funny
    # values of flag_values (e.g., {}) in order to avoid the flag
    # registration (in the past, there used to be a flag_values ==
    # FLAGS test here) and redefine flags with the same name (e.g.,
    # debug).  To avoid breaking their code, we perform the
    # registration only if flag_values is a real FlagValues object.
    module, module_name = _GetCallingModuleObjectAndName()
    flag_values._RegisterFlagByModule(module_name, flag)
    flag_values._RegisterFlagByModuleId(id(module), flag)
def _InternalDeclareKeyFlags(flag_names,
                             flag_values=FLAGS, key_flag_values=None):
  """Declares a flag as key for the calling module.
  Internal function.  User code should call DECLARE_key_flag or
  ADOPT_module_key_flags instead.
  Args:
    flag_names: A list of strings that are names of already-registered
      Flag objects.
    flag_values: A FlagValues object that the flags listed in
      flag_names have registered with (the value of the flag_values
      argument from the DEFINE_* calls that defined those flags).
      This should almost never need to be overridden.
    key_flag_values: A FlagValues object that (among possibly many
      other things) keeps track of the key flags for each module.
      Default None means "same as flag_values".  This should almost
      never need to be overridden.
  Raises:
    UnrecognizedFlagError: when we refer to a flag that was not
      defined yet.
  """
  key_flag_values = key_flag_values or flag_values

  module = _GetCallingModule()

  for flag_name in flag_names:
    # Fail early with a clear error instead of a KeyError below.
    if flag_name not in flag_values:
      raise UnrecognizedFlagError(flag_name)
    flag = flag_values.FlagDict()[flag_name]
    key_flag_values._RegisterKeyFlagForModule(module, flag)
def DECLARE_key_flag(flag_name, flag_values=FLAGS):
  """Declares one flag as key to the current module.
  Key flags are flags that are deemed really important for a module.
  They are important when listing help messages; e.g., if the
  --helpshort command-line flag is used, then only the key flags of the
  main module are listed (instead of all flags, as in the case of
  --help).
  Sample usage:
    gflags.DECLARE_key_flag('flag_1')
  Args:
    flag_name: A string, the name of an already declared flag.
      (Redeclaring flags as key, including flags implicitly key
      because they were declared in this module, is a no-op.)
    flag_values: A FlagValues object.  This should almost never
      need to be overridden.
  """
  if flag_name in _SPECIAL_FLAGS:
    # Take care of the special flags, e.g., --flagfile, --undefok.
    # These flags are defined in _SPECIAL_FLAGS, and are treated
    # specially during flag parsing, taking precedence over the
    # user-defined flags.
    _InternalDeclareKeyFlags([flag_name],
                             flag_values=_SPECIAL_FLAGS,
                             key_flag_values=flag_values)
    return
  _InternalDeclareKeyFlags([flag_name], flag_values=flag_values)
def ADOPT_module_key_flags(module, flag_values=FLAGS):
  """Declares that all flags key to a module are key to the current module.
  Args:
    module: A module object.
    flag_values: A FlagValues object.  This should almost never need
      to be overridden.
  Raises:
    FlagsError: When given an argument that is a module name (a
      string), instead of a module object.
  """
  # NOTE(salcianu): an even better test would be if not
  # isinstance(module, types.ModuleType) but I didn't want to import
  # types for such a tiny use.
  if isinstance(module, str):
    raise FlagsError('Received module name %s; expected a module object.'
                     % module)
  _InternalDeclareKeyFlags(
      [f.name for f in flag_values._GetKeyFlagsForModule(module.__name__)],
      flag_values=flag_values)
  # If module is this flag module, take _SPECIAL_FLAGS into account.
  if module == _GetThisModuleObjectAndName()[0]:
    _InternalDeclareKeyFlags(
        # As we associate flags with _GetCallingModuleObjectAndName(), the
        # special flags defined in this module are incorrectly registered with
        # a different module.  So, we can't use _GetKeyFlagsForModule.
        # Instead, we take all flags from _SPECIAL_FLAGS (a private
        # FlagValues, where no other module should register flags).
        [f.name for f in _SPECIAL_FLAGS.FlagDict().values()],
        flag_values=_SPECIAL_FLAGS,
        key_flag_values=flag_values)
#
# STRING FLAGS
#
def DEFINE_string(name, default, help, flag_values=FLAGS, **args):
  """Registers a flag whose value can be any string."""
  # The base parser/serializer pass strings through unchanged.
  parser = ArgumentParser()
  serializer = ArgumentSerializer()
  DEFINE(parser, name, default, help, flag_values, serializer, **args)
#
# BOOLEAN FLAGS
#
class BooleanParser(ArgumentParser):
  """Parser of boolean values."""

  def Convert(self, argument):
    """Converts the argument to a boolean; raise ValueError on errors."""
    if type(argument) == str:
      lowered = argument.lower()
      if lowered in ('true', 't', '1'):
        return True
      if lowered in ('false', 'f', '0'):
        return False

    as_bool = bool(argument)
    if argument == as_bool:
      # Accept only genuine booleans (True, False, 0, or 1), not merely
      # truthy/falsy objects such as lists, strings, or other ints.
      return as_bool
    raise ValueError('Non-boolean argument to boolean flag', argument)

  def Parse(self, argument):
    """Parse the argument by delegating to Convert."""
    return self.Convert(argument)

  def Type(self):
    return 'bool'
class BooleanFlag(Flag):
  """Basic boolean flag.
  Boolean flags do not take any arguments, and their value is either
  True (1) or False (0).  The false value is specified on the command
  line by prepending the word 'no' to either the long or the short flag
  name.
  For example, if a Boolean flag was created whose long name was
  'update' and whose short name was 'x', then this flag could be
  explicitly unset through either --noupdate or --nox.
  """
  def __init__(self, name, default, help, short_name=None, **args):
    p = BooleanParser()
    # No serializer is passed (None); the trailing positional 1 is
    # presumably Flag's boolean/takes-no-argument marker -- verify
    # against Flag.__init__ earlier in this file.
    Flag.__init__(self, p, None, name, default, help, short_name, 1, **args)
    if not self.help: self.help = "a boolean value"
def DEFINE_boolean(name, default, help, flag_values=FLAGS, **args):
  """Registers a boolean flag.
  Such a boolean flag does not take an argument.  If a user wants to
  specify a false value explicitly, the long option beginning with 'no'
  must be used: i.e. --noflag
  This flag will have a value of None, True or False.  None is possible
  if default=None and the user does not specify the flag on the command
  line.
  """
  DEFINE_flag(BooleanFlag(name, default, help, **args), flag_values)
# Match C++ API to unconfuse C++ people.
DEFINE_bool = DEFINE_boolean
class HelpFlag(BooleanFlag):
  """
  HelpFlag is a special boolean flag that prints usage information and
  raises a SystemExit exception if it is ever found in the command
  line arguments.  Note this is called with allow_override=1, so other
  apps can define their own --help flag, replacing this one, if they want.
  """
  def __init__(self):
    BooleanFlag.__init__(self, "help", 0, "show this help",
                         short_name="?", allow_override=1)
  def Parse(self, arg):
    # Only act when the flag is actually set (truthy arg).
    if arg:
      # Fall back to a generic usage line when __main__ has no docstring.
      doc = sys.modules["__main__"].__doc__
      flags = str(FLAGS)
      print doc or ("\nUSAGE: %s [flags]\n" % sys.argv[0])
      if flags:
        print "flags:"
        print flags
      # Exit after printing help, mirroring conventional --help behavior.
      sys.exit(1)
class HelpXMLFlag(BooleanFlag):
  """Similar to HelpFlag, but generates output in XML format."""
  def __init__(self):
    BooleanFlag.__init__(self, 'helpxml', False,
                         'like --help, but generates XML output',
                         allow_override=1)
  def Parse(self, arg):
    if arg:
      # Delegates the actual XML generation to the FlagValues object.
      FLAGS.WriteHelpInXMLFormat(sys.stdout)
      sys.exit(1)
class HelpshortFlag(BooleanFlag):
  """
  HelpshortFlag is a special boolean flag that prints usage
  information for the "main" module, and raises a SystemExit exception
  if it is ever found in the command line arguments.  Note this is
  called with allow_override=1, so other apps can define their own
  --helpshort flag, replacing this one, if they want.
  """
  def __init__(self):
    BooleanFlag.__init__(self, "helpshort", 0,
                         "show usage only for this module", allow_override=1)
  def Parse(self, arg):
    if arg:
      doc = sys.modules["__main__"].__doc__
      # Unlike --help, only the main module's flags are shown.
      flags = FLAGS.MainModuleHelp()
      print doc or ("\nUSAGE: %s [flags]\n" % sys.argv[0])
      if flags:
        print "flags:"
        print flags
      sys.exit(1)
#
# Numeric parser - base class for Integer and Float parsers
#
class NumericParser(ArgumentParser):
  """Parser of numeric values.

  Parsed value may be bounded to a given upper and lower bound
  (attributes supplied by subclasses).
  """

  def IsOutsideBounds(self, val):
    """Return True iff val violates the configured lower or upper bound."""
    if self.lower_bound is not None and val < self.lower_bound:
      return True
    if self.upper_bound is not None and val > self.upper_bound:
      return True
    return False

  def Parse(self, argument):
    """Convert the argument and enforce bounds; raises ValueError."""
    value = self.Convert(argument)
    if self.IsOutsideBounds(value):
      raise ValueError("%s is not %s" % (value, self.syntactic_help))
    return value

  def WriteCustomInfoInXMLFormat(self, outfile, indent):
    # Emit one element per bound that is actually configured.
    for tag in ('lower_bound', 'upper_bound'):
      bound = getattr(self, tag)
      if bound is not None:
        _WriteSimpleXMLElement(outfile, tag, bound, indent)

  def Convert(self, argument):
    """Default implementation: always returns its argument unmodified."""
    return argument
# End of Numeric Parser
# End of Numeric Parser
#
# FLOAT FLAGS
#
class FloatParser(NumericParser):
  """Parser of floating point values.
  Parsed value may be bounded to a given upper and lower bound.
  """
  number_article = "a"
  number_name = "number"
  syntactic_help = " ".join((number_article, number_name))
  def __init__(self, lower_bound=None, upper_bound=None):
    super(FloatParser, self).__init__()
    self.lower_bound = lower_bound
    self.upper_bound = upper_bound
    # Build a human-readable description of the legal range.  The order
    # of these branches matters: both-bounds first, then the special
    # zero-bound phrasings, then single-bound fallbacks.
    sh = self.syntactic_help
    if lower_bound is not None and upper_bound is not None:
      sh = ("%s in the range [%s, %s]" % (sh, lower_bound, upper_bound))
    elif lower_bound == 0:
      sh = "a non-negative %s" % self.number_name
    elif upper_bound == 0:
      sh = "a non-positive %s" % self.number_name
    elif upper_bound is not None:
      sh = "%s <= %s" % (self.number_name, upper_bound)
    elif lower_bound is not None:
      sh = "%s >= %s" % (self.number_name, lower_bound)
    self.syntactic_help = sh
  def Convert(self, argument):
    """Converts argument to a float; raises ValueError on errors."""
    return float(argument)
  def Type(self):
    return 'float'
# End of FloatParser
# End of FloatParser
def DEFINE_float(name, default, help, lower_bound=None, upper_bound=None,
                 flag_values=FLAGS, **args):
  """Registers a flag whose value must be a float.
  If lower_bound or upper_bound are set, then this flag must be
  within the given range.
  """
  parser = FloatParser(lower_bound, upper_bound)
  serializer = ArgumentSerializer()
  DEFINE(parser, name, default, help, flag_values, serializer, **args)
  # Bounds are additionally enforced via a validator on every assignment.
  _RegisterBoundsValidatorIfNeeded(parser, name, flag_values=flag_values)
#
# INTEGER FLAGS
#
class IntegerParser(NumericParser):
  """Parser of an integer value.
  Parsed value may be bounded to a given upper and lower bound.
  """
  number_article = "an"
  number_name = "integer"
  syntactic_help = " ".join((number_article, number_name))
  def __init__(self, lower_bound=None, upper_bound=None):
    super(IntegerParser, self).__init__()
    self.lower_bound = lower_bound
    self.upper_bound = upper_bound
    # Build a human-readable description of the legal range.  Branch
    # order matters: both-bounds first, then the special one/zero
    # phrasings, then single-bound fallbacks.
    sh = self.syntactic_help
    if lower_bound is not None and upper_bound is not None:
      sh = ("%s in the range [%s, %s]" % (sh, lower_bound, upper_bound))
    elif lower_bound == 1:
      sh = "a positive %s" % self.number_name
    elif upper_bound == -1:
      sh = "a negative %s" % self.number_name
    elif lower_bound == 0:
      sh = "a non-negative %s" % self.number_name
    elif upper_bound == 0:
      sh = "a non-positive %s" % self.number_name
    elif upper_bound is not None:
      sh = "%s <= %s" % (self.number_name, upper_bound)
    elif lower_bound is not None:
      sh = "%s >= %s" % (self.number_name, lower_bound)
    self.syntactic_help = sh
  def Convert(self, argument):
    """Converts argument to an int; raises ValueError on errors.

    String arguments are parsed in base 10, or in base 16 when they
    carry a '0x' or '0X' prefix.
    """
    __pychecker__ = 'no-returnvalues'
    if type(argument) == str:
      base = 10
      # FIX: also accept an uppercase '0X' hex prefix; previously only
      # lowercase '0x' selected base 16 and '0X...' raised ValueError.
      if len(argument) > 2 and argument[0] == "0" and argument[1] in "xX":
        base = 16
      return int(argument, base)
    else:
      return int(argument)
  def Type(self):
    return 'int'
def DEFINE_integer(name, default, help, lower_bound=None, upper_bound=None,
                   flag_values=FLAGS, **args):
  """Registers a flag whose value must be an integer.
  If lower_bound, or upper_bound are set, then this flag must be
  within the given range.
  """
  parser = IntegerParser(lower_bound, upper_bound)
  serializer = ArgumentSerializer()
  DEFINE(parser, name, default, help, flag_values, serializer, **args)
  # Bounds are additionally enforced via a validator on every assignment.
  _RegisterBoundsValidatorIfNeeded(parser, name, flag_values=flag_values)
#
# ENUM FLAGS
#
class EnumParser(ArgumentParser):
  """Parser of a string enum value (a string value from a given set).

  If enum_values is not specified, any string is allowed.
  """

  def __init__(self, enum_values=None):
    super(EnumParser, self).__init__()
    self.enum_values = enum_values

  def Parse(self, argument):
    """Return the argument unchanged; raise ValueError if not allowed."""
    if not self.enum_values:
      # Open enum: every string is legal.
      return argument
    if argument in self.enum_values:
      return argument
    raise ValueError("value should be one of <%s>" %
                     "|".join(self.enum_values))

  def Type(self):
    return 'string enum'
class EnumFlag(Flag):
  """Basic enum flag; its value can be any string from list of enum_values."""
  def __init__(self, name, default, help, enum_values=None,
               short_name=None, **args):
    enum_values = enum_values or []
    p = EnumParser(enum_values)
    g = ArgumentSerializer()
    Flag.__init__(self, p, g, name, default, help, short_name, **args)
    if not self.help: self.help = "an enum string"
    # Prefix the help text with the set of legal values, e.g. "<a|b>: ...".
    self.help = "<%s>: %s" % ("|".join(enum_values), self.help)
  def _WriteCustomInfoInXMLFormat(self, outfile, indent):
    # One <enum_value> element per legal value.
    for enum_value in self.parser.enum_values:
      _WriteSimpleXMLElement(outfile, 'enum_value', enum_value, indent)
def DEFINE_enum(name, default, enum_values, help, flag_values=FLAGS,
                **args):
  """Registers a flag whose value can be any string from enum_values."""
  DEFINE_flag(EnumFlag(name, default, help, enum_values, ** args),
              flag_values)
#
# LIST FLAGS
#
class BaseListParser(ArgumentParser):
  """Base class for a parser of lists of strings.

  To extend, inherit from this class; from the subclass __init__, call
    BaseListParser.__init__(self, token, name)
  where token is a character used to tokenize, and name is a description
  of the separator.
  """

  def __init__(self, token=None, name=None):
    assert name
    super(BaseListParser, self).__init__()
    self._token = token
    self._name = name
    self.syntactic_help = "a %s separated list" % self._name

  def Parse(self, argument):
    """Split on the token; '' becomes []; lists pass through unchanged."""
    if isinstance(argument, list):
      return argument
    if argument == '':
      return []
    # Whitespace around each element is stripped after splitting.
    return [piece.strip() for piece in argument.split(self._token)]

  def Type(self):
    return '%s separated list of strings' % self._name
class ListParser(BaseListParser):
  """Parser for a comma-separated list of strings."""
  def __init__(self):
    BaseListParser.__init__(self, ',', 'comma')
  def WriteCustomInfoInXMLFormat(self, outfile, indent):
    BaseListParser.WriteCustomInfoInXMLFormat(self, outfile, indent)
    # Also advertise the separator character in the XML help output.
    _WriteSimpleXMLElement(outfile, 'list_separator', repr(','), indent)
class WhitespaceSeparatedListParser(BaseListParser):
  """Parser for a whitespace-separated list of strings."""
  def __init__(self):
    # token=None makes str.split() split on any run of whitespace.
    BaseListParser.__init__(self, None, 'whitespace')
  def WriteCustomInfoInXMLFormat(self, outfile, indent):
    """Writes one <list_separator> element per whitespace character.

    Separators are emitted in sorted order so the XML output is
    deterministic.
    """
    BaseListParser.WriteCustomInfoInXMLFormat(self, outfile, indent)
    separators = sorted(string.whitespace)
    # BUG FIX: the sorted list was previously built but the loop iterated
    # string.whitespace directly, discarding the deterministic ordering.
    for ws_char in separators:
      _WriteSimpleXMLElement(outfile, 'list_separator', repr(ws_char), indent)
def DEFINE_list(name, default, help, flag_values=FLAGS, **args):
  """Registers a flag whose value is a comma-separated list of strings."""
  parser = ListParser()
  serializer = ListSerializer(',')
  DEFINE(parser, name, default, help, flag_values, serializer, **args)
def DEFINE_spaceseplist(name, default, help, flag_values=FLAGS, **args):
  """Registers a flag whose value is a whitespace-separated list of strings.
  Any whitespace can be used as a separator.
  """
  parser = WhitespaceSeparatedListParser()
  # Values are re-serialized with a single space between elements.
  serializer = ListSerializer(' ')
  DEFINE(parser, name, default, help, flag_values, serializer, **args)
#
# MULTI FLAGS
#
class MultiFlag(Flag):
  """A flag that can appear multiple time on the command-line.
  The value of such a flag is a list that contains the individual values
  from all the appearances of that flag on the command-line.
  See the __doc__ for Flag for most behavior of this class.  Only
  differences in behavior are described here:
    * The default value may be either a single value or a list of values.
      A single value is interpreted as the [value] singleton list.
    * The value of the flag is always a list, even if the option was
      only supplied once, and even if the default value is a single
      value
  """
  def __init__(self, *args, **kwargs):
    Flag.__init__(self, *args, **kwargs)
    self.help += ';\n    repeat this option to specify a list of values'
  def Parse(self, arguments):
    """Parses one or more arguments with the installed parser.
    Args:
      arguments: a single argument or a list of arguments (typically a
        list of default values); a single argument is converted
        internally into a list containing one item.
    """
    if not isinstance(arguments, list):
      # Default value may be a list of values.  Most other arguments
      # will not be, so convert them into a single-item list to make
      # processing simpler below.
      arguments = [arguments]
    if self.present:
      # keep a backup reference to list of previously supplied option values
      values = self.value
    else:
      # "erase" the defaults with an empty list
      values = []
    for item in arguments:
      # have Flag superclass parse argument, overwriting self.value reference
      Flag.Parse(self, item)  # also increments self.present
      values.append(self.value)
    # put list of option values back in the 'value' attribute
    self.value = values
  def Serialize(self):
    """Serializes all accumulated values as space-separated flag strings."""
    if not self.serializer:
      raise FlagsError("Serializer not present for flag %s" % self.name)
    if self.value is None:
      return ''
    s = ''
    multi_value = self.value
    # NOTE: the loop deliberately rebinds self.value to each element so
    # that Flag.Serialize (which reads self.value) serializes one item
    # at a time; the original list is restored afterwards.
    for self.value in multi_value:
      if s: s += ' '
      s += Flag.Serialize(self)
    self.value = multi_value
    return s
  def Type(self):
    # E.g. 'multi int' for a MultiFlag backed by an IntegerParser.
    return 'multi ' + self.parser.Type()
def DEFINE_multi(parser, serializer, name, default, help, flag_values=FLAGS,
                 **args):
  """Registers a generic MultiFlag that parses its args with a given parser.
  Auxiliary function.  Normal users should NOT use it directly.
  Developers who need to create their own 'Parser' classes for options
  which can appear multiple times can call this module function to
  register their flags.
  """
  DEFINE_flag(MultiFlag(parser, serializer, name, default, help, **args),
              flag_values)
def DEFINE_multistring(name, default, help, flag_values=FLAGS, **args):
  """Registers a flag whose value can be a list of any strings.
  Use the flag on the command line multiple times to place multiple
  string values into the list.  The 'default' may be a single string
  (which will be converted into a single-element list) or a list of
  strings.
  """
  parser = ArgumentParser()
  serializer = ArgumentSerializer()
  DEFINE_multi(parser, serializer, name, default, help, flag_values, **args)
def DEFINE_multi_int(name, default, help, lower_bound=None, upper_bound=None,
                     flag_values=FLAGS, **args):
  """Registers a flag whose value can be a list of arbitrary integers.
  Use the flag on the command line multiple times to place multiple
  integer values into the list.  The 'default' may be a single integer
  (which will be converted into a single-element list) or a list of
  integers.
  """
  # Bounds apply to each individual value in the list.
  parser = IntegerParser(lower_bound, upper_bound)
  serializer = ArgumentSerializer()
  DEFINE_multi(parser, serializer, name, default, help, flag_values, **args)
def DEFINE_multi_float(name, default, help, lower_bound=None, upper_bound=None,
                       flag_values=FLAGS, **args):
  """Registers a flag whose value can be a list of arbitrary floats.
  Use the flag on the command line multiple times to place multiple
  float values into the list.  The 'default' may be a single float
  (which will be converted into a single-element list) or a list of
  floats.
  """
  # Bounds apply to each individual value in the list.
  parser = FloatParser(lower_bound, upper_bound)
  serializer = ArgumentSerializer()
  DEFINE_multi(parser, serializer, name, default, help, flag_values, **args)
# Now register the flags that we want to exist in all applications.
# These are all defined with allow_override=1, so user-apps can use
# these flagnames for their own purposes, if they want.
DEFINE_flag(HelpFlag())
DEFINE_flag(HelpshortFlag())
DEFINE_flag(HelpXMLFlag())
# Define special flags here so that help may be generated for them.
# NOTE: Please do NOT use _SPECIAL_FLAGS from outside this module.
_SPECIAL_FLAGS = FlagValues()
DEFINE_string(
'flagfile', "",
"Insert flag definitions from the given file into the command line.",
_SPECIAL_FLAGS)
DEFINE_string(
'undefok', "",
"comma-separated list of flag names that it is okay to specify "
"on the command line even if the program does not define a flag "
"with that name. IMPORTANT: flags in this list that have "
"arguments MUST use the --flag=value format.", _SPECIAL_FLAGS)
| mit |
aruizramon/alec_erpnext | erpnext/hr/doctype/salary_slip/salary_slip.py | 4 | 7973 | # Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
# License: GNU General Public License v3. See license.txt
from __future__ import unicode_literals
import frappe
from frappe.utils import add_days, cint, cstr, flt, getdate, nowdate, rounded, date_diff, money_in_words
from frappe.model.naming import make_autoname
from frappe import msgprint, _
from erpnext.accounts.utils import get_fiscal_year
from erpnext.setup.utils import get_company_currency
from erpnext.hr.utils import set_employee_name
from erpnext.hr.doctype.process_payroll.process_payroll import get_month_details
from erpnext.hr.doctype.employee.employee import get_holiday_list_for_employee
from erpnext.utilities.transaction_base import TransactionBase
class SalarySlip(TransactionBase):
def autoname(self):
self.name = make_autoname('Sal Slip/' +self.employee + '/.#####')
def validate(self):
self.check_existing()
if not (len(self.get("earnings")) or len(self.get("deductions"))):
self.get_emp_and_leave_details()
else:
self.get_leave_details(lwp = self.leave_without_pay)
if not self.net_pay:
self.calculate_net_pay()
company_currency = get_company_currency(self.company)
self.total_in_words = money_in_words(self.rounded_total, company_currency)
set_employee_name(self)
def get_emp_and_leave_details(self):
if self.employee:
joining_date, relieving_date = frappe.db.get_value("Employee", self.employee,
["date_of_joining", "relieving_date"])
self.get_leave_details(joining_date, relieving_date)
struct = self.check_sal_struct(joining_date, relieving_date)
if struct:
self.set("earnings", [])
self.set("deduction", [])
self.pull_sal_struct(struct)
def check_sal_struct(self, joining_date, relieving_date):
m = get_month_details(self.fiscal_year, self.month)
struct = frappe.db.sql("""select name from `tabSalary Structure`
where employee=%s and is_active = 'Yes'
and (from_date <= %s or from_date <= %s)
and (to_date is null or to_date >= %s or to_date >= %s)""",
(self.employee, m.month_start_date, joining_date, m.month_end_date, relieving_date))
if not struct:
msgprint(_("No active Salary Structure found for employee {0} and the month")
.format(self.employee))
self.employee = None
return struct and struct[0][0] or ''
def pull_sal_struct(self, struct):
from erpnext.hr.doctype.salary_structure.salary_structure import make_salary_slip
make_salary_slip(struct, self)
def pull_emp_details(self):
emp = frappe.db.get_value("Employee", self.employee, ["bank_name", "bank_ac_no"], as_dict=1)
if emp:
self.bank_name = emp.bank_name
self.bank_account_no = emp.bank_ac_no
def get_leave_details(self, joining_date=None, relieving_date=None, lwp=None):
if not self.fiscal_year:
# if default fiscal year is not set, get from nowdate
self.fiscal_year = get_fiscal_year(nowdate())[0]
if not self.month:
self.month = "%02d" % getdate(nowdate()).month
if not joining_date:
joining_date, relieving_date = frappe.db.get_value("Employee", self.employee,
["date_of_joining", "relieving_date"])
m = get_month_details(self.fiscal_year, self.month)
holidays = self.get_holidays_for_employee(m['month_start_date'], m['month_end_date'])
working_days = m["month_days"]
if not cint(frappe.db.get_value("HR Settings", None, "include_holidays_in_total_working_days")):
working_days -= len(holidays)
if working_days < 0:
frappe.throw(_("There are more holidays than working days this month."))
if not lwp:
lwp = self.calculate_lwp(holidays, m)
self.total_days_in_month = working_days
self.leave_without_pay = lwp
payment_days = flt(self.get_payment_days(m, joining_date, relieving_date)) - flt(lwp)
self.payment_days = payment_days > 0 and payment_days or 0
def get_payment_days(self, month, joining_date, relieving_date):
start_date = month['month_start_date']
if joining_date:
if joining_date > month['month_start_date']:
start_date = joining_date
elif joining_date > month['month_end_date']:
return
end_date = month['month_end_date']
if relieving_date:
if relieving_date > start_date and relieving_date < month['month_end_date']:
end_date = relieving_date
elif relieving_date < month['month_start_date']:
frappe.throw(_("Employee relieved on {0} must be set as 'Left'")
.format(relieving_date))
payment_days = date_diff(end_date, start_date) + 1
if not cint(frappe.db.get_value("HR Settings", None, "include_holidays_in_total_working_days")):
holidays = self.get_holidays_for_employee(start_date, end_date)
payment_days -= len(holidays)
return payment_days
def get_holidays_for_employee(self, start_date, end_date):
holiday_list = get_holiday_list_for_employee(self.employee)
holidays = frappe.db.sql_list('''select holiday_date from `tabHoliday`
where
parent=%(holiday_list)s
and holiday_date >= %(start_date)s
and holiday_date <= %(end_date)s''', {
"holiday_list": holiday_list,
"start_date": start_date,
"end_date": end_date
})
holidays = [cstr(i) for i in holidays]
return holidays
def calculate_lwp(self, holidays, m):
lwp = 0
for d in range(m['month_days']):
dt = add_days(cstr(m['month_start_date']), d)
if dt not in holidays:
leave = frappe.db.sql("""
select t1.name, t1.half_day
from `tabLeave Application` t1, `tabLeave Type` t2
where t2.name = t1.leave_type
and t2.is_lwp = 1
and t1.docstatus = 1
and t1.employee = %s
and %s between from_date and to_date
""", (self.employee, dt))
if leave:
lwp = cint(leave[0][1]) and (lwp + 0.5) or (lwp + 1)
return lwp
def check_existing(self):
    """Abort (via frappe.throw) if a non-cancelled Salary Slip already
    exists for the same employee, month and fiscal year, other than the
    current document."""
    ret_exist = frappe.db.sql("""select name from `tabSalary Slip`
        where month = %s and fiscal_year = %s and docstatus != 2
        and employee = %s and name != %s""",
        (self.month, self.fiscal_year, self.employee, self.name))
    if ret_exist:
        # Capture the employee id for the message *before* blanking the
        # field: the original cleared it first, so the error message
        # always displayed an empty placeholder.
        employee = self.employee
        self.employee = ''
        frappe.throw(_("Salary Slip of employee {0} already created for this month").format(employee))
def calculate_earning_total(self):
    """Recompute gross pay from arrears, leave encashment and every
    earning row, prorating LWP-dependent components by payment days."""
    self.gross_pay = flt(self.arrear_amount) + flt(self.leave_encashment_amount)
    for row in self.get("earnings"):
        if cint(row.e_depends_on_lwp) == 1:
            # prorate by the fraction of the month actually worked
            prorated = (flt(row.e_amount) * flt(self.payment_days)
                / cint(self.total_days_in_month))
            row.e_modified_amount = rounded(
                prorated, self.precision("e_modified_amount", "earnings"))
        elif not self.payment_days:
            row.e_modified_amount = 0
        elif not row.e_modified_amount:
            # default: pay the full component amount
            row.e_modified_amount = row.e_amount
        self.gross_pay += flt(row.e_modified_amount)
def calculate_ded_total(self):
    """Recompute the total deduction from every deduction row,
    prorating LWP-dependent components by payment days."""
    self.total_deduction = 0
    for row in self.get('deductions'):
        if cint(row.d_depends_on_lwp) == 1:
            # prorate by the fraction of the month actually worked
            prorated = (flt(row.d_amount) * flt(self.payment_days)
                / cint(self.total_days_in_month))
            row.d_modified_amount = rounded(
                prorated, self.precision("d_modified_amount", "deductions"))
        elif not self.payment_days:
            row.d_modified_amount = 0
        elif not row.d_modified_amount:
            # default: deduct the full component amount
            row.d_modified_amount = row.d_amount
        self.total_deduction += flt(row.d_modified_amount)
def calculate_net_pay(self):
    """Compute net pay (gross - deductions) and its rounded total,
    honouring the global 'disable_rounded_total' setting."""
    keep_precision = cint(frappe.db.get_value("Global Defaults", None, "disable_rounded_total"))
    self.calculate_earning_total()
    self.calculate_ded_total()
    self.net_pay = flt(self.gross_pay) - flt(self.total_deduction)
    # when rounding is disabled, round at field precision instead of 0
    digits = self.precision("net_pay") if keep_precision else 0
    self.rounded_total = rounded(self.net_pay, digits)
def on_submit(self):
    """On document submission, e-mail the slip if the e-mail flag is set."""
    if self.email_check == 1:
        self.send_mail_funct()
def send_mail_funct(self):
    """E-mail the salary slip (as a print attachment) to the employee's
    company e-mail address, if one is on record; otherwise notify the
    user that no mail was sent."""
    receiver = frappe.db.get_value("Employee", self.employee, "company_email")
    if not receiver:
        msgprint(_("Company Email ID not found, hence mail not sent"))
        return
    subj = 'Salary Slip - ' + cstr(self.month) + '/' + cstr(self.fiscal_year)
    frappe.sendmail([receiver], subject=subj, message=_("Please see attachment"),
        attachments=[frappe.attach_print(self.doctype, self.name, file_name=self.name)])
| agpl-3.0 |
staticlibs/android-ndk-r9d-arm-linux-androideabi-4.8 | lib/python2.7/test/test_largefile.py | 129 | 7642 | """Test largefile support on system where this makes sense.
"""
from __future__ import print_function
import os
import stat
import sys
import unittest
from test.test_support import run_unittest, TESTFN, verbose, requires, \
unlink
import io # C implementation of io
import _pyio as pyio # Python implementation of io
try:
    import signal
    # The default handler for SIGXFSZ is to abort the process.
    # By ignoring it, system calls exceeding the file size resource
    # limit will raise IOError instead of crashing the interpreter.
    oldhandler = signal.signal(signal.SIGXFSZ, signal.SIG_IGN)
except (ImportError, AttributeError):
    # No signal module, or no SIGXFSZ on this platform (e.g. Windows):
    # nothing to do.
    pass

# create >2GB file (2GB = 2147483648 bytes)
size = 2500000000
class LargeFileTest(unittest.TestCase):
    """Test that each file function works as expected for a large
    (i.e. > 2GB, do we have to check > 4GB) files.

    NOTE: the order of execution of the test methods is important! test_seek
    must run first to create the test file. File cleanup must also be handled
    outside the test instances because of this.
    """

    # NOTE(review): `self.open` and `self.new_io` are injected per
    # subclass by test_main() — one subclass per I/O implementation.

    def test_seek(self):
        # Builds the shared test file (likely sparse): b'z' at offset 0
        # and b'a' at offset `size`, for a total of size+1 bytes.
        if verbose:
            print('create large file via seek (may be sparse file) ...')
        with self.open(TESTFN, 'wb') as f:
            f.write(b'z')
            f.seek(0)
            f.seek(size)
            f.write(b'a')
            f.flush()
            if verbose:
                print('check file size with os.fstat')
            self.assertEqual(os.fstat(f.fileno())[stat.ST_SIZE], size+1)

    def test_osstat(self):
        if verbose:
            print('check file size with os.stat')
        self.assertEqual(os.stat(TESTFN)[stat.ST_SIZE], size+1)

    def test_seek_read(self):
        # Exercises seek() with all three whence values and verifies
        # tell() and the two marker bytes written by test_seek.
        if verbose:
            print('play around with seek() and read() with the built largefile')
        with self.open(TESTFN, 'rb') as f:
            self.assertEqual(f.tell(), 0)
            self.assertEqual(f.read(1), b'z')
            self.assertEqual(f.tell(), 1)
            f.seek(0)
            self.assertEqual(f.tell(), 0)
            f.seek(0, 0)
            self.assertEqual(f.tell(), 0)
            f.seek(42)
            self.assertEqual(f.tell(), 42)
            f.seek(42, 0)
            self.assertEqual(f.tell(), 42)
            f.seek(42, 1)
            self.assertEqual(f.tell(), 84)
            f.seek(0, 1)
            self.assertEqual(f.tell(), 84)
            f.seek(0, 2)  # seek from the end
            self.assertEqual(f.tell(), size + 1 + 0)
            f.seek(-10, 2)
            self.assertEqual(f.tell(), size + 1 - 10)
            f.seek(-size-1, 2)
            self.assertEqual(f.tell(), 0)
            f.seek(size)
            self.assertEqual(f.tell(), size)
            # the 'a' that was written at the end of file above
            self.assertEqual(f.read(1), b'a')
            f.seek(-size-1, 1)
            self.assertEqual(f.read(1), b'z')
            self.assertEqual(f.tell(), 1)

    def test_lseek(self):
        # Same positioning checks as test_seek_read, but through the
        # raw os.lseek() call on the file descriptor.
        if verbose:
            print('play around with os.lseek() with the built largefile')
        with self.open(TESTFN, 'rb') as f:
            self.assertEqual(os.lseek(f.fileno(), 0, 0), 0)
            self.assertEqual(os.lseek(f.fileno(), 42, 0), 42)
            self.assertEqual(os.lseek(f.fileno(), 42, 1), 84)
            self.assertEqual(os.lseek(f.fileno(), 0, 1), 84)
            self.assertEqual(os.lseek(f.fileno(), 0, 2), size+1+0)
            self.assertEqual(os.lseek(f.fileno(), -10, 2), size+1-10)
            self.assertEqual(os.lseek(f.fileno(), -size-1, 2), 0)
            self.assertEqual(os.lseek(f.fileno(), size, 0), size)
            # the 'a' that was written at the end of file above
            self.assertEqual(f.read(1), b'a')

    def test_truncate(self):
        # Shrinks the file twice (truncate() with and without argument)
        # and checks the resulting size and file position each time.
        if verbose:
            print('try truncate')
        with self.open(TESTFN, 'r+b') as f:
            # this is already decided before start running the test suite
            # but we do it anyway for extra protection
            if not hasattr(f, 'truncate'):
                raise unittest.SkipTest("open().truncate() not available on this system")
            f.seek(0, 2)
            # else we've lost track of the true size
            self.assertEqual(f.tell(), size+1)
            # Cut it back via seek + truncate with no argument.
            newsize = size - 10
            f.seek(newsize)
            f.truncate()
            self.assertEqual(f.tell(), newsize)  # else pointer moved
            f.seek(0, 2)
            self.assertEqual(f.tell(), newsize)  # else wasn't truncated
            # Ensure that truncate(smaller than true size) shrinks
            # the file.
            newsize -= 1
            f.seek(42)
            f.truncate(newsize)
            if self.new_io:
                # new-style io keeps the position unchanged by truncate()
                self.assertEqual(f.tell(), 42)
            f.seek(0, 2)
            self.assertEqual(f.tell(), newsize)
            # XXX truncate(larger than true size) is ill-defined
            # across platform; cut it waaaaay back
            f.seek(0)
            f.truncate(1)
            if self.new_io:
                self.assertEqual(f.tell(), 0)        # else pointer moved
            f.seek(0)
            self.assertEqual(len(f.read()), 1)       # else wasn't truncated

    def test_seekable(self):
        # Issue #5016; seekable() can return False when the current position
        # is negative when truncated to an int.
        if not self.new_io:
            self.skipTest("builtin file doesn't have seekable()")
        for pos in (2**31-1, 2**31, 2**31+1):
            with self.open(TESTFN, 'rb') as f:
                f.seek(pos)
                self.assertTrue(f.seekable())
def test_main():
    # On Windows and Mac OSX this test consumes large resources; It
    # takes a long time to build the >2GB file and takes >2GB of disk
    # space therefore the resource must be enabled to run this test.
    # If not, nothing after this line stanza will be executed.
    if sys.platform[:3] == 'win' or sys.platform == 'darwin':
        requires('largefile',
                 'test requires %s bytes and a long time to run' % str(size))
    else:
        # Only run if the current filesystem supports large files.
        # (Skip this test on Windows, since we now always support
        # large files.)
        f = open(TESTFN, 'wb', buffering=0)
        try:
            # 2**31 == 2147483648
            f.seek(2147483649)
            # Seeking is not enough of a test: you must write and
            # flush, too!
            f.write(b'x')
            f.flush()
        except (IOError, OverflowError):
            f.close()
            unlink(TESTFN)
            raise unittest.SkipTest("filesystem does not have largefile support")
        else:
            f.close()
    suite = unittest.TestSuite()
    # Run the whole test class once per I/O implementation (C io,
    # pure-Python _pyio, and the builtin file type), generating one
    # subclass per implementation with `open`/`new_io` injected.
    for _open, prefix in [(io.open, 'C'), (pyio.open, 'Py'),
                          (open, 'Builtin')]:
        class TestCase(LargeFileTest):
            pass
        TestCase.open = staticmethod(_open)
        TestCase.new_io = _open is not open
        TestCase.__name__ = prefix + LargeFileTest.__name__
        # Explicit ordering: test_seek must run first (it builds the file).
        suite.addTest(TestCase('test_seek'))
        suite.addTest(TestCase('test_osstat'))
        suite.addTest(TestCase('test_seek_read'))
        suite.addTest(TestCase('test_lseek'))
        # Only register test_truncate when this implementation has it.
        with _open(TESTFN, 'wb') as f:
            if hasattr(f, 'truncate'):
                suite.addTest(TestCase('test_truncate'))
        suite.addTest(TestCase('test_seekable'))
        unlink(TESTFN)
    try:
        run_unittest(suite)
    finally:
        # always remove the huge test file, even on failure
        unlink(TESTFN)
if __name__ == '__main__':
    # Allow running this test module directly.
    test_main()
| gpl-2.0 |
isharacomix/tvtgj | core/game.py | 1 | 2386 | # The Game is where it all starts. A Game is an abstract and thin package in
# which all of the elements of the game are stored. It is responsible for
# creating the world, parsing and writing to save files, and turning on/off
# graphics.
from core import gfx
from core import world
import sys
import traceback
# A Game represents a single instance of a game, including its maps,
# data, and everything else.
class Game(object):
    """A single game session: owns the world and drives the main loop
    (title screen, input handling, world updates and rendering)."""

    def __init__(self, sdl):
        # `sdl` is truthy when the SDL graphics backend should be tried
        # first (falling back to ASCII), falsy for the reverse order.
        self.world = world.World()
        self.sdl = sdl

    def display_title(self):
        """Render the title screen: title, control layout and
        instructions, centered on an 80-column display."""
        # (row, text) pairs; row 1 is the highlighted title line.
        # Typo fixed in the instructions line ("shot bullet" ->
        # "shoot bullets").
        title = [(1,"It'll never catch on!"),
            (2,"A robot-battle roguelike (VERY) loosely based on TVTROPES"),
            (4," e r y u "),
            (5," s f h k "),
            (6," x c b n "),
            (7,"'Left stick' 'Right stick'"),
            (8," Aim Target Move Robot "),
            (10,"Press Z to shoot bullets and Q to quit."),
            (12,"Get power ups. Kill robots. Press Enter to start.")]
        gfx.clear()
        for y,t in title:
            x = 40-len(t)//2     # center the text horizontally
            q = y==1             # highlight flag, only for the title row
            for c in t:
                gfx.draw(x,y,c,'g'+("!" if q else ""))
                x+= 1

    # Runs an interactive session of our game with the player until either
    # the player stops playing or an error occurs. Here, we pass input to the
    # world until we are told we don't need to anymore. If an error occurs, we
    # turn off graphics, print the traceback, and kill the program.
    def play(self):
        first, second = "sdl","ascii"
        if not self.sdl: first,second = "ascii","sdl"
        # Try the preferred backend first, fall back to the other one.
        try:
            gfx.start(first)
        except:
            gfx.start(second)
        try:
            # Title screen: redraw until the player presses Enter.
            c = -1
            while c != "enter":
                self.display_title()
                c = gfx.get_input()
                gfx.refresh()
            # Main loop: feed input to the world and redraw each frame.
            while self.world.running:
                c = gfx.get_input()
                self.world.handle(c)
                self.world.draw()
                self.world.draw_gui()
                gfx.refresh()
        except:
            # Restore the terminal before reporting, then bail out.
            gfx.stop()
            print(traceback.format_exc())
            sys.exit(-1)
        gfx.stop()
| gpl-3.0 |
Silmathoron/NNGT | nngt/core/graph.py | 1 | 61996 | #!/usr/bin/env python
#-*- coding:utf-8 -*-
#
# graph.py
#
# This file is part of the NNGT project to generate and analyze
# neuronal networks and their activity.
# Copyright (C) 2015-2019 Tanguy Fardet
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
""" Graph class for graph generation and management """
import logging
import weakref
from copy import deepcopy
import numpy as np
import scipy.sparse as ssp
import nngt
import nngt.analysis as na
from nngt import save_to_file
from nngt.io.graph_loading import _load_from_file, _library_load
from nngt.io.io_helpers import _get_format
from nngt.io.graph_saving import _as_string
from nngt.lib import InvalidArgument, nonstring_container
from nngt.lib.connect_tools import _set_degree_type, _unique_rows
from nngt.lib.graph_helpers import _edge_prop
from nngt.lib.logger import _log_message
from nngt.lib.test_functions import graph_tool_check, is_integer
from .connections import Connections
logger = logging.getLogger(__name__)
# ----- #
# Graph #
# ----- #
class Graph(nngt.core.GraphObject):
"""
The basic graph class, which inherits from a library class such as
:class:`graph_tool.Graph`, :class:`networkx.DiGraph`, or ``igraph.Graph``.
The objects provides several functions to easily access some basic
properties.
"""
#-------------------------------------------------------------------------#
# Class properties
__num_graphs = 0
__max_id = 0
@classmethod
def num_graphs(cls):
    ''' Number of currently alive Graph instances. '''
    alive = cls.__num_graphs
    return alive
@classmethod
def from_library(cls, library_graph, name="ImportedGraph", weighted=True,
                 directed=True, **kwargs):
    '''
    Create a :class:`~nngt.Graph` by wrapping a graph object coming
    from one of the supported backends (graph-tool, igraph, networkx).

    Parameters
    ----------
    library_graph : object
        Graph object from one of the supported libraries.
    name : str, optional (default: "ImportedGraph")
        Name of the new graph.
    **kwargs
        Other standard arguments (see :func:`~nngt.Graph.__init__`)
    '''
    # start from an empty, unweighted graph, then mirror the nodes,
    # edges and attributes of the backend object without copying it
    wrapper = cls(name=name, weighted=False, **kwargs)
    wrapper._from_library_graph(library_graph, copy=False)
    return wrapper
@classmethod
def from_matrix(cls, matrix, weighted=True, directed=True, population=None,
                shape=None, positions=None, name=None, **kwargs):
    '''
    Creates a :class:`~nngt.Graph` from a :mod:`scipy.sparse` matrix or
    a dense matrix.

    Parameters
    ----------
    matrix : :mod:`scipy.sparse` matrix or :class:`numpy.ndarray`
        Adjacency matrix.
    weighted : bool, optional (default: True)
        Whether the graph edges have weight properties.
    directed : bool, optional (default: True)
        Whether the graph is directed or undirected.
    population : :class:`~nngt.NeuralPop`
        Population to associate to the new :class:`~nngt.Network`.
    shape : :class:`~nngt.geometry.Shape`, optional (default: None)
        Shape to associate to the new :class:`~nngt.SpatialGraph`.
    positions : (N, 2) array
        Positions, in a 2D space, of the N neurons.
    name : str, optional
        Graph name.

    Returns
    -------
    :class:`~nngt.Graph`
    '''
    mshape = matrix.shape

    # default name template: 'Y' becomes Sparse/Dense, 'Z' the counter
    graph_name = "FromYMatrix_Z"

    nodes = max(mshape[0], mshape[1])

    if issubclass(matrix.__class__, ssp.spmatrix):
        graph_name = graph_name.replace('Y', 'Sparse')
        if not directed:
            # an undirected graph needs a symmetric matrix; keep only
            # the lower triangle to avoid duplicating edges
            if mshape[0] != mshape[1] or not (matrix.T != matrix).nnz == 0:
                raise InvalidArgument('Incompatible `directed=False` '
                                      'option provided for non symmetric '
                                      'matrix.')
            matrix = ssp.tril(matrix, format=matrix.format)
    else:
        graph_name = graph_name.replace('Y', 'Dense')
        if not directed:
            if mshape[0] != mshape[1] or not (matrix.T == matrix).all():
                raise InvalidArgument('Incompatible `directed=False` '
                                      'option provided for non symmetric '
                                      'matrix.')
            matrix = np.tril(matrix)

    # edges are the (row, col) pairs of the non-zero entries
    edges = np.array(matrix.nonzero()).T

    graph_name = graph_name.replace("Z", str(cls.__num_graphs))

    # overwrite default name if necessary
    if name is not None:
        graph_name = name

    graph = cls(nodes, name=graph_name, weighted=weighted,
                directed=directed, **kwargs)

    if population is not None:
        cls.make_network(graph, population)

    if shape is not None or positions is not None:
        cls.make_spatial(graph, shape, positions)

    weights = None

    if weighted:
        if issubclass(matrix.__class__, ssp.spmatrix):
            weights = np.array(matrix[edges[:, 0], edges[:, 1]])[0]
        else:
            weights = matrix[edges[:, 0], edges[:, 1]]
            # flatten a np.matrix result to a 1D array
            if len(weights.shape) == 2:
                weights = weights.A1

    graph.new_edges(edges, {"weight": weights}, check_self_loops=False,
                    ignore_invalid=True)

    return graph
@staticmethod
def from_file(filename, fmt="auto", separator=" ", secondary=";",
              attributes=None, attributes_types=None, notifier="@",
              ignore="#", from_string=False, name=None,
              directed=True, cleanup=False):
    '''
    Import a saved graph from a file.

    .. versionchanged :: 2.0
        Added optional `attributes_types` and `cleanup` arguments.

    Parameters
    ----------
    filename: str
        The path to the file.
    fmt : str, optional (default: deduced from filename)
        The format used to save the graph. Supported formats are:
        "neighbour" (neighbour list), "ssp" (scipy.sparse), "edge_list"
        (list of all the edges in the graph, one edge per line,
        represented by a ``source target``-pair), "gml" (gml format,
        default if `filename` ends with '.gml'), "graphml" (graphml format,
        default if `filename` ends with '.graphml' or '.xml'), "dot" (dot
        format, default if `filename` ends with '.dot'), "gt" (only
        when using `graph_tool <http://graph-tool.skewed.de/>`_ as library,
        detected if `filename` ends with '.gt').
    separator : str, optional (default " ")
        separator used to separate inputs in the case of custom formats
        (namely "neighbour" and "edge_list")
    secondary : str, optional (default: ";")
        Secondary separator used to separate attributes in the case of
        custom formats.
    attributes : list, optional (default: [])
        List of names for the attributes present in the file. If a
        `notifier` is present in the file, names will be deduced from it;
        otherwise the attributes will be numbered.
        For "edge_list", attributes may also be present as additional
        columns after the source and the target.
    attributes_types : dict, optional (default: str)
        Backup information if the type of the attributes is not specified
        in the file. Values must be callables (types or functions) that
        will take the argument value as a string input and convert it to
        the proper type.
    notifier : str, optional (default: "@")
        Symbol specifying the following as meaningfull information.
        Relevant information are formatted ``@info_name=info_value``, where
        ``info_name`` is in ("attributes", "directed", "name", "size") and
        associated ``info_value`` are of type (``list``, ``bool``, ``str``,
        ``int``).
        Additional notifiers are
        ``@type=SpatialGraph/Network/SpatialNetwork``, which must be
        followed by the relevant notifiers among ``@shape``,
        ``@population``, and ``@graph``.
    from_string : bool, optional (default: False)
        Load from a string instead of a file.
    ignore : str, optional (default: "#")
        Ignore lines starting with the `ignore` string.
    name : str, optional (default: from file information or 'LoadedGraph')
        The name of the graph.
    directed : bool, optional (default: from file information or True)
        Whether the graph is directed or not.
    cleanup : bool, optional (default: False)
        If true, removes nodes before the first one that appears in the
        edges and after the last one and renumber the nodes from 0.

    Returns
    -------
    graph : :class:`~nngt.Graph` or subclass
        Loaded graph.
    '''
    fmt = _get_format(fmt, filename)

    if fmt not in ("neighbour", "edge_list", "gml"):
        # only partial support for these formats, relying on backend
        libgraph = _library_load(filename, fmt)
        name = "LoadedGraph" if name is None else name
        graph = Graph.from_library(libgraph, name=name, directed=directed)
        return graph

    # full custom parsing: returns metadata, topology, attributes and
    # optional structure/spatial information
    info, edges, nattr, eattr, struct, shape, pos = _load_from_file(
        filename=filename, fmt=fmt, separator=separator, ignore=ignore,
        secondary=secondary, attributes=attributes,
        attributes_types=attributes_types, notifier=notifier,
        cleanup=cleanup)

    # create the graph
    name = info.get("name", "LoadedGraph") if name is None else name
    graph = Graph(nodes=info["size"], name=name,
                  directed=info.get("directed", directed))

    # make the nodes attributes
    lst_attr, dtpes, lst_values = [], [], []

    if info["node_attributes"]:  # node attributes to add to the graph
        lst_attr = info["node_attributes"]
        dtpes = info["node_attr_types"]
        lst_values = [nattr[name] for name in info["node_attributes"]]

    for nattr, dtype, values in zip(lst_attr, dtpes, lst_values):
        graph.new_node_attribute(nattr, dtype, values=values)

    # make the edges and their attributes
    lst_attr, dtpes, lst_values = [], [], []

    if info["edge_attributes"]:  # edge attributes to add to the graph
        lst_attr = info["edge_attributes"]
        dtpes = info["edge_attr_types"]
        lst_values = [eattr[name] for name in info["edge_attributes"]]

    if len(edges):
        graph.new_edges(edges, check_duplicates=False,
                        check_self_loops=False, check_existing=False)

    for eattr, dtype, values in zip(lst_attr, dtpes, lst_values):
        graph.new_edge_attribute(eattr, dtype, values=values)

    if struct is not None:
        if isinstance(struct, nngt.NeuralPop):
            # a neural population turns the graph into a Network
            nngt.Network.make_network(graph, struct)
        else:
            # plain Structure: attach it and wire the back-references
            # (weakrefs avoid reference cycles with the graph)
            graph.structure = struct

            struct._parent = weakref.ref(graph)

            for g in struct.values():
                g._struct = weakref.ref(struct)
                g._net = weakref.ref(graph)

    if pos is not None or shape is not None:
        nngt.SpatialGraph.make_spatial(graph, shape=shape, positions=pos)

    return graph
@staticmethod
def make_spatial(graph, shape=None, positions=None, copy=False):
    '''
    Promote a :class:`~nngt.Graph` to a :class:`~nngt.SpatialGraph`
    (or a :class:`~nngt.Network` to a :class:`~nngt.SpatialNetwork`).

    Parameters
    ----------
    graph : :class:`~nngt.Graph` or :class:`~nngt.SpatialGraph`
        Graph to convert.
    shape : :class:`~nngt.geometry.Shape`, optional (default: None)
        Shape to associate to the graph; may be omitted when
        `positions` is provided (the neurons are then embedded in a
        rectangle containing them all).
    positions : (N, 2) array
        Positions, in a 2D space, of the N neurons.
    copy : bool, optional (default: ``False``)
        If ``True``, operate on (and return) a copy of `graph`;
        otherwise convert the graph in place and return ``None``.
    '''
    target = graph.copy() if copy else graph

    # swap the class to its spatial counterpart, then initialize the
    # spatial attributes (shape and positions)
    if isinstance(target, nngt.Network):
        target.__class__ = nngt.SpatialNetwork
    else:
        target.__class__ = nngt.SpatialGraph

    target._init_spatial_properties(shape, positions)

    return target if copy else None
@staticmethod
def make_network(graph, neural_pop, copy=False, **kwargs):
    '''
    Promote a :class:`~nngt.Graph` to a :class:`~nngt.Network` (or a
    :class:`~nngt.SpatialGraph` to a :class:`~nngt.SpatialNetwork`).

    Parameters
    ----------
    graph : :class:`~nngt.Graph` or :class:`~nngt.SpatialGraph`
        Graph to convert.
    neural_pop : :class:`~nngt.NeuralPop`
        Population to associate to the new :class:`~nngt.Network`.
    copy : bool, optional (default: ``False``)
        If ``True``, operate on (and return) a copy of `graph`;
        otherwise convert the graph in place and return ``None``.
    '''
    target = graph.copy() if copy else graph

    # swap the class to its network counterpart
    if isinstance(target, nngt.SpatialGraph):
        target.__class__ = nngt.SpatialNetwork
    else:
        target.__class__ = nngt.Network

    # delays: keep any existing setting; otherwise use the provided
    # `delays` keyword or fall back to a constant 1.
    if not hasattr(target, '_d'):
        if "delays" in kwargs:
            target._d = kwargs["delays"]
        else:
            target._d = {"distribution": "constant", "value": 1.}
    elif "delays" in kwargs:
        _log_message(logger, "WARNING",
                     'Graph already had delays set, ignoring new ones.')

    target._init_bioproperties(neural_pop)

    return target if copy else None
#-------------------------------------------------------------------------#
# Constructor/destructor and properties
def __init__(self, nodes=None, name="Graph", weighted=True, directed=True,
             copy_graph=None, structure=None, **kwargs):
    '''
    Initialize Graph instance

    .. versionchanged:: 2.0
        Renamed `from_graph` to `copy_graph`.

    .. versionchanged:: 2.2
        Added `structure` argument.

    Parameters
    ----------
    nodes : int, optional (default: 0)
        Number of nodes in the graph.
    name : string, optional (default: "Graph")
        The name of this :class:`Graph` instance.
    weighted : bool, optional (default: True)
        Whether the graph edges have weight properties.
    directed : bool, optional (default: True)
        Whether the graph is directed or undirected.
    copy_graph : :class:`~nngt.Graph`, optional
        An optional :class:`~nngt.Graph` that will be copied.
    structure : :class:`~nngt.Structure`, optional (default: None)
        A structure dividing the graph into specific groups, which can
        be used to generate specific connectivities and visualise the
        connections in a more coarse-grained manner.
    kwargs : optional keywords arguments
        Optional arguments that can be passed to the graph, e.g. a dict
        containing information on the synaptic weights
        (``weights={"distribution": "constant", "value": 2.3}`` which is
        equivalent to ``weights=2.3``), the synaptic `delays`, or a
        ``type`` information.

    Note
    ----
    When using `copy_graph`, only the topological properties are
    copied (nodes, edges, and attributes), spatial and biological
    properties are ignored.
    To copy a graph exactly, use :func:`~nngt.Graph.copy`.

    Returns
    -------
    self : :class:`~nngt.Graph`
    '''
    # per-instance unique id, taken from the class-wide counter
    self.__id = self.__class__.__max_id
    self._name = name
    self._graph_type = kwargs["type"] if "type" in kwargs else "custom"

    # check the structure
    if structure is not None:
        if nodes is None:
            # take the node count from the structure
            nodes = structure.size
        else:
            assert nodes == structure.size, \
                "`nodes` and `structure.size` must be the same."
    else:
        nodes = 0 if nodes is None else nodes

    self._struct = structure

    # Init the core.GraphObject
    super().__init__(nodes=nodes, copy_graph=copy_graph,
                     directed=directed, weighted=weighted)

    # take care of the weights and delays
    if copy_graph is None:
        # fresh graph: create the attribute columns and store the
        # weight/delay generation rules from the keyword arguments
        if weighted:
            self.new_edge_attribute('weight', 'double')
            self._w = _edge_prop(kwargs.get("weights", None))
        if "delays" in kwargs:
            self.new_edge_attribute('delay', 'double')
            self._d = _edge_prop(kwargs.get("delays", None))
        if 'inh_weight_factor' in kwargs:
            self._iwf = kwargs['inh_weight_factor']
    else:
        # copied graph: inherit its weight/delay rules when present
        self._w = getattr(copy_graph, "_w", None)
        self._d = getattr(copy_graph, "_d", None)
        self._iwf = getattr(copy_graph, "_iwf", None)
        self._eattr._num_values_set = \
            copy_graph._eattr._num_values_set.copy()

    # update the counters
    self.__class__.__num_graphs += 1
    self.__class__.__max_id += 1
def __del__(self):
''' Graph deletion (update graph count) '''
self.__class__.__num_graphs -= 1
def __repr__(self):
''' Provide unambiguous informations regarding the object. '''
d = "directed" if self.is_directed() else "undirected"
w = "weighted" if self.is_weighted() else "binary"
t = self.type
n = self.node_nb()
e = self.edge_nb()
return "<{directed}/{weighted} {obj} object of type '{net_type}' " \
"with {nodes} nodes and {edges} edges at 0x{obj_id}>".format(
directed=d, weighted=w, obj=type(self).__name__,
net_type=t, nodes=n, edges=e, obj_id=id(self))
def __str__(self):
    '''
    Full string description of the graph, identical to what would be
    written to a file when saving it.
    '''
    # delegate to the I/O serializer so str(g) matches the saved file
    description = _as_string(self)
    return description
@property
def graph(self):
    '''
    The underlying backend graph object (graph-tool, igraph, or
    networkx, depending on the configured backend).

    .. warning ::
        Do not add or remove edges directly through this object.

    See also
    --------
    :ref:`graph_attr`
    :ref:`graph-analysis`.
    '''
    backend = self._graph
    return backend
@property
def structure(self):
    '''
    Object structuring the graph into specific groups.

    .. versionadded: 2.2

    Note
    ----
    For a :class:`~nngt.Network`, this is its
    :py:obj:`~nngt.Network.population`.
    '''
    return self.population if self.is_network() else self._struct
@structure.setter
def structure(self, structure):
    # for a Network, the structure is stored as its population
    if self.is_network():
        self.population = structure
        return
    # guard clauses: type, size and validity checks, in that order
    if not issubclass(structure.__class__, nngt.Structure):
        raise AttributeError(
            "Expecting Structure but received '{}'.".format(
                structure.__class__.__name__))
    if self.node_nb() != structure.size:
        raise AttributeError("Graph and Structure must have same "
                             "number of nodes.")
    if not structure.is_valid:
        raise AttributeError(
            "Structure is not valid (not all nodes are "
            "associated to a group).")
    self._struct = structure
@property
def graph_id(self):
    ''' Unique :class:`int` identifying the instance. '''
    uid = self.__id
    return uid
@property
def name(self):
    ''' Name of the graph. '''
    graph_name = self._name
    return graph_name
@property
def type(self):
    ''' Type of the graph. '''
    gtype = self._graph_type
    return gtype
#-------------------------------------------------------------------------#
# Graph actions
def copy(self):
    '''
    Return a deepcopy of the current :class:`~nngt.Graph` instance,
    including spatial and network properties when present.
    '''
    if nngt.get_config("mpi"):
        raise NotImplementedError("`copy` is not MPI-safe yet.")
    clone = Graph(name=self._name + '_copy',
                  weighted=self.is_weighted(), copy_graph=self,
                  directed=self.is_directed())
    if self.is_spatial():
        # carry over the spatial embedding (shape and positions)
        nngt.SpatialGraph.make_spatial(
            clone, shape=self.shape.copy(),
            positions=deepcopy(self._pos))
    if self.is_network():
        # carry over the neural population
        nngt.Network.make_network(clone, self.population.copy())
    return clone
def to_file(self, filename, fmt="auto", separator=" ", secondary=";",
            attributes=None, notifier="@"):
    '''
    Save the graph to `filename`.

    See also
    --------
    :py:func:`nngt.lib.save_to_file` for the meaning of the options.
    '''
    # thin wrapper around the generic saving function
    save_to_file(self, filename, fmt=fmt, separator=separator,
                 secondary=secondary, attributes=attributes,
                 notifier=notifier)
#~ def inhibitory_subgraph(self):
#~ ''' Create a :class:`~nngt.Graph` instance which graph
#~ contains only the inhibitory edges of the current instance's
#~ :class:`graph_tool.Graph` '''
#~ eprop_b_type = self.new_edge_property(
#~ "bool",-self.edge_properties[TYPE].a+1)
#~ self.set_edge_filter(eprop_b_type)
#~ inhib_graph = Graph( name=self._name + '_inhib',
#~ weighted=self._weighted,
#~ from_graph=core.GraphObject(self.prune=True) )
#~ self.clear_filters()
#~ return inhib_graph
#~ def excitatory_subgraph(self):
#~ '''
#~ Create a :class:`~nngt.Graph` instance which graph contains only the
#~ excitatory edges of the current instance's :class:`core.GraphObject`.
#~ .. warning ::
#~ Only works for graph_tool
#~ .. todo ::
#~ Make this method library independant!
#~ '''
#~ eprop_b_type = self.new_edge_property(
#~ "bool",self.edge_properties[TYPE].a+1)
#~ self.set_edge_filter(eprop_b_type)
#~ exc_graph = Graph( name=self._name + '_exc',
#~ weighted=self._weighted,
#~ graph=core.GraphObject(self.prune=True) )
#~ self.clear_filters()
#~ return exc_graph
def get_structure_graph(self):
    '''
    Return a coarse-grained version of the graph containing one node
    per :class:`nngt.Group`.
    Connections between groups are associated to the sum of all connection
    weights.
    If no structure is present, returns an empty Graph.
    '''
    struct = self.structure

    if struct is None:
        return Graph()

    names = list(struct.keys())
    nodes = len(struct)

    g = nngt.Graph(nodes,
                   name="Structure-graph of '{}'".format(self.name))

    eattr = {"weight": []}

    if self.is_network():
        eattr["delay"] = []

    new_edges = []

    # one coarse edge per ordered group pair with at least one
    # underlying connection; weight = sum, delay = average
    for i, n1 in enumerate(names):
        g1 = struct[n1]
        for j, n2 in enumerate(names):
            g2 = struct[n2]
            edges = self.get_edges(source_node=g1.ids, target_node=g2.ids)
            if len(edges):
                weights = self.get_weights(edges=edges)
                w = np.sum(weights)
                eattr["weight"].append(w)
                if self.is_network():
                    delays = self.get_delays(edges=edges)
                    d = np.average(delays)
                    eattr["delay"].append(d)
                new_edges.append((i, j))

    # add edges and attributes
    if self.is_network():
        g.new_edge_attribute("delay", "double")

    g.new_edges(new_edges, attributes=eattr, check_self_loops=False)

    # set node attributes
    g.new_node_attribute("name", "string", values=names)

    return g
#-------------------------------------------------------------------------#
# Getters
def adjacency_matrix(self, types=False, weights=False, mformat="csr"):
    '''
    Return the graph adjacency matrix.

    .. versionchanged: 2.0
        Added matrix format option (`mformat`).

    Note
    ----
    Source nodes are represented by the rows, targets by the
    corresponding columns.

    Parameters
    ----------
    types : bool, optional (default: False)
        Wether the edge types should be taken into account (negative values
        for inhibitory connections).
    weights : bool or string, optional (default: False)
        Whether the adjacecy matrix should be weighted. If True, all
        connections are multiply bythe associated synaptic strength; if
        weight is a string, the connections are scaled bythe corresponding
        edge attribute.
    mformat : str, optional (default: "csr")
        Type of :mod:`scipy.sparse` matrix that will be returned, by
        default :class:`scipy.sparse.csr_matrix`.

    Returns
    -------
    mat : :mod:`scipy.sparse` matrix
        The adjacency matrix of the graph.
    '''
    # `weights is True` means "use the default 'weight' attribute"
    weights = "weight" if weights is True else weights

    mat = None

    if types:
        if self.is_network():
            # use inhibitory nodes
            mat = nngt.analyze_graph["adjacency"](self, weights)
            inh = self.population.inhibitory
            if np.any(inh):
                # rows of inhibitory neurons become negative
                mat[inh, :] *= -1
        elif 'type' in self.node_attributes:
            mat = nngt.analyze_graph["adjacency"](self, weights)
            tarray = np.where(self.node_attributes['type'] < 0)[0]
            if np.any(tarray):
                mat[tarray] *= -1
        elif types and 'type' in self.edge_attributes:
            # per-edge types: build the matrix directly, scaling the
            # entries by the edge 'type' attribute
            data = None
            if nonstring_container(weights):
                data = weights
            elif weights in {None, False}:
                data = np.ones(self.edge_nb())
            else:
                data = self.get_edge_attributes(name=weights)
            data *= self.get_edge_attributes(name="type")
            edges = self.edges_array
            num_nodes = self.node_nb()
            mat = ssp.coo_matrix(
                (data, (edges[:, 0], edges[:, 1])),
                shape=(num_nodes, num_nodes)).tocsr()
            if not self.is_directed():
                # symmetrize for undirected graphs
                mat += mat.T
        # NOTE(review): if `types` is True but none of the branches
        # above matched, `mat` is still None here and .asformat would
        # raise — confirm callers always provide type information when
        # requesting a typed matrix.
        return mat.asformat(mformat)

    # untyped
    mat = nngt.analyze_graph["adjacency"](self, weights, mformat=mformat)

    return mat
    @property
    def node_attributes(self):
        '''
        Access node attributes.

        See also
        --------
        :attr:`~nngt.Graph.edge_attributes`,
        :attr:`~nngt.Graph.get_node_attributes`,
        :attr:`~nngt.Graph.new_node_attribute`,
        :attr:`~nngt.Graph.set_node_attribute`.
        '''
        # expose the internal node-attribute handler directly
        return self._nattr
    @property
    def edge_attributes(self):
        '''
        Access edge attributes.

        See also
        --------
        :attr:`~nngt.Graph.node_attributes`,
        :attr:`~nngt.Graph.get_edge_attributes`,
        :attr:`~nngt.Graph.new_edge_attribute`,
        :attr:`~nngt.Graph.set_edge_attribute`.
        '''
        # expose the internal edge-attribute handler directly
        return self._eattr
def get_nodes(self, attribute=None, value=None):
'''
Return the nodes in the network fulfilling a given condition.
Parameters
----------
attribute : str, optional (default: all nodes)
Whether the `attribute` of the returned nodes should have a specific
value.
value : object, optional (default : None)
If an `attribute` name is passed, then only nodes with `attribute`
being equal to `value` will be returned.
See also
--------
:func:`~nngt.Graph.get_edges`, :attr:`~nngt.Graph.node_attributes`
'''
if attribute is None:
return [i for i in range(self.node_nb())]
vtype = self._nattr.value_type(attribute)
if value is None and vtype != "object":
raise ValueError("`value` cannot be None for attribute '" +
attribute + "'.")
return np.where(
self.get_node_attributes(name=attribute) == value)[0]
def get_edges(self, attribute=None, value=None, source_node=None,
target_node=None):
'''
Return the edges in the network fulfilling a given condition.
Parameters
----------
attribute : str, optional (default: all nodes)
Whether the `attribute` of the returned edges should have a specific
value.
value : object, optional (default : None)
If an `attribute` name is passed, then only edges with `attribute`
being equal to `value` will be returned.
source_node : int or list of ints, optional (default: all nodes)
Retrict the edges to those stemming from `source_node`.
target_node : int or list of ints, optional (default: all nodes)
Retrict the edges to those arriving at `target_node`.
See also
--------
:func:`~nngt.Graph.get_nodes`, :attr:`~nngt.Graph.edge_attributes`
'''
edges = None
if source_node is None and target_node is None:
edges = self.edges_array
elif is_integer(source_node) and is_integer(target_node):
# check that the edge exists, throw error otherwise
self.edge_id((source_node, target_node))
edges = np.array([[source_node, target_node]])
else:
if source_node is None or target_node is None:
# backend-specific implementation for source or target
edges = self._get_edges(source_node=source_node,
target_node=target_node)
else:
# we need to use the adjacency matrix, get its subparts,
# then use the list of nodes to get the original ids back
# to do that we first convert source/target_node to lists
# (note that this has no significant speed impact)
src, tgt = None, None
if source_node is None:
src = np.array(
[i for i in range(self.node_nb())], dtype=int)
elif is_integer(source_node):
src = np.array([source_node], dtype=int)
else:
src = np.sort(source_node)
if target_node is None:
tgt = np.array(
[i for i in range(self.node_nb())], dtype=int)
elif is_integer(target_node):
tgt = np.array([target_node], dtype=int)
else:
tgt = np.sort(target_node)
mat = self.adjacency_matrix()
nnz = mat[src].tocsc()[:, tgt].nonzero()
edges = np.array([src[nnz[0]], tgt[nnz[1]]], dtype=int).T
# remove reciprocal if graph is undirected
if not self.is_directed():
edges.sort()
edges = _unique_rows(edges)
# check attributes
if attribute is None:
return edges
vtype = self._eattr.value_type(attribute)
if value is None and vtype != "object":
raise ValueError("`value` cannot be None for attribute '" +
attribute + "'.")
desired = (self.get_edge_attributes(edges, attribute) == value)
return self.edges_array[desired]
    def get_edge_attributes(self, edges=None, name=None):
        '''
        Attributes of the graph's edges.

        .. versionchanged:: 1.0
            Returns the full dict of edges attributes if called without
            arguments.

        .. versionadded:: 0.8

        Parameters
        ----------
        edges : tuple or list of tuples, optional (default: ``None``)
            Edges whose attributes should be displayed.
        name : str, optional (default: ``None``)
            Name of the desired attribute.

        Returns
        -------
        Dict containing all graph's attributes (synaptic weights, delays...)
        by default. If `edges` is specified, returns only the values for
        these edges. If `name` is specified, returns value of the attribute
        for each edge.

        Note
        ----
        The attributes values are ordered as the edges in
        :func:`~nngt.Graph.edges_array` if `edges` is None.

        See also
        --------
        :func:`~nngt.Graph.get_node_attributes`,
        :func:`~nngt.Graph.new_edge_attribute`,
        :func:`~nngt.Graph.set_edge_attribute`,
        :func:`~nngt.Graph.new_node_attribute`,
        :func:`~nngt.Graph.set_node_attribute`
        '''
        # one attribute, restricted edges
        if name is not None and edges is not None:
            if isinstance(edges, slice):
                # slices index the attribute column directly
                return self._eattr[name][edges]
            elif len(edges):
                # non-empty edge list: select edges first, then attribute
                return self._eattr[edges][name]
            # empty edge list
            return np.array([])
        # no restriction at all: full {name: values} dict
        elif name is None and edges is None:
            return {k: self._eattr[k]
                    for k in self._eattr.keys()}
        # all attributes for a subset of edges
        elif name is None:
            return self._eattr[edges]
        # one attribute for all edges
        else:
            return self._eattr[name]
    def get_node_attributes(self, nodes=None, name=None):
        '''
        Attributes of the graph's nodes.

        .. versionchanged:: 1.0.1
            Corrected default behavior and made it the same as
            :func:`~nngt.Graph.get_edge_attributes`.

        .. versionadded:: 0.9

        Parameters
        ----------
        nodes : list of ints, optional (default: ``None``)
            Nodes whose attribute should be displayed.
        name : str, optional (default: ``None``)
            Name of the desired attribute.

        Returns
        -------
        Dict containing all nodes attributes by default. If `nodes` is
        specified, returns a ``dict`` containing only the attributes of these
        nodes. If `name` is specified, returns a list containing the values of
        the specific attribute for the required nodes (or all nodes if
        unspecified).

        See also
        --------
        :func:`~nngt.Graph.get_edge_attributes`,
        :func:`~nngt.Graph.new_node_attribute`,
        :func:`~nngt.Graph.set_node_attribute`,
        :func:`~nngt.Graph.new_edge_attributes`,
        :func:`~nngt.Graph.set_edge_attribute`
        '''
        # first resolve the attribute selection (all of them, or one name)
        res = None
        if name is None:
            res = {k: self._nattr[k] for k in self._nattr.keys()}
        else:
            res = self._nattr[name]
        # then restrict to the requested nodes, if any
        if nodes is None:
            return res
        if isinstance(nodes, (slice, int)) or nonstring_container(nodes):
            if isinstance(res, dict):
                # subset every attribute column with the same node selector
                return {k: v[nodes] for k, v in res.items()}
            return res[nodes]
        else:
            raise ValueError("Invalid `nodes`: "
                             "{}, use slice, int, or list".format(nodes))
def get_attribute_type(self, attribute_name, attribute_class=None):
'''
Return the type of an attribute (e.g. string, double, int).
Parameters
----------
attribute_name : str
Name of the attribute.
attribute_class : str, optional (default: both)
Whether `attribute_name` is a "node" or an "edge" attribute.
Returns
-------
type : str
Type of the attribute.
'''
if attribute_class is None:
is_eattr = attribute_name in self._eattr
is_nattr = attribute_name in self._nattr
if is_eattr and is_nattr:
raise RuntimeError("Both edge and node attributes with name '"
+ attribute_name + "' exist, please "
"specify `attribute_class`")
elif is_eattr:
return self._eattr.value_type(attribute_name)
elif is_nattr:
return self._nattr.value_type(attribute_name)
else:
raise KeyError("No '{}' attribute.".format(attribute_name))
else:
if attribute_class == "edge":
return self._eattr.value_type(attribute_name)
elif attribute_class == "node":
return self._nattr.value_type(attribute_name)
else:
raise InvalidArgument(
"Unknown attribute class '{}'.".format(attribute_class))
def get_density(self):
'''
Density of the graph: :math:`\\frac{E}{N^2}`, where `E` is the number
of edges and `N` the number of nodes.
'''
return self.edge_nb() / self.node_nb()**2
    def is_weighted(self):
        ''' Whether the edges have weights ("weight" edge attribute set) '''
        return "weight" in self.edge_attributes
    def is_directed(self):
        ''' Whether the graph is directed or not (delegated to the backend
        graph object) '''
        return self._graph.is_directed()
def is_connected(self, mode="strong"):
'''
Return whether the graph is connected.
Parameters
----------
mode : str, optional (default: "strong")
Whether to test connectedness with directed ("strong") or
undirected ("weak") connections.
References
----------
.. [ig-connected] :igdoc:`is_connected`
'''
return super().is_connected()
def get_degrees(self, mode="total", nodes=None, weights=None,
edge_type="all"):
'''
Degree sequence of all the nodes.
.. versionchanged:: 2.0
Changed `deg_type` to `mode`, `node_list` to `nodes`, `use_weights`
to `weights`, and `edge_type` to `edge_type`.
Parameters
----------
mode : string, optional (default: "total")
Degree type (among 'in', 'out' or 'total').
nodes : list, optional (default: None)
List of the nodes which degree should be returned
weights : bool or str, optional (default: binary edges)
Whether edge weights should be considered; if ``None`` or ``False``
then use binary edges; if ``True``, uses the 'weight' edge
attribute, otherwise uses any valid edge attribute required.
edge_type : int or str, optional (default: all)
Restrict to a given synaptic type ("excitatory", 1, or
"inhibitory", -1), using either the "type" edge attribute for
non-:class:`~nngt.Network` or the
:py:attr:`~nngt.NeuralPop.inhibitory` nodes.
Returns
-------
degrees : :class:`numpy.array`
.. warning ::
When using MPI with "nngt" (distributed) backend, returns only the
degrees associated to local edges. "Complete" degrees are obtained
by taking the sum of the results on all MPI processes.
'''
mode = _set_degree_type(mode)
if edge_type == "all":
return super().get_degrees(
mode=mode, nodes=nodes, weights=weights)
elif edge_type in {"excitatory", 1}:
edge_type = 1
elif edge_type in {"inhibitory", -1}:
edge_type = -1
else:
raise InvalidArgument(
"Invalid edge type '{}'".format(edge_type))
degrees = np.zeros(self.node_nb())
if isinstance(self, nngt.Network):
neurons = []
for g in self.population.values():
if g.neuron_type == edge_type:
neurons.extend(g.ids)
if mode in {"in", "all"} or not self.is_directed():
degrees += self.adjacency_matrix(
weights=weights,
types=False)[neurons, :].sum(axis=0).A1
if mode in {"out", "all"} and self.is_directed():
degrees += self.adjacency_matrix(
weights=weights,
types=False)[neurons, :].sum(axis=1).A1
else:
edges = np.where(
self.get_edge_attributes(name="type") == edge_type)[0]
w = None
if weights is None:
w = np.ones(len(edges))
elif weights in self.edge_attributes:
w = self.edge_attributes[weights]
elif nonstring_container(weights):
w = np.array(weights)
else:
raise InvalidArgument(
"Invalid `weights` '{}'".format(weights))
# count in-degrees
if mode in {"in", "all"} or not self.is_directed():
np.add.at(degrees, edges[1], weights)
if mode in {"out", "all"} and self.is_directed():
np.add.at(degrees, edges[0], weights)
if nodes is None:
return degrees
return degrees[nodes]
    def get_betweenness(self, btype="both", weights=None):
        '''
        Returns the normalized betweenness centrality of the nodes and edges.

        Parameters
        ----------
        btype : str, optional (default 'both')
            The centrality that should be returned (either 'node', 'edge', or
            'both'). By default, both betweenness centralities are computed.
        weights : bool or str, optional (default: binary edges)
            Whether edge weights should be considered; if ``None`` or
            ``False`` then use binary edges; if ``True``, uses the 'weight'
            edge attribute, otherwise uses any valid edge attribute required.

        Returns
        -------
        nb : :class:`numpy.ndarray`
            The nodes' betweenness if `btype` is 'node' or 'both'
        eb : :class:`numpy.ndarray`
            The edges' betweenness if `btype` is 'edge' or 'both'

        See also
        --------
        :func:`~nngt.analysis.betweenness`
        '''
        # local import — presumably avoids a circular import at module
        # load time; verify before moving it to the top of the file
        from nngt.analysis import betweenness
        return betweenness(self, btype=btype, weights=weights)
def get_edge_types(self, edges=None):
'''
Return the type of all or a subset of the edges.
.. versionchanged:: 1.0.1
Added the possibility to ask for a subset of edges.
Parameters
----------
edges : (E, 2) array, optional (default: all edges)
Edges for which the type should be returned.
Returns
-------
the list of types (1 for excitatory, -1 for inhibitory)
'''
if TYPE in self.edge_attributes:
return self.get_edge_attributes(name=TYPE, edges=edges)
else:
size = self.edge_nb() if edges is None else len(edges)
return np.ones(size)
def get_weights(self, edges=None):
'''
Returns the weights of all or a subset of the edges.
.. versionchanged:: 1.0.1
Added the possibility to ask for a subset of edges.
Parameters
----------
edges : (E, 2) array, optional (default: all edges)
Edges for which the type should be returned.
Returns
-------
the list of weights
'''
if self.is_weighted():
if edges is None:
return self._eattr["weight"]
else:
if len(edges) == 0:
return np.array([])
return np.asarray(self._eattr[edges]["weight"])
else:
size = self.edge_nb() if edges is None else len(edges)
return np.ones(size)
def get_delays(self, edges=None):
'''
Returns the delays of all or a subset of the edges.
.. versionchanged:: 1.0.1
Added the possibility to ask for a subset of edges.
Parameters
----------
edges : (E, 2) array, optional (default: all edges)
Edges for which the type should be returned.
Returns
-------
the list of delays
'''
if edges is None:
return self._eattr["delay"]
else:
return self._eattr[edges]["delay"]
    def neighbours(self, node, mode="all"):
        '''
        Return the neighbours of `node`.

        Parameters
        ----------
        node : int
            Index of the node of interest.
        mode : string, optional (default: "all")
            Type of neighbours that will be returned: "all" returns all the
            neighbours regardless of directionality, "in" returns the
            in-neighbours (also called predecessors) and "out" returns the
            out-neighbours (or successors).

        Returns
        -------
        neighbours : set
            The neighbours of `node`.
        '''
        # delegated to the backend implementation
        return super().neighbours(node, mode=mode)
def is_spatial(self):
'''
Whether the graph is embedded in space (i.e. is a subclass of
:class:`~nngt.SpatialGraph`).
'''
return issubclass(self.__class__, nngt.SpatialGraph)
def is_network(self):
'''
Whether the graph is a subclass of :class:`~nngt.Network` (i.e. if it
has a :class:`~nngt.NeuralPop` attribute).
'''
return issubclass(self.__class__, nngt.Network)
#-------------------------------------------------------------------------#
# Setters
def set_name(self, name=""):
''' set graph name '''
if name != "":
self._name = name
else:
self._name = "Graph_" + str(self.__id)
    def new_edge_attribute(self, name, value_type, values=None, val=None):
        '''
        Create a new attribute for the edges.

        Parameters
        ----------
        name : str
            The name of the new attribute.
        value_type : str
            Type of the attribute, among 'int', 'double', 'string', or 'object'
        values : array, optional (default: None)
            Values with which the edge attribute should be initialized.
            (must have one entry per edge in the graph)
        val : int, float or str , optional (default: None)
            Identical value for all edges.
        '''
        # "eid" is used internally to index edges and must not be shadowed
        assert name != "eid", "`eid` is a reserved internal edge-attribute."
        self._eattr.new_attribute(
            name, value_type, values=values, val=val)
    def new_node_attribute(self, name, value_type, values=None, val=None):
        '''
        Create a new attribute for the nodes.

        Parameters
        ----------
        name : str
            The name of the new attribute.
        value_type : str
            Type of the attribute, among 'int', 'double', 'string', or 'object'
        values : array, optional (default: None)
            Values with which the node attribute should be initialized.
            (must have one entry per node in the graph)
        val : int, float or str , optional (default: None)
            Identical value for all nodes.

        See also
        --------
        :func:`~nngt.Graph.new_edge_attribute`,
        :func:`~nngt.Graph.set_node_attribute`,
        :func:`~nngt.Graph.get_node_attributes`,
        :func:`~nngt.Graph.set_edge_attribute`,
        :func:`~nngt.Graph.get_edge_attributes`
        '''
        # delegate creation to the node-attribute handler
        self._nattr.new_attribute(
            name, value_type, values=values, val=val)
    def set_edge_attribute(self, attribute, values=None, val=None,
                           value_type=None, edges=None):
        '''
        Set attributes to the connections between neurons.

        .. warning ::
            The special "type" attribute cannot be modified when using graphs
            that inherit from the :class:`~nngt.Network` class. This is because
            for biological networks, neurons make only one kind of synapse,
            which is determined by the :class:`nngt.NeuralGroup` they
            belong to.

        Parameters
        ----------
        attribute : str
            The name of the attribute.
        values : array, optional (default: None)
            Values with which the edge attribute should be initialized.
            (must have one entry per edge in the graph)
        val : int, float or str , optional (default: None)
            Identical value for all edges.
        value_type : str, optional (default: None)
            Type of the attribute, among 'int', 'double', 'string'. Only used
            if the attribute does not exist and must be created.
        edges : list of edges or array of shape (E, 2), optional (default: all)
            Edges whose attributes should be set. Others will remain unchanged.

        See also
        --------
        :func:`~nngt.Graph.set_node_attribute`,
        :func:`~nngt.Graph.get_edge_attributes`,
        :func:`~nngt.Graph.new_edge_attribute`,
        :func:`~nngt.Graph.new_node_attribute`,
        :func:`~nngt.Graph.get_node_attributes`
        '''
        if attribute not in self.edge_attributes:
            # unknown attribute: create it (requires an explicit type)
            assert value_type is not None, "`value_type` is necessary for " +\
                "new attributes."
            self.new_edge_attribute(name=attribute, value_type=value_type,
                                    values=values, val=val)
        else:
            num_edges = self.edge_nb() if edges is None else len(edges)
            if values is None:
                if val is not None:
                    # deepcopy so mutable `val` objects are not shared
                    # between edges
                    values = [deepcopy(val) for _ in range(num_edges)]
                else:
                    raise InvalidArgument("At least one of the `values` and "
                                          "`val` arguments should not be ``None``.")
            self._eattr.set_attribute(attribute, values, edges=edges)
    def set_node_attribute(self, attribute, values=None, val=None,
                           value_type=None, nodes=None):
        '''
        Set attributes of the graph's nodes.

        Parameters
        ----------
        attribute : str
            The name of the attribute.
        values : array, optional (default: None)
            Values with which the node attribute should be initialized.
            (must have one entry per node in the graph)
        val : int, float or str , optional (default: None)
            Identical value for all nodes.
        value_type : str, optional (default: None)
            Type of the attribute, among 'int', 'double', 'string'. Only used
            if the attribute does not exist and must be created.
        nodes : list of nodes, optional (default: all)
            Nodes whose attributes should be set. Others will remain unchanged.

        See also
        --------
        :func:`~nngt.Graph.set_edge_attribute`,
        :func:`~nngt.Graph.new_node_attribute`,
        :func:`~nngt.Graph.get_node_attributes`,
        :func:`~nngt.Graph.new_edge_attribute`,
        :func:`~nngt.Graph.get_edge_attributes`,
        '''
        if attribute not in self.node_attributes:
            # unknown attribute: create it (requires an explicit type)
            assert value_type is not None, "`value_type` is necessary for " +\
                "new attributes."
            self.new_node_attribute(name=attribute, value_type=value_type,
                                    values=values, val=val)
        else:
            num_nodes = self.node_nb() if nodes is None else len(nodes)
            if values is None:
                if val is not None:
                    # deepcopy so mutable `val` objects are not shared
                    # between nodes
                    values = [deepcopy(val) for _ in range(num_nodes)]
                else:
                    raise InvalidArgument("At least one of the `values` and "
                                          "`val` arguments should not be ``None``.")
            self._nattr.set_attribute(attribute, values, nodes=nodes)
def set_weights(self, weight=None, elist=None, distribution=None,
parameters=None, noise_scale=None):
'''
Set the synaptic weights.
Parameters
----------
weight : float or class:`numpy.array`, optional (default: None)
Value or list of the weights (for user defined weights).
elist : class:`numpy.array`, optional (default: None)
List of the edges (for user defined weights).
distribution : class:`string`, optional (default: None)
Type of distribution (choose among "constant", "uniform",
"gaussian", "lognormal", "lin_corr", "log_corr").
parameters : dict, optional (default: {})
Dictionary containing the properties of the weight distribution.
Properties are as follow for the distributions
- 'constant': 'value'
- 'uniform': 'lower', 'upper'
- 'gaussian': 'avg', 'std'
- 'lognormal': 'position', 'scale'
noise_scale : class:`int`, optional (default: None)
Scale of the multiplicative Gaussian noise that should be applied
on the weights.
Note
----
If `distribution` and `parameters` are provided and the weights are set
for the whole graph (`elist` is None), then the distribution properties
will be kept as the new default for subsequent edges. That is, if new
edges are created without specifying their weights, then these new
weights will automatically be drawn from this previous distribution.
'''
if isinstance(weight, float):
size = self.edge_nb() if elist is None else len(elist)
self._w = {"distribution": "constant", "value": weight}
weight = np.repeat(weight, size)
elif not nonstring_container(weight) and weight is not None:
raise AttributeError("Invalid `weight` value: must be either "
"float, array-like or None.")
elif weight is not None:
self._w = {"distribution": "custom"}
elif None not in (distribution, parameters) and elist is None:
self._w = {"distribution": distribution}
self._w.update(parameters)
if distribution is None:
distribution = self._w.get("distribution", None)
if parameters is None:
parameters = self._w
Connections.weights(
self, elist=elist, wlist=weight, distribution=distribution,
parameters=parameters, noise_scale=noise_scale)
    def set_types(self, edge_type, nodes=None, fraction=None):
        '''
        Set the synaptic/connection types.

        .. versionchanged :: 2.0
            Changed `syn_type` to `edge_type`.

        .. warning ::
            The special "type" attribute cannot be modified when using graphs
            that inherit from the :class:`~nngt.Network` class. This is because
            for biological networks, neurons make only one kind of synapse,
            which is determined by the :class:`nngt.NeuralGroup` they
            belong to.

        Parameters
        ----------
        edge_type : int, string, or array of ints
            Type of the connection among 'excitatory' (also `1`) or
            'inhibitory' (also `-1`).
        nodes : int, float or list, optional (default: `None`)
            If `nodes` is an int, number of nodes of the required type that
            will be created in the graph (all connections from inhibitory nodes
            are inhibitory); if it is a float, ratio of `edge_type` nodes in the
            graph; if it is a list, ids of the `edge_type` nodes.
        fraction : float, optional (default: `None`)
            Fraction of the selected edges that will be set as `edge_type` (if
            `nodes` is not `None`, it is the fraction of the specified nodes'
            edges, otherwise it is the fraction of all edges in the graph).

        Returns
        -------
        t_list : :class:`numpy.ndarray`
            List of the types in an order that matches the `edges` attribute of
            the graph.
        '''
        inhib_nodes = None
        # explicit per-edge types: forward them directly
        if nonstring_container(edge_type):
            return Connections.types(self, values=edge_type)
        elif edge_type in ('excitatory', 1):
            if is_integer(nodes):
                # N excitatory nodes requested -> the rest are inhibitory
                inhib_nodes = self.node_nb() - nodes
            elif nonstring_container(nodes):
                # inhibitory nodes are the complement of `nodes`; delete
                # in descending index order so earlier indices stay valid
                # (initially the list value at index i is i)
                inhib_nodes = list(range(self.node_nb()))
                nodes.sort()
                for node in nodes[::-1]:
                    del inhib_nodes[node]
            elif nodes is not None:
                raise ValueError("`nodes` should be integer or array of ids.")
        elif edge_type in ('inhibitory', -1):
            if is_integer(nodes) or nonstring_container(nodes):
                inhib_nodes = nodes
            elif nodes is not None:
                raise ValueError("`nodes` should be integer or array of ids.")
        return Connections.types(self, inhib_nodes, fraction)
def set_delays(self, delay=None, elist=None, distribution=None,
parameters=None, noise_scale=None):
'''
Set the delay for spike propagation between neurons.
Parameters
----------
delay : float or class:`numpy.array`, optional (default: None)
Value or list of delays (for user defined delays).
elist : class:`numpy.array`, optional (default: None)
List of the edges (for user defined delays).
distribution : class:`string`, optional (default: None)
Type of distribution (choose among "constant", "uniform",
"gaussian", "lognormal", "lin_corr", "log_corr").
parameters : dict, optional (default: {})
Dictionary containing the properties of the delay distribution.
noise_scale : class:`int`, optional (default: None)
Scale of the multiplicative Gaussian noise that should be applied
on the delays.
'''
# check special cases and set self._d
if isinstance(delay, float):
size = self.edge_nb() if elist is None else len(elist)
self._d = {"distribution": "constant", "value": delay}
delay = np.repeat(delay, size)
elif not nonstring_container(delay) and delay is not None:
raise AttributeError("Invalid `delay` value: must be either "
"float, array-like or None")
elif delay is not None:
self._d = {"distribution": "custom"}
elif None not in (distribution, parameters):
self._d = {"distribution": distribution}
self._d.update(parameters)
if distribution is None:
if hasattr(self, "_d"):
distribution = self._d["distribution"]
else:
raise AttributeError(
"Invalid `distribution` value: cannot be None if "
"default delays were not set at graph creation.")
if parameters is None:
if hasattr(self, "_d"):
parameters = self._d
else:
raise AttributeError(
"Invalid `parameters` value: cannot be None if default"
" delays were not set at graph creation.")
return Connections.delays(
self, elist=elist, dlist=delay, distribution=distribution,
parameters=parameters, noise_scale=noise_scale)
| gpl-3.0 |
abhiQmar/servo | tests/wpt/web-platform-tests/tools/pytest/testing/test_pytester.py | 203 | 3498 | import pytest
import os
from _pytest.pytester import HookRecorder
from _pytest.config import PytestPluginManager
from _pytest.main import EXIT_OK, EXIT_TESTSFAILED
def test_make_hook_recorder(testdir):
    """Exercise the HookRecorder bookkeeping of test/collect reports."""
    item = testdir.getitem("def test_func(): pass")
    recorder = testdir.make_hook_recorder(item.config.pluginmanager)
    assert not recorder.getfailures()

    # NOTE: xfail raises immediately, so everything below is currently
    # skipped — kept for the planned refactoring of the report recorder.
    pytest.xfail("internal reportrecorder tests need refactoring")

    # fake "failed" call report
    class rep:
        excinfo = None
        passed = False
        failed = True
        skipped = False
        when = "call"
    recorder.hook.pytest_runtest_logreport(report=rep)
    failures = recorder.getfailures()
    assert failures == [rep]
    # getfailures must be idempotent (does not consume the reports)
    failures = recorder.getfailures()
    assert failures == [rep]

    # fake "skipped" call report (shadows the previous class)
    class rep:
        excinfo = None
        passed = False
        failed = False
        skipped = True
        when = "call"
    rep.passed = False
    rep.skipped = True
    recorder.hook.pytest_runtest_logreport(report=rep)

    # failed collection report
    modcol = testdir.getmodulecol("")
    rep = modcol.config.hook.pytest_make_collect_report(collector=modcol)
    rep.passed = False
    rep.failed = True
    rep.skipped = False
    recorder.hook.pytest_collectreport(report=rep)
    passed, skipped, failed = recorder.listoutcomes()
    assert not passed and skipped and failed
    numpassed, numskipped, numfailed = recorder.countoutcomes()
    assert numpassed == 0
    assert numskipped == 1
    assert numfailed == 1
    assert len(recorder.getfailedcollections()) == 1

    # after unregistering, new reports must not be recorded
    recorder.unregister()
    recorder.clear()
    recorder.hook.pytest_runtest_logreport(report=rep)
    pytest.raises(ValueError, "recorder.getfailures()")
def test_parseconfig(testdir):
    """Each parseconfig() call must build a fresh, independent config."""
    config1 = testdir.parseconfig()
    config2 = testdir.parseconfig()
    assert config2 != config1
    # and neither is the process-global pytest.config
    assert config1 != pytest.config
def test_testdir_runs_with_plugin(testdir):
    """A test module declaring pytest_plugins = "pytester" runs correctly."""
    testdir.makepyfile("""
        pytest_plugins = "pytester"
        def test_hello(testdir):
            assert 1
    """)
    result = testdir.runpytest()
    # the inner test must have used the pytester fixture and passed
    result.assert_outcomes(passed=1)
def make_holder():
    """Build two equivalent hook holders: a class and a module.

    Both expose ``pytest_xyz`` (one argument) and ``pytest_xyz_noarg``
    so tests can be parametrized over either holder flavor.
    """
    class apiclass:
        def pytest_xyz(self, arg):
            "x"
        def pytest_xyz_noarg(self):
            "x"

    def pytest_xyz(arg):
        "x"

    def pytest_xyz_noarg():
        "x"

    # create a plain module object without importing `types`
    apimod = type(os)('api')
    apimod.pytest_xyz = pytest_xyz
    apimod.pytest_xyz_noarg = pytest_xyz_noarg
    return apiclass, apimod
@pytest.mark.parametrize("holder", make_holder())
def test_hookrecorder_basic(holder):
    """HookRecorder records hook calls with their arguments, in order."""
    pm = PytestPluginManager()
    pm.addhooks(holder)
    rec = HookRecorder(pm)
    pm.hook.pytest_xyz(arg=123)
    call = rec.popcall("pytest_xyz")
    assert call.arg == 123
    assert call._name == "pytest_xyz"
    # popping an unknown hook name fails the test
    pytest.raises(pytest.fail.Exception, "rec.popcall('abc')")
    pm.hook.pytest_xyz_noarg()
    call = rec.popcall("pytest_xyz_noarg")
    assert call._name == "pytest_xyz_noarg"
def test_makepyfile_unicode(testdir):
    """makepyfile must accept non-ASCII content on both Python 2 and 3."""
    # Python 2 has unichr; on Python 3 alias it to chr
    # (note: deliberately mutates the module-global name)
    global unichr
    try:
        unichr(65)
    except NameError:
        unichr = chr
    # U+FFFD (replacement character) exercises non-ASCII file writing
    testdir.makepyfile(unichr(0xfffd))
def test_inline_run_clean_modules(testdir):
    """inline_run must not reuse cached modules between runs."""
    test_mod = testdir.makepyfile("def test_foo(): assert True")
    result = testdir.inline_run(str(test_mod))
    assert result.ret == EXIT_OK
    # rewrite module, now test should fail if module was re-imported
    test_mod.write("def test_foo(): assert False")
    result2 = testdir.inline_run(str(test_mod))
    assert result2.ret == EXIT_TESTSFAILED
| mpl-2.0 |
nkgilley/home-assistant | homeassistant/components/xs1/__init__.py | 8 | 2700 | """Support for the EZcontrol XS1 gateway."""
import asyncio
import logging
import voluptuous as vol
import xs1_api_client
from homeassistant.const import (
CONF_HOST,
CONF_PASSWORD,
CONF_PORT,
CONF_SSL,
CONF_USERNAME,
)
from homeassistant.helpers import discovery
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.entity import Entity
_LOGGER = logging.getLogger(__name__)

# integration domain and hass.data keys for the discovered devices
DOMAIN = "xs1"
ACTUATORS = "actuators"
SENSORS = "sensors"

# define configuration parameters
# NOTE(review): CONF_PORT is validated as a string (default "80" via
# cv.string), not cv.port — confirm this matches what the client expects
CONFIG_SCHEMA = vol.Schema(
    {
        DOMAIN: vol.Schema(
            {
                vol.Required(CONF_HOST): cv.string,
                vol.Optional(CONF_PASSWORD): cv.string,
                vol.Optional(CONF_PORT, default=80): cv.string,
                vol.Optional(CONF_SSL, default=False): cv.boolean,
                vol.Optional(CONF_USERNAME): cv.string,
            }
        )
    },
    extra=vol.ALLOW_EXTRA,
)

# platforms set up via discovery.load_platform in setup()
XS1_COMPONENTS = ["climate", "sensor", "switch"]

# Lock used to limit the amount of concurrent update requests
# as the XS1 Gateway can only handle a very
# small amount of concurrent requests
UPDATE_LOCK = asyncio.Lock()
def setup(hass, config):
    """Set up XS1 Component.

    Connects to the gateway, caches the enabled actuators/sensors in
    hass.data, then loads the dependent platforms. Returns False on
    connection failure so Home Assistant marks the setup as failed.
    """
    _LOGGER.debug("Initializing XS1")

    host = config[DOMAIN][CONF_HOST]
    port = config[DOMAIN][CONF_PORT]
    ssl = config[DOMAIN][CONF_SSL]
    user = config[DOMAIN].get(CONF_USERNAME)
    password = config[DOMAIN].get(CONF_PASSWORD)

    # initialize XS1 API
    try:
        xs1 = xs1_api_client.XS1(
            host=host, port=port, ssl=ssl, user=user, password=password
        )
    except ConnectionError as error:
        _LOGGER.error(
            "Failed to create XS1 API client because of a connection error: %s", error,
        )
        return False

    _LOGGER.debug("Establishing connection to XS1 gateway and retrieving data...")
    hass.data[DOMAIN] = {}

    # cache the enabled devices so the platforms can pick them up
    actuators = xs1.get_all_actuators(enabled=True)
    sensors = xs1.get_all_sensors(enabled=True)
    hass.data[DOMAIN][ACTUATORS] = actuators
    hass.data[DOMAIN][SENSORS] = sensors

    _LOGGER.debug("Loading components for XS1 platform...")
    # Load components for supported devices
    for component in XS1_COMPONENTS:
        discovery.load_platform(hass, component, DOMAIN, {}, config)

    return True
class XS1DeviceEntity(Entity):
    """Representation of a base XS1 device."""

    def __init__(self, device):
        """Initialize the XS1 device wrapper around the client object."""
        self.device = device

    async def async_update(self):
        """Retrieve latest device state."""
        # serialize gateway polls: the XS1 gateway only handles a small
        # number of concurrent requests (see UPDATE_LOCK)
        async with UPDATE_LOCK:
            # device.update is blocking, run it in the executor
            await self.hass.async_add_executor_job(self.device.update)
| apache-2.0 |
xyzz/vcmi-build | project/jni/python/src/Lib/test/test___all__.py | 52 | 6085 | import unittest
from test.test_support import run_unittest
import sys
import warnings
class AllTest(unittest.TestCase):
    def check_all(self, modname):
        """Verify that `modname`'s __all__ matches its star-import names.

        Python 2 test: uses the statement form of ``exec``.
        """
        names = {}
        with warnings.catch_warnings():
            # importing legacy modules may emit deprecation warnings
            warnings.filterwarnings("ignore", ".* (module|package)",
                                    DeprecationWarning)
            try:
                exec "import %s" % modname in names
            except ImportError:
                # Silent fail here seems the best route since some modules
                # may not be available in all environments.
                return
        self.failUnless(hasattr(sys.modules[modname], "__all__"),
                        "%s has no __all__ attribute" % modname)
        # star-import into a fresh namespace and compare with __all__
        names = {}
        exec "from %s import *" % modname in names
        if "__builtins__" in names:
            del names["__builtins__"]
        keys = set(names)
        all = set(sys.modules[modname].__all__)
        self.assertEqual(keys, all)
def test_all(self):
if not sys.platform.startswith('java'):
# In case _socket fails to build, make this test fail more gracefully
# than an AttributeError somewhere deep in CGIHTTPServer.
import _socket
self.check_all("BaseHTTPServer")
self.check_all("Bastion")
self.check_all("CGIHTTPServer")
self.check_all("ConfigParser")
self.check_all("Cookie")
self.check_all("MimeWriter")
self.check_all("Queue")
self.check_all("SimpleHTTPServer")
self.check_all("SocketServer")
self.check_all("StringIO")
self.check_all("UserString")
self.check_all("aifc")
self.check_all("atexit")
self.check_all("audiodev")
self.check_all("base64")
self.check_all("bdb")
self.check_all("binhex")
self.check_all("calendar")
self.check_all("cgi")
self.check_all("cmd")
self.check_all("code")
self.check_all("codecs")
self.check_all("codeop")
self.check_all("colorsys")
self.check_all("commands")
self.check_all("compileall")
self.check_all("copy")
self.check_all("copy_reg")
self.check_all("csv")
self.check_all("dbhash")
self.check_all("decimal")
self.check_all("difflib")
self.check_all("dircache")
self.check_all("dis")
self.check_all("doctest")
self.check_all("dummy_thread")
self.check_all("dummy_threading")
self.check_all("filecmp")
self.check_all("fileinput")
self.check_all("fnmatch")
self.check_all("fpformat")
self.check_all("ftplib")
self.check_all("getopt")
self.check_all("getpass")
self.check_all("gettext")
self.check_all("glob")
self.check_all("gzip")
self.check_all("heapq")
self.check_all("htmllib")
self.check_all("httplib")
self.check_all("ihooks")
self.check_all("imaplib")
self.check_all("imghdr")
self.check_all("imputil")
self.check_all("keyword")
self.check_all("linecache")
self.check_all("locale")
self.check_all("macpath")
self.check_all("macurl2path")
self.check_all("mailbox")
self.check_all("mailcap")
self.check_all("mhlib")
self.check_all("mimetools")
self.check_all("mimetypes")
self.check_all("mimify")
self.check_all("multifile")
self.check_all("netrc")
self.check_all("nntplib")
self.check_all("ntpath")
self.check_all("opcode")
self.check_all("optparse")
self.check_all("os")
self.check_all("os2emxpath")
self.check_all("pdb")
self.check_all("pickle")
self.check_all("pickletools")
self.check_all("pipes")
self.check_all("popen2")
self.check_all("poplib")
self.check_all("posixpath")
self.check_all("pprint")
self.check_all("profile")
self.check_all("pstats")
self.check_all("pty")
self.check_all("py_compile")
self.check_all("pyclbr")
self.check_all("quopri")
self.check_all("random")
self.check_all("re")
self.check_all("repr")
self.check_all("rexec")
self.check_all("rfc822")
self.check_all("rlcompleter")
self.check_all("robotparser")
self.check_all("sched")
self.check_all("sets")
self.check_all("sgmllib")
self.check_all("shelve")
self.check_all("shlex")
self.check_all("shutil")
self.check_all("smtpd")
self.check_all("smtplib")
self.check_all("sndhdr")
self.check_all("socket")
self.check_all("_strptime")
self.check_all("symtable")
self.check_all("tabnanny")
self.check_all("tarfile")
self.check_all("telnetlib")
self.check_all("tempfile")
self.check_all("test.test_support")
self.check_all("textwrap")
self.check_all("threading")
self.check_all("timeit")
self.check_all("toaiff")
self.check_all("tokenize")
self.check_all("traceback")
self.check_all("tty")
self.check_all("unittest")
self.check_all("urllib")
self.check_all("urlparse")
self.check_all("uu")
self.check_all("warnings")
self.check_all("wave")
self.check_all("weakref")
self.check_all("webbrowser")
self.check_all("xdrlib")
self.check_all("zipfile")
# rlcompleter needs special consideration; it import readline which
# initializes GNU readline which calls setlocale(LC_CTYPE, "")... :-(
try:
self.check_all("rlcompleter")
finally:
try:
import locale
except ImportError:
pass
else:
locale.setlocale(locale.LC_CTYPE, 'C')
def test_main():
    # Entry point used by regrtest: run the whole AllTest case through the
    # test.test_support harness.
    run_unittest(AllTest)

if __name__ == "__main__":
    test_main()
| lgpl-2.1 |
vrettasm/VGPA | code/numerics/runge_kutta4.py | 1 | 6322 | import numpy as np
from .ode_solver import OdeSolver
class RungeKutta4(OdeSolver):
    """
    Runge-Kutta (4th order) method of integration:
    https://en.wikipedia.org/wiki/Runge%E2%80%93Kutta_methods

    Integrates the marginal-moment ODEs forward in time and the Lagrange
    multiplier ODEs backward in time, using the classic RK4 weighting
    (K1 + 2*K2 + 2*K3 + K4) / 6 with midpoint coefficients.
    """

    def __init__(self, dt, single_dim):
        """
        Default constructor.

        :param dt: discrete time step.

        :param single_dim: flags the ode as 1D or nD.
        """
        # Call the constructor of the parent class.
        super().__init__(dt, single_dim)
    # _end_def_

    def solve_fwd(self, lin_a, off_b, m0, s0, sigma):
        """
        Runge-Kutta (4) integration method. This provides the actual solution.

        :param lin_a: Linear variational parameters (dim_n x dim_d x dim_d).

        :param off_b: Offset variational parameters (dim_n x dim_d).

        :param m0: Initial marginal mean (dim_d x 1).

        :param s0: Initial marginal variance (dim_d x dim_d).

        :param sigma: System noise variance (dim_d x dim_d).

        :return: 1) mt: posterior means values (dim_n x dim_d).
                 2) st: posterior variance values (dim_n x dim_d x dim_d).
        """
        # Pre-allocate memory according to single_dim.
        if self.single_dim:
            # Number of discrete time points.
            dim_n = off_b.size

            # Return arrays.
            mt = np.zeros(dim_n)
            st = np.zeros(dim_n)
        else:
            # Get the dimensions.
            dim_n, dim_d = off_b.shape

            # Return arrays.
            mt = np.zeros((dim_n, dim_d))
            st = np.zeros((dim_n, dim_d, dim_d))
        # _end_if_

        # Initialize the first moments.
        mt[0], st[0] = m0, s0

        # Discrete time step.
        dt = self.dt

        # Local copies of auxiliary functions (defined on the parent class;
        # binding them to locals avoids repeated attribute lookups in the loop).
        fun_mt = self.fun_mt
        fun_st = self.fun_st

        # Compute the midpoints at time 't + 0.5*dt'.
        ak_mid = 0.5 * (lin_a[0:-1] + lin_a[1:])
        bk_mid = 0.5 * (off_b[0:-1] + off_b[1:])

        # Half step-size.
        h = 0.5 * dt

        # Run through all time points.
        for k in range(dim_n - 1):
            # Get the values at time 'tk'.
            ak = lin_a[k]
            bk = off_b[k]

            # Marginal moments.
            sk = st[k]
            mk = mt[k]

            # Get the midpoints at time 't + 0.5*dt'.
            a_mid = ak_mid[k]
            b_mid = bk_mid[k]

            # Intermediate steps (classic RK4 slopes for the mean).
            K1 = fun_mt(mk, ak, bk)
            K2 = fun_mt((mk + h * K1), a_mid, b_mid)
            K3 = fun_mt((mk + h * K2), a_mid, b_mid)
            K4 = fun_mt((mk + dt * K3), lin_a[k + 1], off_b[k + 1])

            # NEW "mean" point.
            mt[k + 1] = mk + dt * (K1 + 2.0 * (K2 + K3) + K4) / 6.0

            # Intermediate steps (RK4 slopes for the variance).
            L1 = fun_st(sk, ak, sigma)
            L2 = fun_st((sk + h * L1), a_mid, sigma)
            L3 = fun_st((sk + h * L2), a_mid, sigma)
            L4 = fun_st((sk + dt * L3), lin_a[k + 1], sigma)

            # NEW "variance" point
            st[k + 1] = sk + dt * (L1 + 2.0 * (L2 + L3) + L4) / 6.0
        # _end_for_

        # Marginal moments.
        return mt, st
    # _end_def_

    def solve_bwd(self, lin_a, dEsde_dm, dEsde_ds, dEobs_dm, dEobs_ds):
        """
        RK4 integration method. Provides the actual solution.

        :param lin_a: Linear variational parameters (dim_n x dim_d x dim_d).

        :param dEsde_dm: Derivative of Esde w.r.t. m(t), (dim_n x dim_d).

        :param dEsde_ds: Derivative of Esde w.r.t. s(t), (dim_n x dim_d x dim_d).

        :param dEobs_dm: Derivative of Eobs w.r.t. m(t), (dim_n x dim_d).

        :param dEobs_ds: Derivative of Eobs w.r.t. s(t), (dim_n x dim_d x dim_d).

        :return: 1) lam: Lagrange multipliers for the mean values (dim_n x dim_d),
                 2) psi: Lagrange multipliers for the var values (dim_n x dim_d x dim_d).
        """
        # Pre-allocate memory according to single_dim.
        if self.single_dim:
            # Number of discrete points.
            dim_n = dEsde_dm.size

            # Return arrays.
            lam = np.zeros(dim_n)
            psi = np.zeros(dim_n)
        else:
            # Get the dimensions.
            dim_n, dim_d = dEsde_dm.shape

            # Return arrays.
            lam = np.zeros((dim_n, dim_d))
            psi = np.zeros((dim_n, dim_d, dim_d))
        # _end_if_

        # Discrete time step.
        dt = self.dt

        # Half step-size.
        h = 0.5 * dt

        # Local copies of auxiliary functions.
        fun_lam = self.fun_lam
        fun_psi = self.fun_psi

        # Compute the midpoints at time 't + 0.5*dt'.
        ak_mid = 0.5 * (lin_a[0:-1] + lin_a[1:])
        dEmk_mid = 0.5 * (dEsde_dm[0:-1] + dEsde_dm[1:])
        dEsk_mid = 0.5 * (dEsde_ds[0:-1] + dEsde_ds[1:])

        # Correct dimensions, by adding a zero at the end.
        # NOTE(review): appending a scalar 0.0 only matches shapes in the
        # single-dim case; confirm that nD inputs are handled upstream.
        ak_mid = np.array([*ak_mid, 0.0])
        dEmk_mid = np.array([*dEmk_mid, 0.0])
        dEsk_mid = np.array([*dEsk_mid, 0.0])

        # Run through all time points (backwards in time).
        for t in range(dim_n - 1, 0, -1):
            # Get the values at time 't'.
            at = lin_a[t]
            lamt = lam[t]
            psit = psi[t]

            # Get the midpoints at time 't - 0.5*dt'.
            ak = ak_mid[t-1]
            dEmk = dEmk_mid[t-1]
            dEsk = dEsk_mid[t-1]

            # Lambda (backward) propagation: Intermediate steps.
            K1 = fun_lam(dEsde_dm[t], at, lamt)
            K2 = fun_lam(dEmk, ak, (lamt - h * K1))
            K3 = fun_lam(dEmk, ak, (lamt - h * K2))
            K4 = fun_lam(dEsde_dm[t - 1], lin_a[t - 1], (lamt - dt * K3))

            # NEW "Lambda" point (observation gradient added as a jump term).
            lam[t - 1] = lamt - dt * (K1 + 2.0 * (K2 + K3) + K4) / 6.0 + dEobs_dm[t - 1]

            # Psi (backward) propagation: Intermediate steps.
            L1 = fun_psi(dEsde_ds[t], at, psit)
            L2 = fun_psi(dEsk, ak, (psit - h * L1))
            L3 = fun_psi(dEsk, ak, (psit - h * L2))
            L4 = fun_psi(dEsde_ds[t - 1], lin_a[t - 1], (psit - dt * L3))

            # NEW "Psi" point.
            psi[t - 1] = psit - dt * (L1 + 2.0 * (L2 + L3) + L4) / 6.0 + dEobs_ds[t - 1]
        # _end_for_

        # Lagrange multipliers.
        return lam, psi
    # _end_def_

# _end_class_
| gpl-3.0 |
patdaburu/mothergeo-py | tests/test_logging.py | 1 | 1665 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import unittest
from mothergeo.logging import loggable_class as loggable
class TestLoggable(unittest.TestCase):
    """
    These test cases test the :py:func:`loggable_class` decorator method.
    """

    def test_loggable_class_without_logger_name(self):
        """
        This test creates a new loggable class without overriding the default
        logger's name, then creates a new instance and verifies the logger's
        properties.
        """
        # Define the test class.
        @loggable()
        class TestClass(object):
            pass
        # Create an instance of the test class.
        test_obj = TestClass()
        # Verify the test object has a logger.
        self.assertTrue(hasattr(test_obj, 'logger'))
        # BUGFIX: the original called assertTrue(expected, actual), which
        # always passes because the non-empty expected string is truthy and
        # the second argument is treated as the failure message.  Use
        # assertEqual to actually compare the logger's name to the expected
        # '<module>.<class>' pattern.
        self.assertEqual(
            '{module}.{cls}'.format(module=__name__,
                                    cls=test_obj.__class__.__name__),
            test_obj.logger.name)

    def test_loggable_class_with_logger_name(self):
        """
        This test creates a new loggable class, overriding the default
        logger's name, then creates a new instance and verifies the logger's
        properties.
        """
        # Define the test class.
        @loggable(logger_name='yabba.dabba.doo')
        class TestClass(object):
            pass
        # Create an instance of the test class.
        test_obj = TestClass()
        # Verify the test object has a logger.
        self.assertTrue(hasattr(test_obj, 'logger'))
        # BUGFIX: same assertTrue(expected, actual) misuse as above — the
        # name comparison was never actually performed.
        self.assertEqual('yabba.dabba.doo', test_obj.logger.name)
| gpl-2.0 |
beni55/networkx | networkx/algorithms/bipartite/basic.py | 12 | 6061 | # -*- coding: utf-8 -*-
"""
==========================
Bipartite Graph Algorithms
==========================
"""
# Copyright (C) 2013-2015 by
# Aric Hagberg <hagberg@lanl.gov>
# Dan Schult <dschult@colgate.edu>
# Pieter Swart <swart@lanl.gov>
# All rights reserved.
# BSD license.
import networkx as nx
__author__ = """\n""".join(['Jordi Torrents <jtorrents@milnou.net>',
'Aric Hagberg <aric.hagberg@gmail.com>'])
__all__ = [ 'is_bipartite',
'is_bipartite_node_set',
'color',
'sets',
'density',
'degrees']
def color(G):
    """Returns a two-coloring of the graph.

    Raises an exception if the graph is not bipartite.

    Parameters
    ----------
    G : NetworkX graph

    Returns
    -------
    color : dictionary
       A dictionary keyed by node with a 1 or 0 as data for each node color.

    Raises
    ------
    NetworkXError if the graph is not two-colorable.

    Examples
    --------
    >>> from networkx.algorithms import bipartite
    >>> G = nx.path_graph(4)
    >>> c = bipartite.color(G)
    >>> print(c)
    {0: 1, 1: 0, 2: 1, 3: 0}

    You can use this to set a node attribute indicating the biparite set:

    >>> nx.set_node_attributes(G, 'bipartite', c)
    >>> print(G.node[0]['bipartite'])
    1
    >>> print(G.node[1]['bipartite'])
    0
    """
    if G.is_directed():
        import itertools
        # For directed graphs a node's "neighbors" must include both
        # predecessors and successors, since edge direction is irrelevant
        # to bipartiteness.
        def neighbors(v):
            return itertools.chain.from_iterable([G.predecessors(v),
                                                  G.successors(v)])
    else:
        neighbors = G.neighbors

    color = {}
    # Stack-based traversal; the outer loop restarts it for every connected
    # component, so disconnected graphs are handled too.
    for n in G:  # handle disconnected graphs
        if n in color or len(G[n]) == 0:  # skip isolates
            continue
        queue = [n]
        color[n] = 1  # nodes seen with color (1 or 0)
        while queue:
            v = queue.pop()
            c = 1 - color[v]  # opposite color of node v
            for w in neighbors(v):
                if w in color:
                    # A neighbor already holding the same color means an
                    # odd cycle exists, so no two-coloring is possible.
                    if color[w] == color[v]:
                        raise nx.NetworkXError("Graph is not bipartite.")
                else:
                    color[w] = c
                    queue.append(w)
    # color isolates with 0
    color.update(dict.fromkeys(nx.isolates(G), 0))
    return color
def is_bipartite(G):
    """ Returns True if graph G is bipartite, False if not.

    Parameters
    ----------
    G : NetworkX graph

    Examples
    --------
    >>> from networkx.algorithms import bipartite
    >>> G = nx.path_graph(4)
    >>> print(bipartite.is_bipartite(G))
    True

    See Also
    --------
    color, is_bipartite_node_set
    """
    # A graph is bipartite exactly when a two-coloring exists: delegate to
    # color() and translate its failure exception into a boolean.
    try:
        color(G)
    except nx.NetworkXError:
        return False
    return True
def is_bipartite_node_set(G, nodes):
    """Returns True if nodes and G/nodes are a bipartition of G.

    Parameters
    ----------
    G : NetworkX graph

    nodes: list or container
      Check if nodes are a one of a bipartite set.

    Examples
    --------
    >>> from networkx.algorithms import bipartite
    >>> G = nx.path_graph(4)
    >>> X = set([1,3])
    >>> bipartite.is_bipartite_node_set(G,X)
    True

    Notes
    -----
    For connected graphs the bipartite sets are unique.  This function handles
    disconnected graphs.
    """
    candidate = set(nodes)
    # Every connected component must be splittable so that one side lies
    # entirely inside the candidate set and the other entirely outside it.
    for component in nx.connected_component_subgraphs(G):
        left, right = sets(component)
        fits_left = left.issubset(candidate) and right.isdisjoint(candidate)
        fits_right = right.issubset(candidate) and left.isdisjoint(candidate)
        if not (fits_left or fits_right):
            return False
    return True
def sets(G):
    """Returns bipartite node sets of graph G.

    Raises an exception if the graph is not bipartite.

    Parameters
    ----------
    G : NetworkX graph

    Returns
    -------
    (X,Y) : two-tuple of sets
       One set of nodes for each part of the bipartite graph.

    Examples
    --------
    >>> from networkx.algorithms import bipartite
    >>> G = nx.path_graph(4)
    >>> X, Y = bipartite.sets(G)
    >>> list(X)
    [0, 2]
    >>> list(Y)
    [1, 3]

    See Also
    --------
    color
    """
    # Partition the nodes by the 0/1 labels produced by color().
    coloring = color(G)
    ones = set(n for n, label in coloring.items() if label)     # label == 1
    zeros = set(n for n, label in coloring.items() if not label)  # label == 0
    return (ones, zeros)
def density(B, nodes):
    """Return density of bipartite graph B.

    Parameters
    ----------
    G : NetworkX graph

    nodes: list or container
      Nodes in one set of the bipartite graph.

    Returns
    -------
    d : float
       The bipartite density

    Examples
    --------
    >>> from networkx.algorithms import bipartite
    >>> G = nx.complete_bipartite_graph(3,2)
    >>> X=set([0,1,2])
    >>> bipartite.density(G,X)
    1.0
    >>> Y=set([3,4])
    >>> bipartite.density(G,Y)
    1.0

    See Also
    --------
    color
    """
    total = len(B)
    edges = nx.number_of_edges(B)
    # An edgeless graph (covers total == 0 and total == 1) has density 0.
    if edges == 0:
        return 0.0
    one_side = len(nodes)
    other_side = total - one_side
    possible = float(one_side * other_side)
    # Directed graphs have twice as many possible edges between the sides.
    if B.is_directed():
        return edges / (2.0 * possible)
    return edges / possible
def degrees(B, nodes, weight=None):
    """Return the degrees of the two node sets in the bipartite graph B.

    Parameters
    ----------
    G : NetworkX graph

    nodes: list or container
      Nodes in one set of the bipartite graph.

    weight : string or None, optional (default=None)
       The edge attribute that holds the numerical value used as a weight.
       If None, then each edge has weight 1.
       The degree is the sum of the edge weights adjacent to the node.

    Returns
    -------
    (degX,degY) : tuple of dictionaries
       The degrees of the two bipartite sets as dictionaries keyed by node.

    Examples
    --------
    >>> from networkx.algorithms import bipartite
    >>> G = nx.complete_bipartite_graph(3,2)
    >>> Y=set([3,4])
    >>> degX,degY=bipartite.degrees(G,Y)
    >>> dict(degX)
    {0: 2, 1: 2, 2: 2}

    See Also
    --------
    color, density
    """
    # The second set is simply every node of B not in the given one.
    bottom_nodes = set(nodes)
    top_nodes = set(B) - bottom_nodes
    top_degrees = B.degree(top_nodes, weight)
    bottom_degrees = B.degree(bottom_nodes, weight)
    return (top_degrees, bottom_degrees)
| bsd-3-clause |
gg7/sentry | src/sentry/search/solr/client.py | 13 | 9036 | # -*- coding: utf-8 -*-
"""
sentry.search.solr.client
~~~~~~~~~~~~~~~~~~~~~~~~~
A majority of the Solr client is heavily inspired by Pysolr:
https://github.com/toastdriven/pysolr
The main differences are we focus on Python 2, and we must remove the
dependency on the ``requests`` library.
:copyright: (c) 2010-2014 by the Sentry Team, see AUTHORS for more details.
:license: BSD, see LICENSE for more details.
"""
from __future__ import absolute_import, print_function
import urllib3
try:
# Prefer lxml, if installed.
from lxml import etree as ET
except ImportError:
try:
from xml.etree import cElementTree as ET
except ImportError:
raise ImportError("No suitable ElementTree implementation was found.")
from urlparse import urljoin
from nydus.db.backends import BaseConnection
import six
# Using two-tuples to preserve order.
# These are the C0 control characters (minus tab \x09, LF \x0a and CR \x0d,
# which are valid in XML) that Solr cannot accept; sanitize() strips them.
REPLACEMENTS = (
    # Nuke nasty control characters.
    ('\x00', ''),  # Null (fixed: was mislabeled "Start of heading")
    ('\x01', ''),  # Start of heading
    ('\x02', ''),  # Start of text
    ('\x03', ''),  # End of text
    ('\x04', ''),  # End of transmission
    ('\x05', ''),  # Enquiry
    ('\x06', ''),  # Acknowledge
    ('\x07', ''),  # Ring terminal bell
    ('\x08', ''),  # Backspace
    ('\x0b', ''),  # Vertical tab
    ('\x0c', ''),  # Form feed
    ('\x0e', ''),  # Shift out
    ('\x0f', ''),  # Shift in
    ('\x10', ''),  # Data link escape
    ('\x11', ''),  # Device control 1
    ('\x12', ''),  # Device control 2
    ('\x13', ''),  # Device control 3
    ('\x14', ''),  # Device control 4
    ('\x15', ''),  # Negative acknowledge
    ('\x16', ''),  # Synchronous idle
    ('\x17', ''),  # End of transmission block
    ('\x18', ''),  # Cancel
    ('\x19', ''),  # End of medium
    ('\x1a', ''),  # Substitute character
    ('\x1b', ''),  # Escape
    ('\x1c', ''),  # File separator
    ('\x1d', ''),  # Group separator
    ('\x1e', ''),  # Record separator
    ('\x1f', ''),  # Unit separator
)
def sanitize(data):
    """Strip the control characters listed in REPLACEMENTS from *data*.

    Accepts unicode or UTF-8 encoded bytes; always returns unicode.
    """
    if isinstance(data, six.text_type):
        data = data.encode('utf-8')

    # NOTE(review): REPLACEMENTS holds native str patterns; this relies on
    # Python 2 semantics where str is a byte string, so bytes.replace(str, str)
    # is valid — confirm before porting to Python 3.
    for bad, good in REPLACEMENTS:
        data = data.replace(bad, good)

    return data.decode('utf-8')
def is_valid_xml_char_ordinal(i):
    """
    Defines whether char is valid to use in xml document

    XML standard defines a valid char as::

    Char ::= #x9 | #xA | #xD | [#x20-#xD7FF] | [#xE000-#xFFFD] | [#x10000-#x10FFFF]
    """
    # Checked in descending order of expected frequency.
    if 0x20 <= i <= 0xD7FF:
        return True
    if i in (0x9, 0xA, 0xD):
        return True
    return 0xE000 <= i <= 0xFFFD or 0x10000 <= i <= 0x10FFFF
def clean_xml_string(s):
    """
    Cleans string from invalid xml chars

    Solution was found there::

    http://stackoverflow.com/questions/8733233/filtering-out-certain-bytes-in-python
    """
    valid_chars = (ch for ch in s if is_valid_xml_char_ordinal(ord(ch)))
    return ''.join(valid_chars)
class SolrError(Exception):
    """Raised when a Solr request fails (non-200 HTTP response)."""
    pass
class SolrClient(object):
    """
    Inspired by Pysolr, but retrofitted to support a limited scope of features
    and remove the ``requests`` dependency.
    """
    def __init__(self, url, timeout=60):
        # Base URL of the Solr core; a urllib3 pool is bound to it once.
        self.url = url
        self.timeout = timeout
        self.http = urllib3.connection_from_url(self.url)

    def _send_request(self, method, path='', body=None, headers=None):
        """Issue an HTTP request and raise SolrError on non-200 responses."""
        # lstrip('/') keeps urljoin from discarding the base path of self.url.
        url = urljoin(self.url, path.lstrip('/'))
        method = method.lower()

        if headers is None:
            headers = {}

        # Default to an XML content type unless the caller set one.
        if not any(key.lower() == 'content-type' for key in headers.iterkeys()):
            headers['Content-Type'] = 'application/xml; charset=UTF-8'

        if isinstance(body, six.text_type):
            body = body.encode('utf-8')

        resp = self.http.urlopen(
            method, url, body=body, headers=headers, timeout=self.timeout)

        if resp.status != 200:
            raise SolrError(self._extract_error(resp))

        return resp

    def _extract_error(self, response):
        """Pull a human-readable error message out of a Solr XML response."""
        if not response.headers.get('content-type', '').startswith('application/xml'):
            # Not XML; fall back to the bare HTTP status code.
            return six.text_type(response.status)

        dom_tree = ET.fromstring(response.data)
        reason_node = dom_tree.find('response/lst/str')
        if reason_node is None:
            return response.data
        return reason_node.text

    def _is_null_value(self, value):
        # None and empty strings are treated as "no value" and skipped
        # when building documents.
        if value is None:
            return True

        if isinstance(value, six.string_types) and len(value) == 0:
            return True

        return False

    def _add_doc_field(self, doc, key, value):
        """Append <field> elements for *key* to the ``doc`` XML element.

        Plain values are normalized to ``{None: value}``; a dict maps an
        atomic-update action (e.g. 'add', 'set') to its value(s).
        """
        if not isinstance(value, dict):
            return self._add_doc_field(doc, key, {None: value})

        # dict is expected to be something like
        # {key: {'add': [value]}}
        for action, action_value in value.iteritems():
            # To avoid multiple code-paths we'd like to treat all of our values
            # as iterables:
            if isinstance(action_value, (list, tuple)):
                action_value = action_value
            else:
                action_value = (action_value, )

            for bit in action_value:
                if self._is_null_value(bit):
                    continue

                attrs = {
                    'name': key,
                }
                if action:
                    # Atomic update: <field name="..." update="add|set|...">
                    attrs['update'] = action

                field = ET.Element('field', **attrs)
                field.text = self._from_python(bit)

                doc.append(field)

    def _from_python(self, value):
        """
        Converts python values to a form suitable for insertion into the xml
        we send to solr.
        """
        if hasattr(value, 'strftime'):
            # datetime vs. date: only datetimes have an 'hour' attribute.
            if hasattr(value, 'hour'):
                value = u"%sZ" % value.isoformat()
            else:
                value = u"%sT00:00:00Z" % value.isoformat()
        elif isinstance(value, bool):
            if value:
                value = u'true'
            else:
                value = u'false'
        else:
            if isinstance(value, str):
                # Py2 byte string: decode, replacing undecodable bytes.
                value = six.text_type(value, errors='replace')

            value = u"{0}".format(value)

        return clean_xml_string(value)

    def _build_doc(self, doc):
        # Turn a python dict into a <doc> element with one <field> per value.
        doc_elem = ET.Element('doc')

        for key, value in doc.items():
            self._add_doc_field(doc_elem, key, value)

        return doc_elem

    def _update(self, message, commit=None, waitFlush=None, waitSearcher=None,
                softCommit=None):
        """
        Posts the given xml message to http://<self.url>/update and
        returns the result.

        The message is always passed through sanitize() first, because any
        stray control characters would cause Solr to fail to parse the XML.
        """
        path = '/update'

        # Per http://wiki.apache.org/solr/UpdateXmlMessages, we can append a
        # ``commit=true`` to the URL and have the commit happen without a
        # second request.
        query_vars = []

        if commit is not None:
            query_vars.append('commit=%s' % str(bool(commit)).lower())

        if waitFlush is not None:
            query_vars.append('waitFlush=%s' % str(bool(waitFlush)).lower())

        if waitSearcher is not None:
            query_vars.append('waitSearcher=%s' % str(bool(waitSearcher)).lower())

        if query_vars:
            path = '%s?%s' % (path, '&'.join(query_vars))

        # remove ctrl characters
        message = sanitize(message)

        return self._send_request('post', path, message, {
            'Content-type': 'text/xml; charset=utf-8'
        })

    def add(self, docs, commit=None, commitWithin=None, waitFlush=None,
            waitSearcher=None):
        """
        Index the given iterable of document dicts.

        >>> solr.add([
        >>>     {
        >>>         "id": "doc_1",
        >>>         "title": "A test document",
        >>>     },
        >>>     {
        >>>         "id": "doc_2",
        >>>         "title": "The Banana: Tasty or Dangerous?",
        >>>         "tags": {
        >>>             "add": ["foo", "bar"],
        >>>         },
        >>>     },
        >>> ])
        """
        message = ET.Element('add')

        if commitWithin:
            message.set('commitWithin', commitWithin)

        for doc in docs:
            message.append(self._build_doc(doc))

        m = ET.tostring(message, encoding='utf-8')

        return self._update(m, commit=commit, waitFlush=waitFlush,
                            waitSearcher=waitSearcher)
class Solr(BaseConnection):
    """Nydus connection backend wrapping a SolrClient."""

    # Exceptions the nydus layer treats as retryable for this backend.
    retryable_exceptions = frozenset([urllib3.Timeout])

    def __init__(self, num, url, timeout=60):
        # num: connection index assigned by the nydus cluster;
        # url/timeout are forwarded to SolrClient on connect().
        self.url = url
        self.timeout = timeout
        super(Solr, self).__init__(num)

    @property
    def identifier(self):
        # Unique string nydus uses to identify this backend instance.
        return 'solr+%(url)s' % vars(self)

    def connect(self):
        return SolrClient(self.url, timeout=self.timeout)

    def disconnect(self):
        # SolrClient holds an urllib3 pool; there is no explicit teardown.
        pass
| bsd-3-clause |
tobetter/hardkernel-linux | tools/perf/scripts/python/sched-migration.py | 11215 | 11670 | #!/usr/bin/python
#
# Cpu task migration overview toy
#
# Copyright (C) 2010 Frederic Weisbecker <fweisbec@gmail.com>
#
# perf script event handlers have been generated by perf script -g python
#
# This software is distributed under the terms of the GNU General
# Public License ("GPL") version 2 as published by the Free Software
# Foundation.
import os
import sys
from collections import defaultdict
from UserList import UserList
sys.path.append(os.environ['PERF_EXEC_PATH'] + \
'/scripts/python/Perf-Trace-Util/lib/Perf/Trace')
sys.path.append('scripts/python/Perf-Trace-Util/lib/Perf/Trace')
from perf_trace_context import *
from Core import *
from SchedGui import *
# pid -> command name, seeded with the swapper/idle task.
threads = {0: "idle"}

def thread_name(pid):
    """Return a human-readable 'comm:pid' label for a task."""
    comm = threads[pid]
    return "%s:%d" % (comm, pid)
class RunqueueEventUnknown:
    """Fallback event for a runqueue slice whose trigger is not identified."""

    @staticmethod
    def color():
        # No marker color: the GUI draws nothing for unknown events.
        return None

    def __repr__(self):
        return "unknown"
class RunqueueEventSleep:
    """Runqueue event: a task left the queue by going to sleep."""

    def __init__(self, sleeper):
        # pid of the task that went to sleep.
        self.sleeper = sleeper

    @staticmethod
    def color():
        # Blue marker for sleep events.
        return (0, 0, 255)

    def __repr__(self):
        return thread_name(self.sleeper) + " gone to sleep"
class RunqueueEventWakeup:
    """Runqueue event: a sleeping task was woken up."""

    def __init__(self, wakee):
        # pid of the task that was woken.
        self.wakee = wakee

    @staticmethod
    def color():
        # Yellow marker for wakeups.
        return (255, 255, 0)

    def __repr__(self):
        return thread_name(self.wakee) + " woke up"
class RunqueueEventFork:
    """Runqueue event: a freshly forked task entered the queue."""

    def __init__(self, child):
        # pid of the newly forked child task.
        self.child = child

    @staticmethod
    def color():
        # Green marker for forks.
        return (0, 255, 0)

    def __repr__(self):
        return "new forked task " + thread_name(self.child)
class RunqueueMigrateIn:
    """Runqueue event: a task migrated onto this CPU."""

    def __init__(self, new):
        # pid of the task that arrived on this runqueue.
        self.new = new

    @staticmethod
    def color():
        # Cyan-ish marker for inbound migrations.
        return (0, 240, 255)

    def __repr__(self):
        return "task migrated in " + thread_name(self.new)
class RunqueueMigrateOut:
    """Runqueue event: a task migrated away from this CPU."""

    def __init__(self, old):
        # pid of the task that left this runqueue.
        self.old = old

    @staticmethod
    def color():
        # Magenta marker for outbound migrations.
        return (255, 0, 255)

    def __repr__(self):
        return "task migrated out " + thread_name(self.old)
class RunqueueSnapshot:
def __init__(self, tasks = [0], event = RunqueueEventUnknown()):
self.tasks = tuple(tasks)
self.event = event
def sched_switch(self, prev, prev_state, next):
event = RunqueueEventUnknown()
if taskState(prev_state) == "R" and next in self.tasks \
and prev in self.tasks:
return self
if taskState(prev_state) != "R":
event = RunqueueEventSleep(prev)
next_tasks = list(self.tasks[:])
if prev in self.tasks:
if taskState(prev_state) != "R":
next_tasks.remove(prev)
elif taskState(prev_state) == "R":
next_tasks.append(prev)
if next not in next_tasks:
next_tasks.append(next)
return RunqueueSnapshot(next_tasks, event)
def migrate_out(self, old):
if old not in self.tasks:
return self
next_tasks = [task for task in self.tasks if task != old]
return RunqueueSnapshot(next_tasks, RunqueueMigrateOut(old))
def __migrate_in(self, new, event):
if new in self.tasks:
self.event = event
return self
next_tasks = self.tasks[:] + tuple([new])
return RunqueueSnapshot(next_tasks, event)
def migrate_in(self, new):
return self.__migrate_in(new, RunqueueMigrateIn(new))
def wake_up(self, new):
return self.__migrate_in(new, RunqueueEventWakeup(new))
def wake_up_new(self, new):
return self.__migrate_in(new, RunqueueEventFork(new))
def load(self):
""" Provide the number of tasks on the runqueue.
Don't count idle"""
return len(self.tasks) - 1
def __repr__(self):
ret = self.tasks.__repr__()
ret += self.origin_tostring()
return ret
class TimeSlice:
    """One interval of time with a per-cpu runqueue snapshot map.

    A new slice inherits the previous slice's runqueue state and total load;
    event handlers then mutate it and record which cpus triggered the change.
    """

    def __init__(self, start, prev):
        self.start = start
        self.prev = prev
        self.end = start
        # cpus that triggered the event
        self.event_cpus = []
        if prev is not None:
            self.total_load = prev.total_load
            # Shallow copy: snapshots are replaced, never mutated in place.
            self.rqs = prev.rqs.copy()
        else:
            self.rqs = defaultdict(RunqueueSnapshot)
            self.total_load = 0

    def __update_total_load(self, old_rq, new_rq):
        # Keep the aggregate load in sync with the per-cpu replacement.
        diff = new_rq.load() - old_rq.load()
        self.total_load += diff

    def sched_switch(self, ts_list, prev, prev_state, next, cpu):
        """Apply a context switch on *cpu*; append self to ts_list if changed."""
        old_rq = self.prev.rqs[cpu]
        new_rq = old_rq.sched_switch(prev, prev_state, next)
        if old_rq is new_rq:
            return

        self.rqs[cpu] = new_rq
        self.__update_total_load(old_rq, new_rq)
        ts_list.append(self)
        self.event_cpus = [cpu]

    def migrate(self, ts_list, new, old_cpu, new_cpu):
        """Move task *new* from old_cpu's runqueue to new_cpu's."""
        if old_cpu == new_cpu:
            return
        old_rq = self.prev.rqs[old_cpu]
        out_rq = old_rq.migrate_out(new)
        self.rqs[old_cpu] = out_rq
        self.__update_total_load(old_rq, out_rq)

        new_rq = self.prev.rqs[new_cpu]
        in_rq = new_rq.migrate_in(new)
        self.rqs[new_cpu] = in_rq
        self.__update_total_load(new_rq, in_rq)

        ts_list.append(self)

        # Only flag the source cpu if the task was actually removed from it.
        if old_rq is not out_rq:
            self.event_cpus.append(old_cpu)
        self.event_cpus.append(new_cpu)

    def wake_up(self, ts_list, pid, cpu, fork):
        """Record a wakeup (or fork, if *fork* is truthy) of *pid* on *cpu*."""
        old_rq = self.prev.rqs[cpu]
        if fork:
            new_rq = old_rq.wake_up_new(pid)
        else:
            new_rq = old_rq.wake_up(pid)

        if new_rq is old_rq:
            return

        self.rqs[cpu] = new_rq
        self.__update_total_load(old_rq, new_rq)
        ts_list.append(self)
        self.event_cpus = [cpu]

    def next(self, t):
        # Close this slice at time t and start the successor slice.
        self.end = t
        return TimeSlice(t, self)
class TimeSliceList(UserList):
    """Ordered list of TimeSlice objects plus GUI rendering helpers.

    NOTE: this is Python 2 code (UserList module, xrange, integer '/').
    """

    def __init__(self, arg=None):
        # BUGFIX: the original signature was __init__(self, arg=[]) — a
        # mutable default, so every default-constructed list shared the same
        # backing storage.  Use a None sentinel instead.
        self.data = [] if arg is None else arg

    def get_time_slice(self, ts):
        """Return the slice covering timestamp *ts*, creating it if needed."""
        if len(self.data) == 0:
            slice = TimeSlice(ts, TimeSlice(-1, None))
        else:
            slice = self.data[-1].next(ts)
        return slice

    def find_time_slice(self, ts):
        """Binary-search the index of the slice containing *ts* (-1 if none)."""
        start = 0
        end = len(self.data)
        found = -1
        searching = True
        while searching:
            if start == end or start == end - 1:
                searching = False

            # Python 2 integer division keeps i an int index.
            i = (end + start) / 2
            if self.data[i].start <= ts and self.data[i].end >= ts:
                found = i
                end = i
                continue

            if self.data[i].end < ts:
                start = i

            elif self.data[i].start > ts:
                end = i

        return found

    def set_root_win(self, win):
        # GUI back-reference used by the paint/update callbacks below.
        self.root_win = win

    def mouse_down(self, cpu, t):
        """Show a textual summary of cpu's runqueue at time *t* in the GUI."""
        idx = self.find_time_slice(t)
        if idx == -1:
            return

        ts = self[idx]
        rq = ts.rqs[cpu]
        raw = "CPU: %d\n" % cpu
        raw += "Last event : %s\n" % rq.event.__repr__()
        raw += "Timestamp : %d.%06d\n" % (ts.start / (10 ** 9), (ts.start % (10 ** 9)) / 1000)
        raw += "Duration : %6d us\n" % ((ts.end - ts.start) / (10 ** 6))
        raw += "Load = %d\n" % rq.load()
        for t in rq.tasks:
            raw += "%s \n" % thread_name(t)

        self.root_win.update_summary(raw)

    def update_rectangle_cpu(self, slice, cpu):
        """Paint one cpu/slice cell; redder means a larger share of the load."""
        rq = slice.rqs[cpu]

        if slice.total_load != 0:
            load_rate = rq.load() / float(slice.total_load)
        else:
            load_rate = 0

        red_power = int(0xff - (0xff * load_rate))
        color = (0xff, red_power, red_power)

        top_color = None

        if cpu in slice.event_cpus:
            top_color = rq.event.color()

        self.root_win.paint_rectangle_zone(cpu, color, top_color, slice.start, slice.end)

    def fill_zone(self, start, end):
        """Repaint every cpu cell for all slices overlapping [start, end]."""
        i = self.find_time_slice(start)
        if i == -1:
            return

        for i in xrange(i, len(self.data)):
            timeslice = self.data[i]
            if timeslice.start > end:
                return

            for cpu in timeslice.rqs:
                self.update_rectangle_cpu(timeslice, cpu)

    def interval(self):
        """Return the (first start, last end) covered time interval."""
        if len(self.data) == 0:
            return (0, 0)

        return (self.data[0].start, self.data[-1].end)

    def nr_rectangles(self):
        # Highest cpu index seen in the final slice (== number of GUI rows - 1).
        last_ts = self.data[-1]
        max_cpu = 0
        for cpu in last_ts.rqs:
            if cpu > max_cpu:
                max_cpu = cpu
        return max_cpu
class SchedEventProxy:
    """Receives raw tracepoint callbacks and feeds the TimeSliceList.

    Also sanity-checks sched_switch events against the task last seen
    running on each cpu, to detect dropped traces.
    """

    def __init__(self):
        # cpu -> pid of the task currently believed to be on that cpu;
        # -1 means "not yet known".
        self.current_tsk = defaultdict(lambda : -1)
        self.timeslices = TimeSliceList()

    def sched_switch(self, headers, prev_comm, prev_pid, prev_prio, prev_state,
                     next_comm, next_pid, next_prio):
        """ Ensure the task we sched out this cpu is really the one
            we logged. Otherwise we may have missed traces """
        on_cpu_task = self.current_tsk[headers.cpu]

        if on_cpu_task != -1 and on_cpu_task != prev_pid:
            print "Sched switch event rejected ts: %s cpu: %d prev: %s(%d) next: %s(%d)" % \
                (headers.ts_format(), headers.cpu, prev_comm, prev_pid, next_comm, next_pid)

        # Remember the command names so thread_name() can label these pids.
        threads[prev_pid] = prev_comm
        threads[next_pid] = next_comm

        self.current_tsk[headers.cpu] = next_pid

        ts = self.timeslices.get_time_slice(headers.ts())
        ts.sched_switch(self.timeslices, prev_pid, prev_state, next_pid, headers.cpu)

    def migrate(self, headers, pid, prio, orig_cpu, dest_cpu):
        """Forward a task migration to the current time slice."""
        ts = self.timeslices.get_time_slice(headers.ts())
        ts.migrate(self.timeslices, pid, orig_cpu, dest_cpu)

    def wake_up(self, headers, comm, pid, success, target_cpu, fork):
        """Forward a (successful) wakeup or fork to the current time slice."""
        if success == 0:
            return
        ts = self.timeslices.get_time_slice(headers.ts())
        ts.wake_up(self.timeslices, pid, target_cpu, fork)
def trace_begin():
    # Called by perf before the first event: set up the global proxy that
    # accumulates per-cpu time slices.
    global parser
    parser = SchedEventProxy()

def trace_end():
    # Called by perf after the last event: hand the collected time slices
    # to the wx GUI for rendering.
    app = wx.App(False)
    timeslices = parser.timeslices
    frame = RootFrame(timeslices, "Migration")
    app.MainLoop()
# The tracepoint handlers below are part of the perf-script callback
# interface but are irrelevant to the migration visualization, so they are
# intentionally no-ops.
def sched__sched_stat_runtime(event_name, context, common_cpu,
        common_secs, common_nsecs, common_pid, common_comm,
        comm, pid, runtime, vruntime):
    pass

def sched__sched_stat_iowait(event_name, context, common_cpu,
        common_secs, common_nsecs, common_pid, common_comm,
        comm, pid, delay):
    pass

def sched__sched_stat_sleep(event_name, context, common_cpu,
        common_secs, common_nsecs, common_pid, common_comm,
        comm, pid, delay):
    pass

def sched__sched_stat_wait(event_name, context, common_cpu,
        common_secs, common_nsecs, common_pid, common_comm,
        comm, pid, delay):
    pass

def sched__sched_process_fork(event_name, context, common_cpu,
        common_secs, common_nsecs, common_pid, common_comm,
        parent_comm, parent_pid, child_comm, child_pid):
    pass

def sched__sched_process_wait(event_name, context, common_cpu,
        common_secs, common_nsecs, common_pid, common_comm,
        comm, pid, prio):
    pass

def sched__sched_process_exit(event_name, context, common_cpu,
        common_secs, common_nsecs, common_pid, common_comm,
        comm, pid, prio):
    pass

def sched__sched_process_free(event_name, context, common_cpu,
        common_secs, common_nsecs, common_pid, common_comm,
        comm, pid, prio):
    pass
def sched__sched_migrate_task(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
comm, pid, prio, orig_cpu,
dest_cpu):
headers = EventHeaders(common_cpu, common_secs, common_nsecs,
common_pid, common_comm)
parser.migrate(headers, pid, prio, orig_cpu, dest_cpu)
def sched__sched_switch(event_name, context, common_cpu,
        common_secs, common_nsecs, common_pid, common_comm,
        prev_comm, prev_pid, prev_prio, prev_state,
        next_comm, next_pid, next_prio):
    # Forward context-switch events to the global SchedEventProxy.
    headers = EventHeaders(common_cpu, common_secs, common_nsecs,
                           common_pid, common_comm)
    parser.sched_switch(headers, prev_comm, prev_pid, prev_prio, prev_state,
                        next_comm, next_pid, next_prio)
def sched__sched_wakeup_new(event_name, context, common_cpu,
        common_secs, common_nsecs, common_pid, common_comm,
        comm, pid, prio, success,
        target_cpu):
    # Wakeup of a freshly forked task: fork flag is 1.
    headers = EventHeaders(common_cpu, common_secs, common_nsecs,
                           common_pid, common_comm)
    parser.wake_up(headers, comm, pid, success, target_cpu, 1)
def sched__sched_wakeup(event_name, context, common_cpu,
        common_secs, common_nsecs, common_pid, common_comm,
        comm, pid, prio, success,
        target_cpu):
    # Wakeup of an existing task: fork flag is 0.
    headers = EventHeaders(common_cpu, common_secs, common_nsecs,
                           common_pid, common_comm)
    parser.wake_up(headers, comm, pid, success, target_cpu, 0)
# Remaining tracepoints required by perf-script's dispatch; ignored here.
def sched__sched_wait_task(event_name, context, common_cpu,
        common_secs, common_nsecs, common_pid, common_comm,
        comm, pid, prio):
    pass

def sched__sched_kthread_stop_ret(event_name, context, common_cpu,
        common_secs, common_nsecs, common_pid, common_comm,
        ret):
    pass

def sched__sched_kthread_stop(event_name, context, common_cpu,
        common_secs, common_nsecs, common_pid, common_comm,
        comm, pid):
    pass
def trace_unhandled(event_name, context, common_cpu, common_secs, common_nsecs,
        common_pid, common_comm):
    # Catch-all for tracepoints without a dedicated handler; ignored.
    pass
| gpl-2.0 |
M157q/pelican-plugins | goodreads_activity/goodreads_activity.py | 76 | 1871 | # -*- coding: utf-8 -*-
"""
Goodreads Activity
==================
A Pelican plugin to lists books from your Goodreads shelves.
Copyright (c) Talha Mansoor
"""
from __future__ import unicode_literals
import logging
logger = logging.getLogger(__name__)
from pelican import signals
class GoodreadsActivity():
    """Wraps a Goodreads RSS shelf feed as template-ready data."""

    def __init__(self, generator):
        # feedparser is imported lazily so the plugin module can load even
        # when the dependency is missing.
        import feedparser
        self.activities = feedparser.parse(
            generator.settings['GOODREADS_ACTIVITY_FEED'])

    def fetch(self):
        """Return {'shelf_title': ..., 'books': [...]} from the parsed feed."""
        books = [
            {
                'title': entry.title,
                'author': entry.author_name,
                'link': entry.link,
                'l_cover': entry.book_large_image_url,
                'm_cover': entry.book_medium_image_url,
                's_cover': entry.book_small_image_url,
                'description': entry.book_description,
                'rating': entry.user_rating,
                'review': entry.user_review,
                'tags': entry.user_shelves,
            }
            for entry in self.activities['entries']
        ]
        return {
            'shelf_title': self.activities.feed.title,
            'books': books,
        }
def fetch_goodreads_activity(gen, metadata):
    """Expose the parsed Goodreads activity in the template context.

    Does nothing when GOODREADS_ACTIVITY_FEED is not configured.
    """
    if 'GOODREADS_ACTIVITY_FEED' not in gen.settings:
        return
    gen.context['goodreads_activity'] = gen.goodreads.fetch()
def initialize_feedparser(generator):
    # Attach a GoodreadsActivity (which fetches and parses the feed) to the
    # article generator for later use by fetch_goodreads_activity.
    generator.goodreads = GoodreadsActivity(generator)
def register():
    """Connect the plugin to Pelican's article-generation signals."""
    try:
        signals.article_generator_init.connect(initialize_feedparser)
        signals.article_generator_context.connect(fetch_goodreads_activity)
    except ImportError:
        # NOTE(review): feedparser is imported lazily inside
        # GoodreadsActivity.__init__, not here, so connect() is unlikely to
        # raise ImportError -- confirm whether this guard ever fires.
        logger.warning('`goodreads_activity` failed to load dependency `feedparser`.'
                       '`goodreads_activity` plugin not loaded.')
| agpl-3.0 |
codenote/chromium-test | chrome/tools/process_dumps/process_dumps_linux.py | 53 | 9548 | #!/usr/bin/env python
# Copyright (c) 2011 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""A tool to collect crash signatures for Chrome builds on Linux."""
import fnmatch
import optparse
import os
import shutil
import subprocess
import struct
import sys
import tempfile
def VerifySymbolAndCopyToTempDir(symbol_file, temp_dir, sym_module_name):
  """Verify the symbol file looks correct and copy it to the right place
  in temp_dir.

  Args:
    symbol_file: the path to the symbol file.
    temp_dir: the base of the temp directory where the symbol file will reside.
    sym_module_name: module name expected in the MODULE signature line.

  Returns:
    True on success.
  """
  # Use a context manager so the handle is released even if readline raises
  # (the original leaked the descriptor on error).
  with open(symbol_file) as symbol:
    signature_line = symbol.readline().strip().split()
  # signature_line should look like:
  # MODULE Linux x86 28D8A79A426807B5462CBA24F56746750 chrome
  if (len(signature_line) == 5 and signature_line[0] == 'MODULE' and
      signature_line[1] == 'Linux' and signature_line[4] == sym_module_name and
      len(signature_line[3]) == 33):
    dest = os.path.join(temp_dir, signature_line[4], signature_line[3])
    os.makedirs(dest)
    dest_file = os.path.join(dest, '%s.sym' % signature_line[4])
    shutil.copyfile(symbol_file, dest_file)
    return True
  return False
def GetCommandOutput(command):
  """Runs the command list and returns its combined stdout/stderr.

  Raises OSError (with the command line and output in the message) when
  the command exits with a non-zero status. From chromium_utils.
  """
  child = subprocess.Popen(command, stdout=subprocess.PIPE,
                           stderr=subprocess.STDOUT, bufsize=1)
  output, _ = child.communicate()
  if not child.returncode:
    return output
  raise OSError('%s: %s' % (subprocess.list2cmdline(command), output))
def GetCrashDumpDir():
  """Returns the default crash dump directory used by Chromium.

  Honors $XDG_CONFIG_HOME; falls back to ~/.config. Returns '' when no
  home directory can be determined.
  """
  xdg = os.environ.get('XDG_CONFIG_HOME')
  if xdg:
    return os.path.join(xdg, 'chromium', 'Crash Reports')
  home = os.path.expanduser('~')
  if not home:
    return ''
  return os.path.join(home, '.config', 'chromium', 'Crash Reports')
def GetStackTrace(processor_bin, symbol_path, dump_file):
  """Gets and prints the stack trace from a crash dump file.

  Args:
    processor_bin: the path to the processor.
    symbol_path: root dir for the symbols.
    dump_file: the path to the dump file.

  Returns:
    A string representing the stack trace.
  """
  # Run processor to analyze crash dump.
  cmd = [processor_bin, '-m', dump_file, symbol_path]
  try:
    output = GetCommandOutput(cmd)
  except OSError:
    return 'Cannot get stack trace.'
  # Retrieve stack trace from processor output. Processor output looks like:
  # ----------------
  # Debug output
  # ...
  # Debug output
  # Module ...
  # ...
  # Module ...
  #
  # N|... <--+
  # ... |--- crashed thread stack trace
  # N|... <--+
  # M|...
  # ...
  # ----------------
  # where each line of the stack trace looks like:
  # ThreadNumber|FrameNumber|ExeName|Function|SourceFile|LineNo|Offset
  stack_trace_frames = []
  # Skip the debug preamble, then everything up to the blank line that
  # separates the module list from the frame list.
  idx = output.find('\nModule')
  if idx >= 0:
    output = output[idx+1:]
  idx = output.find('\n\n')
  if idx >= 0:
    output = output[idx+2:].splitlines()
    if output:
      first_line = output[0].split('|')
      if first_line:
        # Collect only frames belonging to the crashed (first-listed) thread.
        crashed_thread = first_line[0]
        for line in output:
          line_split = line.split('|')
          if not line_split:
            break
          if line_split[0] != crashed_thread:
            break
          stack_trace_frames.append(line_split)
  if not stack_trace_frames:
    return 'Cannot get stack trace.'
  stack_trace = []
  for frame in stack_trace_frames:
    # Only fully symbolized 7-field frames are rendered.
    if len(frame) != 7:
      continue
    (exe, func, source, line, offset) = frame[2:]
    if not exe or not source or not line or not offset:
      continue
    # Strip the argument list from the demangled function name.
    idx = func.find('(')
    if idx >= 0:
      func = func[:idx]
    if not func:
      continue
    frame_output = '%s!%s+%s [%s @ %s]' % (exe, func, offset, source, line)
    stack_trace.append(frame_output)
  return '\n'.join(stack_trace)
def LocateFiles(pattern, root=os.curdir):
  """Yields files matching pattern found in root and its subdirectories.

  An exception is thrown if root doesn't exist. From chromium_utils.
  """
  start = os.path.abspath(os.path.expanduser(root))
  for dirpath, _dirnames, filenames in os.walk(start):
    for name in fnmatch.filter(filenames, pattern):
      yield os.path.join(dirpath, name)
def ProcessDump(dump_file, temp_dir):
  """Extracts the part of the dump file that minidump_stackwalk can read.

  Args:
    dump_file: the dump file that needs to be processed.
    temp_dir: the temp directory to put the dump file in.

  Returns:
    path of the processed dump file, or '' if no minidump was found.
  """
  # Context manager so the handle is not leaked if read() raises.
  with open(dump_file, 'rb') as dump:
    dump_data = dump.read()
  # The file is read in binary mode, so search for the minidump signature
  # as bytes (b'...' is str on Python 2, keeping 2/3 compatibility; the
  # original str literal breaks under Python 3).
  idx = dump_data.find(b'MDMP')
  if idx < 0:
    return ''
  dump_data = dump_data[idx:]
  if not dump_data:
    return ''
  (dump_fd, dump_name) = tempfile.mkstemp(suffix='chromedump', dir=temp_dir)
  os.write(dump_fd, dump_data)
  os.close(dump_fd)
  return dump_name
def main_linux(options, args):
  """Locate the Breakpad processor, the symbol file and the crash dumps,
  then print a stack trace for every dump newer than the symbols.

  Returns 0 on success, 1 on any fatal setup error. Python 2 only
  (uses print statements).
  """
  # minidump_stackwalk is part of Google Breakpad. You may need to checkout
  # the code and build your own copy. http://google-breakpad.googlecode.com/
  LINUX_PROCESSOR = 'minidump_stackwalk'
  processor_bin = None
  if options.processor_dir:
    bin = os.path.join(os.path.expanduser(options.processor_dir),
                       LINUX_PROCESSOR)
    if os.access(bin, os.X_OK):
      processor_bin = bin
  else:
    # No explicit dir given: search $PATH.
    for path in os.environ['PATH'].split(':'):
      bin = os.path.join(path, LINUX_PROCESSOR)
      if os.access(bin, os.X_OK):
        processor_bin = bin
        break
  if not processor_bin:
    print 'Cannot find minidump_stackwalk.'
    return 1
  if options.symbol_filename:
    symbol_file = options.symbol_filename
  else:
    # Derive the default symbol file name from the pointer width.
    if options.architecture:
      bits = options.architecture
    else:
      bits = struct.calcsize('P') * 8
    if bits == 32:
      symbol_file = 'chrome.breakpad.ia32'
    elif bits == 64:
      symbol_file = 'chrome.breakpad.x64'
    else:
      print 'Unknown architecture'
      return 1
  symbol_dir = options.symbol_dir
  if not options.symbol_dir:
    symbol_dir = os.curdir
  symbol_dir = os.path.abspath(os.path.expanduser(symbol_dir))
  symbol_file = os.path.join(symbol_dir, symbol_file)
  if not os.path.exists(symbol_file):
    print 'Cannot find symbols.'
    return 1
  symbol_time = os.path.getmtime(symbol_file)
  dump_files = []
  if options.dump_file:
    dump_files.append(options.dump_file)
  else:
    dump_dir = options.dump_dir
    if not dump_dir:
      dump_dir = GetCrashDumpDir()
    if not dump_dir:
      print 'Cannot find dump files.'
      return 1
    for dump_file in LocateFiles(pattern='*.dmp', root=dump_dir):
      file_time = os.path.getmtime(dump_file)
      if file_time < symbol_time:
        # Ignore dumps older than symbol file.
        continue
      dump_files.append(dump_file)
  temp_dir = tempfile.mkdtemp(suffix='chromedump')
  if not VerifySymbolAndCopyToTempDir(symbol_file, temp_dir,
                                      options.sym_module_name):
    print 'Cannot parse symbols.'
    shutil.rmtree(temp_dir)
    return 1
  dump_count = 0
  for dump_file in dump_files:
    processed_dump_file = ProcessDump(dump_file, temp_dir)
    if not processed_dump_file:
      continue
    print '-------------------------'
    print GetStackTrace(processor_bin, temp_dir, processed_dump_file)
    print
    os.remove(processed_dump_file)
    dump_count += 1
  shutil.rmtree(temp_dir)
  print '%s dumps found' % dump_count
  return 0
def main():
  """Parse the command line and dispatch to main_linux (Linux only)."""
  if not sys.platform.startswith('linux'):
    return 1
  parser = optparse.OptionParser()
  parser.add_option('', '--processor-dir', type='string', default='',
                    help='The directory where the processor is installed. '
                         'The processor is used to get stack trace from dumps. '
                         'Searches $PATH by default')
  parser.add_option('', '--dump-file', type='string', default='',
                    help='The path of the dump file to be processed. '
                         'Overwrites dump-path.')
  parser.add_option('', '--dump-dir', type='string', default='',
                    help='The directory where dump files are stored. '
                         'Searches this directory if dump-file is not '
                         'specified. Default is the Chromium crash directory.')
  parser.add_option('', '--symbol-dir', default='',
                    help='The directory with the symbols file. [Required]')
  parser.add_option('', '--symbol-filename', default='',
                    help='The name of the symbols file to use. '
                         'This argument overrides --architecture.')
  parser.add_option('', '--architecture', type='int', default=None,
                    help='Override automatic x86/x86-64 detection. '
                         'Valid values are 32 and 64')
  parser.add_option('', '--sym-module-name', type='string', default='chrome',
                    help='The module name for the symbol file. '
                         'Default: chrome')
  (options, args) = parser.parse_args()
  return main_linux(options, args)


if '__main__' == __name__:
  sys.exit(main())
| bsd-3-clause |
broadinstitute/ebola-predictor | apps/ebolacare/utils.py | 1 | 3618 | """
Utility functions used in the neural network.
@copyright: The Broad Institute of MIT and Harvard 2015
"""
import numpy as np
def linear_index(mat_idx, N, L, S, K):
    """Map a (layer, node, input) index triple to its offset in the flat
    theta/gradient vector.

    The original header string above this function wrongly duplicated
    thetaMatrix's description ("Formats the vector theta ... into matrix
    form"); this function computes a flat offset, it does not reshape.

    Layout of the flat vector (matches thetaMatrix/gradientArray):
      layer 0:        (S - 1) x N entries
      hidden layers:  (S - 1) x S entries each
      output layer:   K x S entries
    K is accepted for signature symmetry with the other helpers but is
    not needed to compute the offset.
    """
    l = mat_idx[0]  # layer
    n = mat_idx[1]  # node
    i = mat_idx[2]  # input
    if l < 1:
        return n * N + i
    elif l < L - 1:
        return (S - 1) * N + (l - 1) * (S - 1) * S + n * S + i
    else:
        return (S - 1) * N + (L - 2) * (S - 1) * S + n * S + i
"""Formats the vector theta containing the neural net coefficients into matrix form
"""
def thetaMatrix(theta, N, L, S, K):
    # Slice the flat parameter vector into one weight matrix per layer:
    # input layer (S-1, N), hidden layers (S-1, S), output layer (K, S).
    matrices = [None] * L
    offset = (S - 1) * N
    matrices[0] = theta[:offset].reshape((S - 1, N))
    step = (S - 1) * S
    for layer in range(1, L - 1):
        matrices[layer] = theta[offset:offset + step].reshape((S - 1, S))
        offset += step
    matrices[L - 1] = theta[offset:offset + K * S].reshape((K, S))
    return matrices
"""Converts the gradient matrix into array form
"""
def gradientArray(gmatrix, N, L, S, K):
    # Inverse of thetaMatrix: flatten the per-layer gradient matrices back
    # into a single 1-D vector in the same layout.
    total = (S - 1) * N + (L - 2) * (S - 1) * S + K * S
    garray = np.zeros(total)
    pos = (S - 1) * N
    # http://docs.scipy.org/doc/numpy/reference/generated/numpy.copyto.html
    np.copyto(garray[0:pos], gmatrix[0].reshape(pos))
    for layer in range(1, L - 1):
        chunk = (S - 1) * S
        np.copyto(garray[pos:pos + chunk], gmatrix[layer].reshape(chunk))
        pos += chunk
    np.copyto(garray[pos:pos + K * S], gmatrix[L - 1].reshape(K * S))
    return garray
"""Evaluates the sigmoid function
"""
def sigmoid(v):
    # 1 / (1 + e^-v), elementwise for array input.
    return np.reciprocal(1 + np.exp(-v))
"""Performs forward propagation
"""
def forwardProp(x, thetam, L):
    # Returns the list of activations a[0..L]; hidden layers get a bias
    # unit (1) prepended, the output layer does not.
    activations = [None] * (L + 1)
    activations[0] = x
    for layer in range(L):
        out = sigmoid(np.dot(thetam[layer], activations[layer]))
        if layer < L - 1:
            out = np.insert(out, 0, 1)
        activations[layer + 1] = out
    return activations
"""Performs backward propagation
"""
def backwardProp(y, a, thetam, L, N):
    # Returns the per-layer error terms err[0..L]; err[L] is the output
    # residual, hidden errors are back-propagated through the weights with
    # the sigmoid derivative a*(1-a), dropping the bias entry. err[0] is
    # zeros (the input layer carries no error).
    errors = [None] * (L + 1)
    errors[L] = a[L] - y
    for layer in range(L - 1, 0, -1):
        propagated = np.dot(np.transpose(thetam[layer]), errors[layer + 1])
        slope = np.multiply(a[layer], 1 - a[layer])
        errors[layer] = np.delete(np.multiply(propagated, slope), 0)
    errors[0] = np.zeros(N)
    return errors
"""Computes a prediction (in the form of probabilities) for the given data vector
"""
def predict(x, theta, N, L, S, K):
    # Reshape the flat parameter vector into per-layer matrices and run a
    # single forward pass; the output-layer activation is the prediction.
    thetam = thetaMatrix(theta, N, L, S, K)
    a = forwardProp(x, thetam, L)
    h = a[L]
    return h;
"""Return a function that gives a prediction from a design matrix row
"""
def gen_predictor(params_filename="./data/nnet-params"):
    # The params file starts with four "name: value" lines giving the
    # network dimensions (N, L, S, K), followed by one line per non-default
    # coefficient; theta is initialized to ones and then overwritten.
    # NOTE(review): the file is opened in "rb" but lines are split with str
    # ":" -- this assumes Python 2 (bytes == str); confirm before porting.
    with open(params_filename, "rb") as pfile:
        i = 0
        for line in pfile.readlines():
            [name, value] = line.strip().split(":")
            if i == 0:
                N = int(value.strip()) + 1
            elif i == 1:
                L = int(value.strip()) + 1
            elif i == 2:
                S = int(value.strip()) + 1
            elif i == 3:
                K = int(value.strip())
                # All dimensions known: size and initialize the flat vector.
                R = (S - 1) * N + (L - 2) * (S - 1) * S + K * S
                theta = np.ones(R)
            else:
                # name looks like "coeff 0, coeff 1, coeff 2" -> index triple.
                idx = [int(s.strip().split(" ")[1]) for s in name.split(",")]
                n = linear_index(idx, N, L, S, K)
                theta[n] = float(value.strip())
            i = i + 1
    def predictor(X):
        # Run predict() on every row of the design matrix X.
        scores = []
        for i in range(0, len(X)):
            scores.extend(predict(X[i,:], theta, N, L, S, K))
        return scores
    return predictor
| bsd-2-clause |
tomcounsell/Cobra | apps/seller/models/photo.py | 2 | 1222 | from django.db import models
from apps.seller.models.product import Product
from settings import CLOUDINARY
class Photo(models.Model): #exclusively product photos.
    """A product photo hosted on Cloudinary, ordered by rank within a product."""
    from settings import MEDIA_URL  # NOTE(review): appears unused in this class -- confirm
    product = models.ForeignKey(Product, related_name="photos")
    # Display order within the product; part of the uniqueness constraint below.
    rank = models.SmallIntegerField(null=True)
    is_progress = models.BooleanField(default=False)#of incomplete product or commission
    # Full Cloudinary URL of the originally uploaded image.
    original = models.URLField(max_length=200)

    #update history
    created_at = models.DateTimeField(auto_now_add = True)
    updated_at = models.DateTimeField(auto_now = True)

    def __unicode__(self):
        """Show the URL with the Cloudinary download prefix stripped."""
        return unicode(self.original).replace(CLOUDINARY['download_url'],'')

    @property
    def thumb_size(self):
        """375x281 center-cropped rendition via Cloudinary URL transformation."""
        return u'%s' % self.original.replace("upload", "upload/c_fill,g_center,h_281,q_85,w_375")

    @property
    def pinky_size(self):
        """100x75 center-cropped rendition via Cloudinary URL transformation."""
        return u'%s' % self.original.replace("upload", "upload/c_fill,g_center,h_75,q_70,w_100")

    @property
    def product_size(self):
        """800x600 padded rendition via Cloudinary URL transformation."""
        return u'%s' % self.original.replace("upload", "upload/c_pad,h_600,q_70,w_800")

    class Meta:
        # A product may not have two photos with the same rank.
        unique_together = ('product', 'rank')
        ordering = ['product','rank',]

    # MODEL PROPERTIES

    # MODEL FUNCTIONS
| gpl-2.0 |
chrisenuf/fullerite | src/diamond/collectors/flume/test/testflume.py | 34 | 3344 | #!/usr/bin/python
# coding=utf-8
from test import CollectorTestCase
from test import get_collector_config
from test import unittest
from mock import patch
from mock import Mock
from diamond.collector import Collector
from flume import FlumeCollector
class TestFlumeCollector(CollectorTestCase):
    """Unit tests for FlumeCollector, driven by canned JSON fixtures
    served in place of the Flume agent's metrics endpoint."""

    def setUp(self):
        config = get_collector_config('FlumeCollector', {
            'interval': 10
        })

        self.collector = FlumeCollector(config, None)

    def test_import(self):
        self.assertTrue(FlumeCollector)

    @patch.object(Collector, 'publish')
    @patch.object(Collector, 'publish_gauge')
    @patch.object(Collector, 'publish_counter')
    def test_collect_should_work(self,
                                 publish_mock,
                                 publish_gauge_mock,
                                 publish_counter_mock):
        # NOTE(review): patch decorators are applied bottom-up, so the first
        # parameter receives the innermost (publish_counter) mock -- the
        # parameter names may not match the mocks they hold; confirm.
        patch_urlopen = patch('urllib2.urlopen',
                              Mock(return_value=self.getFixture('metrics')))

        patch_urlopen.start()
        self.collector.collect()
        patch_urlopen.stop()

        # Expected metric names/values published from the 'metrics' fixture.
        metrics = {
            'CHANNEL.channel1.ChannelFillPercentage': 0.0,
            'CHANNEL.channel1.EventPutAttempt': 50272828,
            'CHANNEL.channel1.EventPutSuccess': 50255318,
            'CHANNEL.channel1.EventTakeAttempt': 50409933,
            'CHANNEL.channel1.EventTakeSuccess': 50255318,
            'SINK.sink1.BatchComplete': 251705,
            'SINK.sink1.BatchEmpty': 76250,
            'SINK.sink1.BatchUnderflow': 379,
            'SINK.sink1.ConnectionClosed': 6,
            'SINK.sink1.ConnectionCreated': 7,
            'SINK.sink1.ConnectionFailed': 0,
            'SINK.sink1.EventDrainAttempt': 25190171,
            'SINK.sink1.EventDrainSuccess': 25189571,
            'SOURCE.source1.AppendAccepted': 0,
            'SOURCE.source1.AppendBatchAccepted': 56227,
            'SOURCE.source1.AppendBatchReceived': 56258,
            'SOURCE.source1.AppendReceived': 0,
            'SOURCE.source1.EventAccepted': 50282681,
            'SOURCE.source1.EventReceived': 50311681,
            'SOURCE.source1.OpenConnection': 0
        }

        self.setDocExample(collector=self.collector.__class__.__name__,
                           metrics=metrics,
                           defaultpath=self.collector.config['path'])
        self.assertPublishedMany([publish_mock,
                                  publish_gauge_mock,
                                  publish_counter_mock
                                  ], metrics)

    @patch.object(Collector, 'publish')
    def test_blank_should_fail_gracefully(self, publish_mock):
        # An empty response must publish nothing and raise nothing.
        patch_urlopen = patch('urllib2.urlopen', Mock(
            return_value=self.getFixture('metrics_blank')))

        patch_urlopen.start()
        self.collector.collect()
        patch_urlopen.stop()

        self.assertPublishedMany(publish_mock, {})

    @patch.object(Collector, 'publish')
    def test_invalid_should_fail_gracefully(self, publish_mock):
        # Malformed JSON must publish nothing and raise nothing.
        patch_urlopen = patch(
            'urllib2.urlopen',
            Mock(return_value=self.getFixture('metrics_invalid')))

        patch_urlopen.start()
        self.collector.collect()
        patch_urlopen.stop()

        self.assertPublishedMany(publish_mock, {})


if __name__ == "__main__":
    unittest.main()
| apache-2.0 |
geoff-reid/RackHD | test/util/load_sku_packs.py | 13 | 3381 | '''
Copyright 2016, EMC, Inc.
Author(s):
This script load SKU packs from sources specified in install_default.json
'''
import fit_path # NOQA: unused import
import os
import sys
import subprocess
import fit_common
from nose.plugins.attrib import attr
@attr(all=True)
@attr(all=True)
class fit_template(fit_common.unittest.TestCase):
    """Downloads, builds, uploads and verifies RackHD SKU packs.

    Tests are ordered (test01..test04) and depend on each other's side
    effects in the on-skupack working directory. Python 2 only (print
    statements, file()).
    """

    def test01_download_sku_packs(self):
        # Download SKU packs from GitHub
        rc = subprocess.call("rm -rf temp.sku; rm -rf on-skupack;mkdir -p on-skupack", shell=True)
        self.assertEqual(rc, 0, "Unable to make on-skupack directory")
        # download all SKU repos and merge into on-skupack
        for url in fit_common.fitskupack():
            print "**** Cloning SKU Packs from " + url
            subprocess.call("git clone " + url + " temp.sku", shell=True)
            subprocess.call('cp -R temp.sku/* on-skupack; rm -rf temp.sku', shell=True)

    def test02_build_sku_packs(self):
        # build build SKU packs
        for subdir, dirs, files in os.walk('on-skupack'):
            for skus in dirs:
                if skus not in ["debianstatic", ".git"] and os.path.isfile('on-skupack/' + skus + '/config.json'):
                    subprocess.call("cd on-skupack;mkdir -p " + skus + "/tasks " + skus + "/static "
                                    + skus + "/workflows " + skus + "/templates", shell=True)
                    subprocess.call("cd on-skupack; ./build-package.bash "
                                    + skus + " " + skus + " >/dev/null 2>&1", shell=True)
            # only the top level of the walk is needed
            break

    def test03_upload_sku_packs(self):
        # upload SKU packs to RackHD
        for subdir, dirs, files in os.walk('on-skupack/tarballs'):
            for skupacks in files:
                print "\n**** Loading SKU Pack for " + skupacks
                fit_common.rackhdapi("/api/2.0/skus/pack", action="binary-post",
                                     payload=file(fit_common.TEST_PATH + "on-skupack/tarballs/" + skupacks).read())
            break
        print "\n"

    def test04_verify_sku_packs(self):
        # check SKU directory against source files
        error_message = ""
        skulist = fit_common.json.dumps(fit_common.rackhdapi("/api/2.0/skus")['json'])
        for subdir, dirs, files in os.walk('on-skupack'):
            for skus in dirs:
                if skus not in ["debianstatic", ".git", "packagebuild", "tarballs"] and \
                        os.path.isfile('on-skupack/' + skus + '/config.json'):
                    try:
                        configfile = fit_common.json.loads(open("on-skupack/" + skus + "/config.json").read())
                        # check if sku pack got installed
                        if configfile['name'] not in skulist:
                            print "FAILURE - Missing SKU: " + configfile['name']
                            error_message += " Missing SKU: " + configfile['name']
                    except:
                        # Check if the sku pack config.json file is valid format, fails skupack install if invalid
                        print "FAILURE - Corrupt config.json in SKU Pack: " + str(skus) + " - not loaded"
                        error_message += " Corrupt config.json in SKU Pack: " + str(skus)
            break
        self.assertEqual(error_message, "", error_message)


if __name__ == '__main__':
    fit_common.unittest.main()
| apache-2.0 |
titasakgm/brc-stock | openerp/addons/sale_margin/__openerp__.py | 171 | 1634 | ##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2009 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
{
'name': 'Margins in Sales Orders',
'version':'1.0',
'category' : 'Sales Management',
'description': """
This module adds the 'Margin' on sales order.
=============================================
This gives the profitability by calculating the difference between the Unit
Price and Cost Price.
""",
'author':'OpenERP SA',
'images':['images/sale_margin.jpeg'],
'depends':['sale'],
'demo':['sale_margin_demo.xml'],
'test': ['test/sale_margin.yml'],
'data':['security/ir.model.access.csv','sale_margin_view.xml'],
'auto_install': False,
'installable': True,
}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
lllucius/climacast | requests/packages/urllib3/util/connection.py | 221 | 4237 | from __future__ import absolute_import
import socket
from .wait import wait_for_read
from .selectors import HAS_SELECT, SelectorError
def is_connection_dropped(conn):  # Platform-specific
    """
    Returns True if the connection is dropped and should be closed.

    :param conn:
        :class:`httplib.HTTPConnection` object.

    Note: For platforms like AppEngine, this will always return ``False`` to
    let the platform handle connection recycling transparently for us.
    """
    # ``False`` (the getattr default) and ``None`` are distinct sentinels:
    # False means the object exposes no socket at all (AppEngine), while
    # None means httplib has already closed the socket.
    sock = getattr(conn, 'sock', False)
    if sock is False:  # Platform-specific: AppEngine
        return False
    if sock is None:  # Connection already closed (such as by httplib).
        return True

    if not HAS_SELECT:
        return False

    try:
        # With no request outstanding, a readable socket means EOF/close.
        return bool(wait_for_read(sock, timeout=0.0))
    except SelectorError:
        return True
# This function is copied from socket.py in the Python 2.7 standard
# library test suite. Added to its signature is only `socket_options`.
# One additional modification is that we avoid binding to IPv6 servers
# discovered in DNS if the system doesn't have IPv6 functionality.
def create_connection(address, timeout=socket._GLOBAL_DEFAULT_TIMEOUT,
                      source_address=None, socket_options=None):
    """Connect to *address* and return the socket object.

    Convenience function.  Connect to *address* (a 2-tuple ``(host,
    port)``) and return the socket object.  Passing the optional
    *timeout* parameter will set the timeout on the socket instance
    before attempting to connect.  If no *timeout* is supplied, the
    global default timeout setting returned by :func:`getdefaulttimeout`
    is used.  If *source_address* is set it must be a tuple of (host, port)
    for the socket to bind as a source address before making the connection.
    An host of '' or port 0 tells the OS to use the default.
    """

    host, port = address
    if host.startswith('['):
        # Strip IPv6 literal brackets ("[::1]" -> "::1").
        host = host.strip('[]')
    err = None

    # Using the value from allowed_gai_family() in the context of getaddrinfo lets
    # us select whether to work with IPv4 DNS records, IPv6 records, or both.
    # The original create_connection function always returns all records.
    family = allowed_gai_family()

    # Try each resolved address in order; remember the last error so it can
    # be re-raised if every candidate fails.
    for res in socket.getaddrinfo(host, port, family, socket.SOCK_STREAM):
        af, socktype, proto, canonname, sa = res
        sock = None
        try:
            sock = socket.socket(af, socktype, proto)

            # If provided, set socket level options before connecting.
            _set_socket_options(sock, socket_options)

            if timeout is not socket._GLOBAL_DEFAULT_TIMEOUT:
                sock.settimeout(timeout)
            if source_address:
                sock.bind(source_address)
            sock.connect(sa)
            return sock

        except socket.error as e:
            err = e
            if sock is not None:
                sock.close()
                sock = None

    if err is not None:
        raise err

    raise socket.error("getaddrinfo returns an empty list")
def _set_socket_options(sock, options):
if options is None:
return
for opt in options:
sock.setsockopt(*opt)
def allowed_gai_family():
    """This function is designed to work in the context of
    getaddrinfo, where family=socket.AF_UNSPEC is the default and
    will perform a DNS search for both IPv6 and IPv4 records."""
    # AF_UNSPEC (both record types) only when the system can bind IPv6.
    return socket.AF_UNSPEC if HAS_IPV6 else socket.AF_INET
def _has_ipv6(host):
""" Returns True if the system can bind an IPv6 address. """
sock = None
has_ipv6 = False
if socket.has_ipv6:
# has_ipv6 returns true if cPython was compiled with IPv6 support.
# It does not tell us if the system has IPv6 support enabled. To
# determine that we must bind to an IPv6 address.
# https://github.com/shazow/urllib3/pull/611
# https://bugs.python.org/issue658327
try:
sock = socket.socket(socket.AF_INET6)
sock.bind((host, 0))
has_ipv6 = True
except Exception:
pass
if sock:
sock.close()
return has_ipv6
HAS_IPV6 = _has_ipv6('::1')
| agpl-3.0 |
mlperf/training_results_v0.7 | Intel/benchmarks/minigo/8-nodes-32s-cpx-tensorflow/symmetries.py | 1 | 5113 | # Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import functools
import random
import go
import numpy as np
import tensorflow as tf
"""
Allowable symmetries:
identity [12][34]
rot90 [24][13]
rot180 [43][21]
rot270 [31][42]
flip [13][24]
fliprot90 [34][12]
fliprot180 [42][31]
fliprot270 [21][43]
"""
# Each symmetry mapped to the symmetry that undoes it (group inverses).
INVERSES = {
    'identity': 'identity',
    'rot90': 'rot270',
    'rot180': 'rot180',
    'rot270': 'rot90',
    'flip': 'flip',
    'fliprot90': 'fliprot90',
    'fliprot180': 'fliprot180',
    'fliprot270': 'fliprot270',
}

# Each symmetry name mapped to a callable implementing it on a 2-D array.
IMPLS = {
    'identity': lambda x: x,
    'rot90': np.rot90,
    'rot180': functools.partial(np.rot90, k=2),
    'rot270': functools.partial(np.rot90, k=3),
    'flip': lambda x: np.rot90(np.fliplr(x)),
    'fliprot90': np.flipud,
    'fliprot180': lambda x: np.rot90(np.flipud(x)),
    'fliprot270': np.fliplr,
}

# Every implemented symmetry must have a declared inverse, and vice versa.
assert set(IMPLS.keys()) == set(INVERSES.keys())

# A symmetry is just a string describing the transformation.
SYMMETRIES = list(INVERSES.keys())
def invert_symmetry(s):
    """Return the symmetry that undoes symmetry *s*."""
    return INVERSES[s]


def apply_symmetry_feat(sym, features):
    """Apply symmetry *sym* to a feature array (first two axes are the board)."""
    return IMPLS[sym](features)


def apply_symmetry_pi(s, pi):
    """Apply symmetry *s* to a policy vector of N*N moves plus a pass move."""
    pi = np.copy(pi)
    # rotate all moves except for the pass move at end
    pi[:-1] = IMPLS[s](pi[:-1].reshape([go.N, go.N])).ravel()
    return pi


def randomize_symmetries_feat(features):
    """Apply an independently chosen random symmetry to each feature tensor.

    Returns (symmetries_used, transformed_features) so the transformation
    can later be undone with invert_symmetries_pi.
    """
    symmetries_used = [random.choice(SYMMETRIES) for _ in features]
    return symmetries_used, [apply_symmetry_feat(s, f)
                             for s, f in zip(symmetries_used, features)]


def invert_symmetries_pi(symmetries, pis):
    """Undo previously applied symmetries on the matching policy vectors."""
    return [apply_symmetry_pi(invert_symmetry(s), pi)
            for s, pi in zip(symmetries, pis)]
def rotate_train_nhwc(x, pi):
    """Apply one randomly chosen dihedral symmetry to an NHWC feature tensor
    and its policy vector in lockstep, as graph-mode TF ops.

    The symmetry index is decomposed bitwise: bit 0 flips left/right,
    bit 1 flips up/down, bit 2 transposes -- together generating all 8
    board symmetries. The pass move (last policy entry) is left in place.
    """
    sym = tf.compat.v1.random_uniform(
        [],
        minval=0,
        maxval=len(SYMMETRIES),
        dtype=tf.int32,
        seed=123)

    def rotate(tensor):
        # flipLeftRight
        tensor = tf.where(
            tf.bitwise.bitwise_and(sym, 1) > 0,
            tf.reverse(tensor, axis=[0]),
            tensor)

        # flipUpDown
        tensor = tf.where(
            tf.bitwise.bitwise_and(sym, 2) > 0,
            tf.reverse(tensor, axis=[1]),
            tensor)

        # flipDiagonal
        tensor = tf.where(
            tf.bitwise.bitwise_and(sym, 4) > 0,
            tf.transpose(tensor, perm=[1, 0, 2]),
            tensor)

        return tensor

    # TODO(tommadams): use tf.ensure_shape instead of tf.assert_equal.
    squares = go.N * go.N
    assert_shape_pi = tf.assert_equal(pi.shape.as_list(), [squares + 1])
    x_shape = x.shape.as_list()
    assert_shape_x = tf.assert_equal(x_shape, [go.N, go.N, x_shape[2]])

    pi_move = tf.slice(pi, [0], [squares], name="slice_moves")
    pi_pass = tf.slice(pi, [squares], [1], name="slice_pass")
    # Add a final dim so that x and pi have same shape: [N,N,num_features].
    pi_n_by_n = tf.reshape(pi_move, [go.N, go.N, 1])

    with tf.control_dependencies([assert_shape_x, assert_shape_pi]):
        pi_rot = tf.concat(
            [tf.reshape(rotate(pi_n_by_n), [squares]), pi_pass],
            axis=0)

    return rotate(x), pi_rot
def rotate_train_nchw(x, pi):
    """NCHW variant of rotate_train_nhwc: same random dihedral symmetry
    applied to features and policy, with the board on axes 1 and 2."""
    sym = tf.compat.v1.random_uniform(
        [],
        minval=0,
        maxval=len(SYMMETRIES),
        dtype=tf.int32,
        seed=123)

    def rotate(tensor):
        # flipLeftRight
        tensor = tf.where(
            tf.bitwise.bitwise_and(sym, 1) > 0,
            tf.reverse(tensor, axis=[1]),
            tensor)

        # flipUpDown
        tensor = tf.where(
            tf.bitwise.bitwise_and(sym, 2) > 0,
            tf.reverse(tensor, axis=[2]),
            tensor)

        # flipDiagonal
        tensor = tf.where(
            tf.bitwise.bitwise_and(sym, 4) > 0,
            tf.transpose(tensor, perm=[0, 2, 1]),
            tensor)

        return tensor

    # TODO(tommadams): use tf.ensure_shape instead of tf.assert_equal.
    squares = go.N * go.N
    assert_shape_pi = tf.assert_equal(pi.shape.as_list(), [squares + 1])
    x_shape = x.shape.as_list()
    assert_shape_x = tf.assert_equal(x_shape, [x_shape[0], go.N, go.N])

    pi_move = tf.slice(pi, [0], [squares], name="slice_moves")
    pi_pass = tf.slice(pi, [squares], [1], name="slice_pass")
    # Add a dim so that x and pi have same shape: [num_features,N,N].
    pi_n_by_n = tf.reshape(pi_move, [1, go.N, go.N])

    with tf.control_dependencies([assert_shape_x, assert_shape_pi]):
        pi_rot = tf.concat(
            [tf.reshape(rotate(pi_n_by_n), [squares]), pi_pass],
            axis=0)

    return rotate(x), pi_rot
| apache-2.0 |
mislavcimpersak/anydo-widget | anydo.widget/requests/packages/chardet/sbcharsetprober.py | 2927 | 4793 | ######################## BEGIN LICENSE BLOCK ########################
# The Original Code is Mozilla Universal charset detector code.
#
# The Initial Developer of the Original Code is
# Netscape Communications Corporation.
# Portions created by the Initial Developer are Copyright (C) 2001
# the Initial Developer. All Rights Reserved.
#
# Contributor(s):
# Mark Pilgrim - port to Python
# Shy Shalom - original C code
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
# 02110-1301 USA
######################### END LICENSE BLOCK #########################
import sys
from . import constants
from .charsetprober import CharSetProber
from .compat import wrap_ord
# Number of most-frequent characters tracked per language model; only pairs
# of characters whose frequency order is below this take part in sequence
# statistics.
SAMPLE_SIZE = 64
# Minimum number of observed character sequences before the confidence
# shortcuts (positive/negative) may end detection early.
SB_ENOUGH_REL_THRESHOLD = 1024
# Confidence above which the prober declares itself the winner.
POSITIVE_SHORTCUT_THRESHOLD = 0.95
# Confidence below which the prober rules itself out.
NEGATIVE_SHORTCUT_THRESHOLD = 0.05
# Frequency orders at or above this value are symbols/punctuation and are not
# counted as characters of the language.
SYMBOL_CAT_ORDER = 250
# Number of sequence-likelihood categories in the precedence matrix.
NUMBER_OF_SEQ_CAT = 4
# Index of the "most likely sequence" category.
POSITIVE_CAT = NUMBER_OF_SEQ_CAT - 1
#NEGATIVE_CAT = 0
class SingleByteCharSetProber(CharSetProber):
    """Prober for single-byte (8-bit) encodings.

    Confidence is derived from how often adjacent character pairs fall into
    the frequent ("positive") sequence categories of the supplied language
    model, relative to the model's typical positive ratio.
    """

    def __init__(self, model, reversed=False, nameProber=None):
        """
        model      -- language model dict with 'charToOrderMap',
                      'precedenceMatrix', 'charsetName', etc.
        reversed   -- True if every character pair must be looked up in
                      reverse order (used for visual-order Hebrew models).
        nameProber -- optional auxiliary prober that supplies the reported
                      charset name instead of the model's own.
        """
        CharSetProber.__init__(self)
        self._mModel = model
        # TRUE if we need to reverse every pair in the model lookup
        self._mReversed = reversed
        # Optional auxiliary prober for name decision
        self._mNameProber = nameProber
        self.reset()

    def reset(self):
        """Clear all accumulated statistics so the prober can be reused."""
        CharSetProber.reset(self)
        # Frequency order of the previously seen character; 255 means "none
        # yet" and can never start a sequence (255 >= SAMPLE_SIZE).
        self._mLastOrder = 255
        self._mSeqCounters = [0] * NUMBER_OF_SEQ_CAT
        self._mTotalSeqs = 0
        self._mTotalChar = 0
        # characters that fall in our sampling range
        self._mFreqChar = 0

    def get_charset_name(self):
        """Return the detected charset name (delegating to the name prober
        when one was supplied)."""
        if self._mNameProber:
            return self._mNameProber.get_charset_name()
        else:
            return self._mModel['charsetName']

    def feed(self, aBuf):
        """Consume a chunk of bytes, updating sequence statistics, and return
        the current detection state."""
        if not self._mModel['keepEnglishLetter']:
            aBuf = self.filter_without_english_letters(aBuf)
        aLen = len(aBuf)
        if not aLen:
            return self.get_state()
        for c in aBuf:
            order = self._mModel['charToOrderMap'][wrap_ord(c)]
            if order < SYMBOL_CAT_ORDER:
                self._mTotalChar += 1
            if order < SAMPLE_SIZE:
                self._mFreqChar += 1
                # Two frequent characters in a row form one sequence sample.
                if self._mLastOrder < SAMPLE_SIZE:
                    self._mTotalSeqs += 1
                    if not self._mReversed:
                        i = (self._mLastOrder * SAMPLE_SIZE) + order
                    else:  # reverse the order of the letters in the lookup
                        i = (order * SAMPLE_SIZE) + self._mLastOrder
                    self._mSeqCounters[self._mModel['precedenceMatrix'][i]] += 1
            self._mLastOrder = order
        if self.get_state() == constants.eDetecting:
            if self._mTotalSeqs > SB_ENOUGH_REL_THRESHOLD:
                cf = self.get_confidence()
                if cf > POSITIVE_SHORTCUT_THRESHOLD:
                    if constants._debug:
                        # BUGFIX: the original implicit string concatenation
                        # produced "we have awinner" (missing space).
                        sys.stderr.write('%s confidence = %s, we have a '
                                         'winner\n' %
                                         (self._mModel['charsetName'], cf))
                    self._mState = constants.eFoundIt
                elif cf < NEGATIVE_SHORTCUT_THRESHOLD:
                    if constants._debug:
                        # BUGFIX: restored the missing space and fixed the
                        # "threshhold" typo in this debug message.
                        sys.stderr.write('%s confidence = %s, below negative '
                                         'shortcut threshold %s\n' %
                                         (self._mModel['charsetName'], cf,
                                          NEGATIVE_SHORTCUT_THRESHOLD))
                    self._mState = constants.eNotMe
        return self.get_state()

    def get_confidence(self):
        """Return a confidence in (0, 0.99] that the input is this charset."""
        r = 0.01
        if self._mTotalSeqs > 0:
            # Ratio of highly-likely sequences, normalised by the model's
            # typical ratio, then scaled by the share of frequent characters.
            # _mTotalChar > 0 is guaranteed here because a sequence requires
            # two counted characters.
            r = ((1.0 * self._mSeqCounters[POSITIVE_CAT]) / self._mTotalSeqs
                 / self._mModel['mTypicalPositiveRatio'])
            r = r * self._mFreqChar / self._mTotalChar
        if r >= 1.0:
            r = 0.99
        return r
| mit |
LSSTDESC/ReprocessingTaskForce | config/v14.0/jointcalConfig.py | 4 | 1223 | # Configuration file for jointcal
from lsst.meas.algorithms import LoadIndexedReferenceObjectsTask

# --- Astrometric calibration: match against the Pan-STARRS indexed catalog ---
config.astrometryRefObjLoader.retarget(LoadIndexedReferenceObjectsTask)
config.astrometryRefObjLoader.ref_dataset_name = 'pan-starrs'
# Map each camera filter onto the Pan-STARRS band used for reference fluxes
# (u has no Pan-STARRS counterpart, so it falls back to g).
config.astrometryRefObjLoader.filterMap = {
    'u': 'g',
    'g': 'g',
    'r': 'r',
    'i': 'i',
    'i2': 'i',
    'z': 'z',
    'y': 'y',
}

# --- Photometric calibration: match against the SDSS indexed catalog ---
config.doPhotometry = True
config.photometryRefObjLoader.retarget(LoadIndexedReferenceObjectsTask)
config.photometryRefObjLoader.ref_dataset_name = 'sdss'
# Map each camera filter onto an SDSS band (SDSS has no y band, so y uses Z).
config.photometryRefObjLoader.filterMap = {
    'u': 'U',
    'g': 'G',
    'r': 'R',
    'i': 'I',
    'i2': 'I',
    'z': 'Z',
    'y': 'Z',
}

# Source selection for matching: minimum signal-to-noise ratio (in the flux
# specified by sourceFluxType); a value <= 0 disables the cut. The 'matcher'
# selector already defaults to 40.0; the 'astrometry' selector defaults to 10,
# so it is raised explicitly here.
config.sourceSelector['astrometry'].minSnr = 40.0
| gpl-2.0 |
youdonghai/intellij-community | python/helpers/pydev/third_party/pep8/lib2to3/lib2to3/fixes/fix_isinstance.py | 326 | 1609 | # Copyright 2008 Armin Ronacher.
# Licensed to PSF under a Contributor Agreement.
"""Fixer that cleans up a tuple argument to isinstance after the tokens
in it were fixed. This is mainly used to remove double occurrences of
tokens as a leftover of the long -> int / unicode -> str conversion.
eg. isinstance(x, (int, long)) -> isinstance(x, (int, int))
-> isinstance(x, int)
"""
from .. import fixer_base
from ..fixer_util import token
class FixIsinstance(fixer_base.BaseFix):
    """Collapse duplicate names in the tuple argument of ``isinstance``.

    After earlier fixers rewrite type names (long -> int, unicode -> str),
    ``isinstance(x, (int, long))`` becomes ``isinstance(x, (int, int))``;
    this fixer reduces it to ``isinstance(x, int)``.
    """
    BM_compatible = True
    PATTERN = """
    power<
        'isinstance'
        trailer< '(' arglist< any ',' atom< '('
            args=testlist_gexp< any+ >
        ')' > > ')' >
    >
    """
    # Must run after the fixers that introduce the duplicate names.
    run_order = 6
    def transform(self, node, results):
        # Names already kept once; any later occurrence is a duplicate.
        names_inserted = set()
        testlist = results["args"]
        args = testlist.children
        new_args = []
        iterator = enumerate(args)
        for idx, arg in iterator:
            if arg.type == token.NAME and arg.value in names_inserted:
                # Duplicate: drop it, and when a comma follows, advance the
                # shared iterator one extra step so the comma is dropped too.
                # NOTE(review): `iterator.next()` is Python 2 syntax; this
                # copy ships in a python27 helper tree, so that looks
                # intentional — confirm before reusing under Python 3.
                if idx < len(args) - 1 and args[idx + 1].type == token.COMMA:
                    iterator.next()
                    continue
            else:
                new_args.append(arg)
                if arg.type == token.NAME:
                    names_inserted.add(arg.value)
        # A trailing comma can remain when the final element was a duplicate.
        if new_args and new_args[-1].type == token.COMMA:
            del new_args[-1]
        if len(new_args) == 1:
            # Only one type left: replace the whole parenthesized atom with
            # the bare name, preserving the original whitespace prefix.
            atom = testlist.parent
            new_args[0].prefix = atom.prefix
            atom.replace(new_args[0])
        else:
            # Keep the tuple, minus the duplicates, and mark the node dirty.
            args[:] = new_args
            node.changed()
| apache-2.0 |
75651/kbengine_cloud | kbe/src/lib/python/Lib/lib2to3/fixes/fix_unicode.py | 136 | 1256 | r"""Fixer for unicode.
* Changes unicode to str and unichr to chr.
* If "...\u..." is not unicode literal change it into "...\\u...".
* Change u"..." into "...".
"""
from ..pgen2 import token
from .. import fixer_base
# Name rewrites applied by FixUnicode: unichr -> chr, unicode -> str.
_mapping = {"unichr" : "chr", "unicode" : "str"}
class FixUnicode(fixer_base.BaseFix):
    """Rewrite unicode-era names and string literals for Python 3.

    ``unicode`` becomes ``str`` and ``unichr`` becomes ``chr``; ``u"..."``
    prefixes are stripped; and in non-unicode-literal files, ``\\u``/``\\U``
    escapes inside plain string literals are doubled so they stay inert.
    """
    BM_compatible = True
    PATTERN = "STRING | 'unicode' | 'unichr'"

    def start_tree(self, tree, filename):
        super(FixUnicode, self).start_tree(tree, filename)
        # When the module already has `from __future__ import unicode_literals`
        # its string escapes are left untouched.
        self.unicode_literals = 'unicode_literals' in tree.future_features

    def transform(self, node, results):
        if node.type == token.NAME:
            renamed = node.clone()
            renamed.value = _mapping[node.value]
            return renamed
        if node.type != token.STRING:
            return None
        text = node.value
        if not self.unicode_literals and text[0] in '\'"' and '\\' in text:
            # Double each \u / \U escape, but never touch one that is itself
            # preceded by a backslash: split on literal backslash pairs first
            # so only genuine escape introducers are rewritten.
            pieces = [
                chunk.replace('\\u', r'\\u').replace('\\U', r'\\U')
                for chunk in text.split(r'\\')
            ]
            text = r'\\'.join(pieces)
        if text[0] in 'uU':
            text = text[1:]
        if text == node.value:
            # Nothing changed: return the original node so lib2to3 does not
            # mark the tree as modified.
            return node
        replacement = node.clone()
        replacement.value = text
        return replacement
| lgpl-3.0 |
GitAngel/django | tests/postgres_tests/test_ranges.py | 161 | 24567 | import datetime
import json
import unittest
from django import forms
from django.core import exceptions, serializers
from django.db import connection
from django.db.models import F
from django.test import TestCase, override_settings
from django.utils import timezone
from . import PostgreSQLTestCase
from .models import RangeLookupsModel, RangesModel
try:
from psycopg2.extras import DateRange, DateTimeTZRange, NumericRange
from django.contrib.postgres import fields as pg_fields, forms as pg_forms
from django.contrib.postgres.validators import (
RangeMaxValueValidator, RangeMinValueValidator,
)
except ImportError:
pass
def skipUnlessPG92(test):
    """Decorator: skip `test` unless the database is PostgreSQL >= 9.2.

    Non-PostgreSQL backends have no `pg_version` attribute, which counts as
    version 0 and therefore also skips.
    """
    pg_version = getattr(connection, 'pg_version', 0)
    if pg_version < 90200:
        return unittest.skip('PostgreSQL >= 9.2 required')(test)
    return test
@skipUnlessPG92
class TestSaveLoad(TestCase):
    """Round-trip tests: values assigned to RangesModel range fields must
    reload unchanged from the database."""
    def test_all_fields(self):
        now = timezone.now()
        instance = RangesModel(
            ints=NumericRange(0, 10),
            bigints=NumericRange(10, 20),
            floats=NumericRange(20, 30),
            timestamps=DateTimeTZRange(now - datetime.timedelta(hours=1), now),
            dates=DateRange(now.date() - datetime.timedelta(days=1), now.date()),
        )
        instance.save()
        loaded = RangesModel.objects.get()
        self.assertEqual(instance.ints, loaded.ints)
        self.assertEqual(instance.bigints, loaded.bigints)
        self.assertEqual(instance.floats, loaded.floats)
        self.assertEqual(instance.timestamps, loaded.timestamps)
        self.assertEqual(instance.dates, loaded.dates)
    def test_range_object(self):
        r = NumericRange(0, 10)
        instance = RangesModel(ints=r)
        instance.save()
        loaded = RangesModel.objects.get()
        self.assertEqual(r, loaded.ints)
    def test_tuple(self):
        # A plain 2-tuple is accepted and stored as a half-open NumericRange.
        instance = RangesModel(ints=(0, 10))
        instance.save()
        loaded = RangesModel.objects.get()
        self.assertEqual(NumericRange(0, 10), loaded.ints)
    def test_range_object_boundaries(self):
        # '[]' makes both bounds inclusive, so the upper endpoint (10) is
        # contained in the reloaded range.
        r = NumericRange(0, 10, '[]')
        instance = RangesModel(floats=r)
        instance.save()
        loaded = RangesModel.objects.get()
        self.assertEqual(r, loaded.floats)
        self.assertTrue(10 in loaded.floats)
    def test_unbounded(self):
        # Both bounds None with '()' bounds: a fully unbounded range.
        r = NumericRange(None, None, '()')
        instance = RangesModel(floats=r)
        instance.save()
        loaded = RangesModel.objects.get()
        self.assertEqual(r, loaded.floats)
    def test_empty(self):
        # The special "empty" range is a real value, distinct from NULL.
        r = NumericRange(empty=True)
        instance = RangesModel(ints=r)
        instance.save()
        loaded = RangesModel.objects.get()
        self.assertEqual(r, loaded.ints)
    def test_null(self):
        # None is stored as SQL NULL, not as an empty range.
        instance = RangesModel(ints=None)
        instance.save()
        loaded = RangesModel.objects.get()
        self.assertIsNone(loaded.ints)
@skipUnlessPG92
class TestQuerying(TestCase):
    """Lookups specific to range fields: containment, overlap, comparison,
    adjacency and bound access."""
    @classmethod
    def setUpTestData(cls):
        # Fixture legend (referenced by index throughout the tests):
        #   objs[0] = [0, 10)    objs[1] = [5, 15)    objs[2] = (None, 0)
        #   objs[3] = empty      objs[4] = NULL
        cls.objs = [
            RangesModel.objects.create(ints=NumericRange(0, 10)),
            RangesModel.objects.create(ints=NumericRange(5, 15)),
            RangesModel.objects.create(ints=NumericRange(None, 0)),
            RangesModel.objects.create(ints=NumericRange(empty=True)),
            RangesModel.objects.create(ints=None),
        ]
    def test_exact(self):
        self.assertSequenceEqual(
            RangesModel.objects.filter(ints__exact=NumericRange(0, 10)),
            [self.objs[0]],
        )
    def test_isnull(self):
        self.assertSequenceEqual(
            RangesModel.objects.filter(ints__isnull=True),
            [self.objs[4]],
        )
    def test_isempty(self):
        self.assertSequenceEqual(
            RangesModel.objects.filter(ints__isempty=True),
            [self.objs[3]],
        )
    def test_contains(self):
        self.assertSequenceEqual(
            RangesModel.objects.filter(ints__contains=8),
            [self.objs[0], self.objs[1]],
        )
    def test_contains_range(self):
        self.assertSequenceEqual(
            RangesModel.objects.filter(ints__contains=NumericRange(3, 8)),
            [self.objs[0]],
        )
    def test_contained_by(self):
        # The empty range is contained by every range, so objs[3] matches.
        self.assertSequenceEqual(
            RangesModel.objects.filter(ints__contained_by=NumericRange(0, 20)),
            [self.objs[0], self.objs[1], self.objs[3]],
        )
    def test_overlap(self):
        self.assertSequenceEqual(
            RangesModel.objects.filter(ints__overlap=NumericRange(3, 8)),
            [self.objs[0], self.objs[1]],
        )
    def test_fully_lt(self):
        # "fully less than": the whole stored range lies strictly below.
        self.assertSequenceEqual(
            RangesModel.objects.filter(ints__fully_lt=NumericRange(5, 10)),
            [self.objs[2]],
        )
    def test_fully_gt(self):
        self.assertSequenceEqual(
            RangesModel.objects.filter(ints__fully_gt=NumericRange(5, 10)),
            [],
        )
    def test_not_lt(self):
        self.assertSequenceEqual(
            RangesModel.objects.filter(ints__not_lt=NumericRange(5, 10)),
            [self.objs[1]],
        )
    def test_not_gt(self):
        self.assertSequenceEqual(
            RangesModel.objects.filter(ints__not_gt=NumericRange(5, 10)),
            [self.objs[0], self.objs[2]],
        )
    def test_adjacent_to(self):
        self.assertSequenceEqual(
            RangesModel.objects.filter(ints__adjacent_to=NumericRange(0, 5)),
            [self.objs[1], self.objs[2]],
        )
    def test_startswith(self):
        # startswith/endswith address the lower/upper bound respectively.
        self.assertSequenceEqual(
            RangesModel.objects.filter(ints__startswith=0),
            [self.objs[0]],
        )
    def test_endswith(self):
        self.assertSequenceEqual(
            RangesModel.objects.filter(ints__endswith=0),
            [self.objs[2]],
        )
    def test_startswith_chaining(self):
        self.assertSequenceEqual(
            RangesModel.objects.filter(ints__startswith__gte=0),
            [self.objs[0], self.objs[1]],
        )
@skipUnlessPG92
class TestQueringWithRanges(TestCase):
    """Range lookups applied to scalar model fields: `contained_by` with a
    range on the right-hand side.

    NOTE(review): the class name has a typo ("Quering"); it is left as-is
    here because renaming would change the test's public label.
    """
    def test_date_range(self):
        objs = [
            RangeLookupsModel.objects.create(date='2015-01-01'),
            RangeLookupsModel.objects.create(date='2015-05-05'),
        ]
        self.assertSequenceEqual(
            RangeLookupsModel.objects.filter(date__contained_by=DateRange('2015-01-01', '2015-05-04')),
            [objs[0]],
        )
    def test_date_range_datetime_field(self):
        # The __date transform lets a DateRange constrain a datetime column.
        objs = [
            RangeLookupsModel.objects.create(timestamp='2015-01-01'),
            RangeLookupsModel.objects.create(timestamp='2015-05-05'),
        ]
        self.assertSequenceEqual(
            RangeLookupsModel.objects.filter(timestamp__date__contained_by=DateRange('2015-01-01', '2015-05-04')),
            [objs[0]],
        )
    def test_datetime_range(self):
        objs = [
            RangeLookupsModel.objects.create(timestamp='2015-01-01T09:00:00'),
            RangeLookupsModel.objects.create(timestamp='2015-05-05T17:00:00'),
        ]
        self.assertSequenceEqual(
            RangeLookupsModel.objects.filter(
                timestamp__contained_by=DateTimeTZRange('2015-01-01T09:00', '2015-05-04T23:55')
            ),
            [objs[0]],
        )
    def test_integer_range(self):
        objs = [
            RangeLookupsModel.objects.create(integer=5),
            RangeLookupsModel.objects.create(integer=99),
            RangeLookupsModel.objects.create(integer=-1),
        ]
        self.assertSequenceEqual(
            RangeLookupsModel.objects.filter(integer__contained_by=NumericRange(1, 98)),
            [objs[0]]
        )
    def test_biginteger_range(self):
        objs = [
            RangeLookupsModel.objects.create(big_integer=5),
            RangeLookupsModel.objects.create(big_integer=99),
            RangeLookupsModel.objects.create(big_integer=-1),
        ]
        self.assertSequenceEqual(
            RangeLookupsModel.objects.filter(big_integer__contained_by=NumericRange(1, 98)),
            [objs[0]]
        )
    def test_float_range(self):
        objs = [
            RangeLookupsModel.objects.create(float=5),
            RangeLookupsModel.objects.create(float=99),
            RangeLookupsModel.objects.create(float=-1),
        ]
        self.assertSequenceEqual(
            RangeLookupsModel.objects.filter(float__contained_by=NumericRange(1, 98)),
            [objs[0]]
        )
    def test_f_ranges(self):
        # The right-hand side of contained_by may be an F() reference to a
        # related model's range field.
        parent = RangesModel.objects.create(floats=NumericRange(0, 10))
        objs = [
            RangeLookupsModel.objects.create(float=5, parent=parent),
            RangeLookupsModel.objects.create(float=99, parent=parent),
        ]
        self.assertSequenceEqual(
            RangeLookupsModel.objects.filter(float__contained_by=F('parent__floats')),
            [objs[0]]
        )
    def test_exclude(self):
        self.assertSequenceEqual(
            RangeLookupsModel.objects.exclude(float__contained_by=NumericRange(0, 100)),
            [objs[2]]
        )
@skipUnlessPG92
class TestSerialization(TestCase):
    """Range fields must survive a JSON serialize/deserialize round trip."""

    # Expected serializer output. Each range field is itself serialized as a
    # JSON string inside the outer JSON document, hence the double escaping.
    test_data = (
        '[{"fields": {"ints": "{\\"upper\\": \\"10\\", \\"lower\\": \\"0\\", '
        '\\"bounds\\": \\"[)\\"}", "floats": "{\\"empty\\": true}", '
        '"bigints": null, "timestamps": "{\\"upper\\": \\"2014-02-02T12:12:12+00:00\\", '
        '\\"lower\\": \\"2014-01-01T00:00:00+00:00\\", \\"bounds\\": \\"[)\\"}", '
        '"dates": "{\\"upper\\": \\"2014-02-02\\", \\"lower\\": \\"2014-01-01\\", \\"bounds\\": \\"[)\\"}" }, '
        '"model": "postgres_tests.rangesmodel", "pk": null}]'
    )
    lower_date = datetime.date(2014, 1, 1)
    upper_date = datetime.date(2014, 2, 2)
    lower_dt = datetime.datetime(2014, 1, 1, 0, 0, 0, tzinfo=timezone.utc)
    upper_dt = datetime.datetime(2014, 2, 2, 12, 12, 12, tzinfo=timezone.utc)

    def test_dumping(self):
        """Serializing a model instance yields the expected payload."""
        instance = RangesModel(ints=NumericRange(0, 10), floats=NumericRange(empty=True),
                               timestamps=DateTimeTZRange(self.lower_dt, self.upper_dt),
                               dates=DateRange(self.lower_date, self.upper_date))
        data = serializers.serialize('json', [instance])
        dumped = json.loads(data)
        # The embedded range strings have unstable key order, so decode them
        # on both sides before comparing as dicts.
        for field in ('ints', 'dates', 'timestamps'):
            dumped[0]['fields'][field] = json.loads(dumped[0]['fields'][field])
        check = json.loads(self.test_data)
        for field in ('ints', 'dates', 'timestamps'):
            check[0]['fields'][field] = json.loads(check[0]['fields'][field])
        self.assertEqual(dumped, check)

    def test_loading(self):
        """Deserializing restores range values, empty ranges, and NULLs."""
        instance = list(serializers.deserialize('json', self.test_data))[0].object
        self.assertEqual(instance.ints, NumericRange(0, 10))
        self.assertEqual(instance.floats, NumericRange(empty=True))
        # Idiomatic None check (was assertEqual(..., None)).
        self.assertIsNone(instance.bigints)
class TestValidators(PostgreSQLTestCase):
    """Boundary validators for range values."""

    def test_max(self):
        """RangeMaxValueValidator passes in-bound ranges, rejects others."""
        check = RangeMaxValueValidator(5)
        # A range entirely at or below the bound validates silently.
        check(NumericRange(0, 5))
        with self.assertRaises(exceptions.ValidationError) as ctx:
            check(NumericRange(0, 10))
        error = ctx.exception
        self.assertEqual(error.messages[0], 'Ensure that this range is completely less than or equal to 5.')
        self.assertEqual(error.code, 'max_value')

    def test_min(self):
        """RangeMinValueValidator passes in-bound ranges, rejects others."""
        check = RangeMinValueValidator(5)
        check(NumericRange(10, 15))
        with self.assertRaises(exceptions.ValidationError) as ctx:
            check(NumericRange(0, 10))
        error = ctx.exception
        self.assertEqual(error.messages[0], 'Ensure that this range is completely greater than or equal to 5.')
        self.assertEqual(error.code, 'min_value')
class TestFormField(PostgreSQLTestCase):
    """Range form fields: cleaning valid input, validation error messages,
    widget rendering, and the model-field to form-field mapping."""
    def test_valid_integer(self):
        field = pg_forms.IntegerRangeField()
        value = field.clean(['1', '2'])
        self.assertEqual(value, NumericRange(1, 2))
    def test_valid_floats(self):
        field = pg_forms.FloatRangeField()
        value = field.clean(['1.12345', '2.001'])
        self.assertEqual(value, NumericRange(1.12345, 2.001))
    def test_valid_timestamps(self):
        field = pg_forms.DateTimeRangeField()
        value = field.clean(['01/01/2014 00:00:00', '02/02/2014 12:12:12'])
        lower = datetime.datetime(2014, 1, 1, 0, 0, 0)
        upper = datetime.datetime(2014, 2, 2, 12, 12, 12)
        self.assertEqual(value, DateTimeTZRange(lower, upper))
    def test_valid_dates(self):
        field = pg_forms.DateRangeField()
        value = field.clean(['01/01/2014', '02/02/2014'])
        lower = datetime.date(2014, 1, 1)
        upper = datetime.date(2014, 2, 2)
        self.assertEqual(value, DateRange(lower, upper))
    def test_using_split_datetime_widget(self):
        # A custom base_field propagates its widget: four inputs in total,
        # two (date + time) per bound.
        class SplitDateTimeRangeField(pg_forms.DateTimeRangeField):
            base_field = forms.SplitDateTimeField
        class SplitForm(forms.Form):
            field = SplitDateTimeRangeField()
        form = SplitForm()
        self.assertHTMLEqual(str(form), '''
        <tr>
            <th>
            <label for="id_field_0">Field:</label>
            </th>
            <td>
                <input id="id_field_0_0" name="field_0_0" type="text" />
                <input id="id_field_0_1" name="field_0_1" type="text" />
                <input id="id_field_1_0" name="field_1_0" type="text" />
                <input id="id_field_1_1" name="field_1_1" type="text" />
            </td>
        </tr>
        ''')
        form = SplitForm({
            'field_0_0': '01/01/2014',
            'field_0_1': '00:00:00',
            'field_1_0': '02/02/2014',
            'field_1_1': '12:12:12',
        })
        self.assertTrue(form.is_valid())
        lower = datetime.datetime(2014, 1, 1, 0, 0, 0)
        upper = datetime.datetime(2014, 2, 2, 12, 12, 12)
        self.assertEqual(form.cleaned_data['field'], DateTimeTZRange(lower, upper))
    def test_none(self):
        field = pg_forms.IntegerRangeField(required=False)
        value = field.clean(['', ''])
        self.assertEqual(value, None)
    def test_rendering(self):
        class RangeForm(forms.Form):
            ints = pg_forms.IntegerRangeField()
        self.assertHTMLEqual(str(RangeForm()), '''
        <tr>
            <th><label for="id_ints_0">Ints:</label></th>
            <td>
                <input id="id_ints_0" name="ints_0" type="number" />
                <input id="id_ints_1" name="ints_1" type="number" />
            </td>
        </tr>
        ''')
    # --- Per-type validation: lower > upper, open bounds, bad input types ---
    def test_integer_lower_bound_higher(self):
        field = pg_forms.IntegerRangeField()
        with self.assertRaises(exceptions.ValidationError) as cm:
            field.clean(['10', '2'])
        self.assertEqual(cm.exception.messages[0], 'The start of the range must not exceed the end of the range.')
        self.assertEqual(cm.exception.code, 'bound_ordering')
    def test_integer_open(self):
        field = pg_forms.IntegerRangeField()
        value = field.clean(['', '0'])
        self.assertEqual(value, NumericRange(None, 0))
    def test_integer_incorrect_data_type(self):
        field = pg_forms.IntegerRangeField()
        with self.assertRaises(exceptions.ValidationError) as cm:
            field.clean('1')
        self.assertEqual(cm.exception.messages[0], 'Enter two whole numbers.')
        self.assertEqual(cm.exception.code, 'invalid')
    def test_integer_invalid_lower(self):
        field = pg_forms.IntegerRangeField()
        with self.assertRaises(exceptions.ValidationError) as cm:
            field.clean(['a', '2'])
        self.assertEqual(cm.exception.messages[0], 'Enter a whole number.')
    def test_integer_invalid_upper(self):
        field = pg_forms.IntegerRangeField()
        with self.assertRaises(exceptions.ValidationError) as cm:
            field.clean(['1', 'b'])
        self.assertEqual(cm.exception.messages[0], 'Enter a whole number.')
    def test_integer_required(self):
        field = pg_forms.IntegerRangeField(required=True)
        with self.assertRaises(exceptions.ValidationError) as cm:
            field.clean(['', ''])
        self.assertEqual(cm.exception.messages[0], 'This field is required.')
        # One filled bound is enough to satisfy "required".
        value = field.clean([1, ''])
        self.assertEqual(value, NumericRange(1, None))
    def test_float_lower_bound_higher(self):
        field = pg_forms.FloatRangeField()
        with self.assertRaises(exceptions.ValidationError) as cm:
            field.clean(['1.8', '1.6'])
        self.assertEqual(cm.exception.messages[0], 'The start of the range must not exceed the end of the range.')
        self.assertEqual(cm.exception.code, 'bound_ordering')
    def test_float_open(self):
        field = pg_forms.FloatRangeField()
        value = field.clean(['', '3.1415926'])
        self.assertEqual(value, NumericRange(None, 3.1415926))
    def test_float_incorrect_data_type(self):
        field = pg_forms.FloatRangeField()
        with self.assertRaises(exceptions.ValidationError) as cm:
            field.clean('1.6')
        self.assertEqual(cm.exception.messages[0], 'Enter two numbers.')
        self.assertEqual(cm.exception.code, 'invalid')
    def test_float_invalid_lower(self):
        field = pg_forms.FloatRangeField()
        with self.assertRaises(exceptions.ValidationError) as cm:
            field.clean(['a', '3.1415926'])
        self.assertEqual(cm.exception.messages[0], 'Enter a number.')
    def test_float_invalid_upper(self):
        field = pg_forms.FloatRangeField()
        with self.assertRaises(exceptions.ValidationError) as cm:
            field.clean(['1.61803399', 'b'])
        self.assertEqual(cm.exception.messages[0], 'Enter a number.')
    def test_float_required(self):
        field = pg_forms.FloatRangeField(required=True)
        with self.assertRaises(exceptions.ValidationError) as cm:
            field.clean(['', ''])
        self.assertEqual(cm.exception.messages[0], 'This field is required.')
        value = field.clean(['1.61803399', ''])
        self.assertEqual(value, NumericRange(1.61803399, None))
    def test_date_lower_bound_higher(self):
        field = pg_forms.DateRangeField()
        with self.assertRaises(exceptions.ValidationError) as cm:
            field.clean(['2013-04-09', '1976-04-16'])
        self.assertEqual(cm.exception.messages[0], 'The start of the range must not exceed the end of the range.')
        self.assertEqual(cm.exception.code, 'bound_ordering')
    def test_date_open(self):
        field = pg_forms.DateRangeField()
        value = field.clean(['', '2013-04-09'])
        self.assertEqual(value, DateRange(None, datetime.date(2013, 4, 9)))
    def test_date_incorrect_data_type(self):
        field = pg_forms.DateRangeField()
        with self.assertRaises(exceptions.ValidationError) as cm:
            field.clean('1')
        self.assertEqual(cm.exception.messages[0], 'Enter two valid dates.')
        self.assertEqual(cm.exception.code, 'invalid')
    def test_date_invalid_lower(self):
        field = pg_forms.DateRangeField()
        with self.assertRaises(exceptions.ValidationError) as cm:
            field.clean(['a', '2013-04-09'])
        self.assertEqual(cm.exception.messages[0], 'Enter a valid date.')
    def test_date_invalid_upper(self):
        field = pg_forms.DateRangeField()
        with self.assertRaises(exceptions.ValidationError) as cm:
            field.clean(['2013-04-09', 'b'])
        self.assertEqual(cm.exception.messages[0], 'Enter a valid date.')
    def test_date_required(self):
        field = pg_forms.DateRangeField(required=True)
        with self.assertRaises(exceptions.ValidationError) as cm:
            field.clean(['', ''])
        self.assertEqual(cm.exception.messages[0], 'This field is required.')
        value = field.clean(['1976-04-16', ''])
        self.assertEqual(value, DateRange(datetime.date(1976, 4, 16), None))
    def test_datetime_lower_bound_higher(self):
        field = pg_forms.DateTimeRangeField()
        with self.assertRaises(exceptions.ValidationError) as cm:
            field.clean(['2006-10-25 14:59', '2006-10-25 14:58'])
        self.assertEqual(cm.exception.messages[0], 'The start of the range must not exceed the end of the range.')
        self.assertEqual(cm.exception.code, 'bound_ordering')
    def test_datetime_open(self):
        field = pg_forms.DateTimeRangeField()
        value = field.clean(['', '2013-04-09 11:45'])
        self.assertEqual(value, DateTimeTZRange(None, datetime.datetime(2013, 4, 9, 11, 45)))
    def test_datetime_incorrect_data_type(self):
        field = pg_forms.DateTimeRangeField()
        with self.assertRaises(exceptions.ValidationError) as cm:
            field.clean('2013-04-09 11:45')
        self.assertEqual(cm.exception.messages[0], 'Enter two valid date/times.')
        self.assertEqual(cm.exception.code, 'invalid')
    def test_datetime_invalid_lower(self):
        field = pg_forms.DateTimeRangeField()
        with self.assertRaises(exceptions.ValidationError) as cm:
            field.clean(['45', '2013-04-09 11:45'])
        self.assertEqual(cm.exception.messages[0], 'Enter a valid date/time.')
    def test_datetime_invalid_upper(self):
        field = pg_forms.DateTimeRangeField()
        with self.assertRaises(exceptions.ValidationError) as cm:
            field.clean(['2013-04-09 11:45', 'sweet pickles'])
        self.assertEqual(cm.exception.messages[0], 'Enter a valid date/time.')
    def test_datetime_required(self):
        field = pg_forms.DateTimeRangeField(required=True)
        with self.assertRaises(exceptions.ValidationError) as cm:
            field.clean(['', ''])
        self.assertEqual(cm.exception.messages[0], 'This field is required.')
        value = field.clean(['2013-04-09 11:45', ''])
        self.assertEqual(value, DateTimeTZRange(datetime.datetime(2013, 4, 9, 11, 45), None))
    @override_settings(USE_TZ=True, TIME_ZONE='Africa/Johannesburg')
    def test_datetime_prepare_value(self):
        # UTC-stored values are presented in the current time zone
        # (Johannesburg is UTC+2, so 16:06 becomes 18:06).
        field = pg_forms.DateTimeRangeField()
        value = field.prepare_value(
            DateTimeTZRange(datetime.datetime(2015, 5, 22, 16, 6, 33, tzinfo=timezone.utc), None)
        )
        self.assertEqual(value, [datetime.datetime(2015, 5, 22, 18, 6, 33), None])
    # --- Model fields must map onto their corresponding form fields ---
    def test_model_field_formfield_integer(self):
        model_field = pg_fields.IntegerRangeField()
        form_field = model_field.formfield()
        self.assertIsInstance(form_field, pg_forms.IntegerRangeField)
    def test_model_field_formfield_biginteger(self):
        model_field = pg_fields.BigIntegerRangeField()
        form_field = model_field.formfield()
        self.assertIsInstance(form_field, pg_forms.IntegerRangeField)
    def test_model_field_formfield_float(self):
        model_field = pg_fields.FloatRangeField()
        form_field = model_field.formfield()
        self.assertIsInstance(form_field, pg_forms.FloatRangeField)
    def test_model_field_formfield_date(self):
        model_field = pg_fields.DateRangeField()
        form_field = model_field.formfield()
        self.assertIsInstance(form_field, pg_forms.DateRangeField)
    def test_model_field_formfield_datetime(self):
        model_field = pg_fields.DateTimeRangeField()
        form_field = model_field.formfield()
        self.assertIsInstance(form_field, pg_forms.DateTimeRangeField)
class TestWidget(PostgreSQLTestCase):
    """Rendering of the two-input range widget."""

    def test_range_widget(self):
        field = pg_forms.ranges.DateTimeRangeField()
        blank_inputs = (
            '<input type="text" name="datetimerange_0" />'
            '<input type="text" name="datetimerange_1" />'
        )
        # Both '' and None render as a pair of empty text inputs.
        self.assertHTMLEqual(field.widget.render('datetimerange', ''), blank_inputs)
        self.assertHTMLEqual(field.widget.render('datetimerange', None), blank_inputs)
        bounds = DateTimeTZRange(
            datetime.datetime(2006, 1, 10, 7, 30),
            datetime.datetime(2006, 2, 12, 9, 50),
        )
        # A populated range fills each input's value attribute.
        self.assertHTMLEqual(
            field.widget.render('datetimerange', bounds),
            '<input type="text" name="datetimerange_0" value="2006-01-10 07:30:00" />'
            '<input type="text" name="datetimerange_1" value="2006-02-12 09:50:00" />'
        )
| bsd-3-clause |
mezz64/home-assistant | homeassistant/components/smartthings/config_flow.py | 10 | 9410 | """Config flow to configure SmartThings."""
import logging
from aiohttp import ClientResponseError
from pysmartthings import APIResponseError, AppOAuth, SmartThings
from pysmartthings.installedapp import format_install_url
import voluptuous as vol
from homeassistant import config_entries
from homeassistant.const import (
CONF_ACCESS_TOKEN,
CONF_CLIENT_ID,
CONF_CLIENT_SECRET,
HTTP_FORBIDDEN,
HTTP_UNAUTHORIZED,
)
from homeassistant.helpers.aiohttp_client import async_get_clientsession
# pylint: disable=unused-import
from .const import (
APP_OAUTH_CLIENT_NAME,
APP_OAUTH_SCOPES,
CONF_APP_ID,
CONF_INSTALLED_APP_ID,
CONF_LOCATION_ID,
CONF_REFRESH_TOKEN,
DOMAIN,
VAL_UID_MATCHER,
)
from .smartapp import (
create_app,
find_app,
format_unique_id,
get_webhook_url,
setup_smartapp,
setup_smartapp_endpoint,
update_app,
validate_webhook_requirements,
)
_LOGGER = logging.getLogger(__name__)
class SmartThingsFlowHandler(config_entries.ConfigFlow, domain=DOMAIN):
"""Handle configuration of SmartThings integrations."""
VERSION = 2
CONNECTION_CLASS = config_entries.CONN_CLASS_CLOUD_PUSH
def __init__(self):
"""Create a new instance of the flow handler."""
self.access_token = None
self.app_id = None
self.api = None
self.oauth_client_secret = None
self.oauth_client_id = None
self.installed_app_id = None
self.refresh_token = None
self.location_id = None
async def async_step_import(self, user_input=None):
"""Occurs when a previously entry setup fails and is re-initiated."""
return await self.async_step_user(user_input)
async def async_step_user(self, user_input=None):
"""Validate and confirm webhook setup."""
await setup_smartapp_endpoint(self.hass)
webhook_url = get_webhook_url(self.hass)
# Abort if the webhook is invalid
if not validate_webhook_requirements(self.hass):
return self.async_abort(
reason="invalid_webhook_url",
description_placeholders={
"webhook_url": webhook_url,
"component_url": "https://www.home-assistant.io/integrations/smartthings/",
},
)
# Show the confirmation
if user_input is None:
return self.async_show_form(
step_id="user",
description_placeholders={"webhook_url": webhook_url},
)
# Show the next screen
return await self.async_step_pat()
async def async_step_pat(self, user_input=None):
"""Get the Personal Access Token and validate it."""
errors = {}
if user_input is None or CONF_ACCESS_TOKEN not in user_input:
return self._show_step_pat(errors)
self.access_token = user_input[CONF_ACCESS_TOKEN]
# Ensure token is a UUID
if not VAL_UID_MATCHER.match(self.access_token):
errors[CONF_ACCESS_TOKEN] = "token_invalid_format"
return self._show_step_pat(errors)
# Setup end-point
self.api = SmartThings(async_get_clientsession(self.hass), self.access_token)
try:
app = await find_app(self.hass, self.api)
if app:
await app.refresh() # load all attributes
await update_app(self.hass, app)
# Find an existing entry to copy the oauth client
existing = next(
(
entry
for entry in self._async_current_entries()
if entry.data[CONF_APP_ID] == app.app_id
),
None,
)
if existing:
self.oauth_client_id = existing.data[CONF_CLIENT_ID]
self.oauth_client_secret = existing.data[CONF_CLIENT_SECRET]
else:
# Get oauth client id/secret by regenerating it
app_oauth = AppOAuth(app.app_id)
app_oauth.client_name = APP_OAUTH_CLIENT_NAME
app_oauth.scope.extend(APP_OAUTH_SCOPES)
client = await self.api.generate_app_oauth(app_oauth)
self.oauth_client_secret = client.client_secret
self.oauth_client_id = client.client_id
else:
app, client = await create_app(self.hass, self.api)
self.oauth_client_secret = client.client_secret
self.oauth_client_id = client.client_id
setup_smartapp(self.hass, app)
self.app_id = app.app_id
except APIResponseError as ex:
if ex.is_target_error():
errors["base"] = "webhook_error"
else:
errors["base"] = "app_setup_error"
_LOGGER.exception(
"API error setting up the SmartApp: %s", ex.raw_error_response
)
return self._show_step_pat(errors)
except ClientResponseError as ex:
if ex.status == HTTP_UNAUTHORIZED:
errors[CONF_ACCESS_TOKEN] = "token_unauthorized"
_LOGGER.debug(
"Unauthorized error received setting up SmartApp", exc_info=True
)
elif ex.status == HTTP_FORBIDDEN:
errors[CONF_ACCESS_TOKEN] = "token_forbidden"
_LOGGER.debug(
"Forbidden error received setting up SmartApp", exc_info=True
)
else:
errors["base"] = "app_setup_error"
_LOGGER.exception("Unexpected error setting up the SmartApp")
return self._show_step_pat(errors)
except Exception: # pylint:disable=broad-except
errors["base"] = "app_setup_error"
_LOGGER.exception("Unexpected error setting up the SmartApp")
return self._show_step_pat(errors)
return await self.async_step_select_location()
async def async_step_select_location(self, user_input=None):
"""Ask user to select the location to setup."""
if user_input is None or CONF_LOCATION_ID not in user_input:
# Get available locations
existing_locations = [
entry.data[CONF_LOCATION_ID] for entry in self._async_current_entries()
]
locations = await self.api.locations()
locations_options = {
location.location_id: location.name
for location in locations
if location.location_id not in existing_locations
}
if not locations_options:
return self.async_abort(reason="no_available_locations")
return self.async_show_form(
step_id="select_location",
data_schema=vol.Schema(
{vol.Required(CONF_LOCATION_ID): vol.In(locations_options)}
),
)
self.location_id = user_input[CONF_LOCATION_ID]
await self.async_set_unique_id(format_unique_id(self.app_id, self.location_id))
return await self.async_step_authorize()
    async def async_step_authorize(self, user_input=None):
        """Wait for the user to authorize the app installation.

        Until the SmartApp callback supplies CONF_INSTALLED_APP_ID, keep
        the flow parked on an external step pointing at the install URL.
        """
        user_input = {} if user_input is None else user_input
        self.installed_app_id = user_input.get(CONF_INSTALLED_APP_ID)
        self.refresh_token = user_input.get(CONF_REFRESH_TOKEN)
        if self.installed_app_id is None:
            # Launch the external setup URL
            url = format_install_url(self.app_id, self.location_id)
            return self.async_external_step(step_id="authorize", url=url)

        # Authorization completed externally; finish with the install step.
        return self.async_external_step_done(next_step_id="install")
    def _show_step_pat(self, errors):
        """Render the personal-access-token form, pre-filling a known token.

        ``errors`` is the (possibly empty) dict of validation errors from a
        previous attempt and is passed straight through to the form.
        """
        if self.access_token is None:
            # Get the token from an existing entry to make it easier to setup multiple locations.
            self.access_token = next(
                (
                    entry.data.get(CONF_ACCESS_TOKEN)
                    for entry in self._async_current_entries()
                ),
                None,
            )

        return self.async_show_form(
            step_id="pat",
            data_schema=vol.Schema(
                {vol.Required(CONF_ACCESS_TOKEN, default=self.access_token): str}
            ),
            errors=errors,
            description_placeholders={
                "token_url": "https://account.smartthings.com/tokens",
                "component_url": "https://www.home-assistant.io/integrations/smartthings/",
            },
        )
    async def async_step_install(self, data=None):
        """Create a config entry at completion of a flow and authorization of the app."""
        # The incoming ``data`` argument is discarded; the entry payload is
        # assembled entirely from state accumulated during the flow.
        data = {
            CONF_ACCESS_TOKEN: self.access_token,
            CONF_REFRESH_TOKEN: self.refresh_token,
            CONF_CLIENT_ID: self.oauth_client_id,
            CONF_CLIENT_SECRET: self.oauth_client_secret,
            CONF_LOCATION_ID: self.location_id,
            CONF_APP_ID: self.app_id,
            CONF_INSTALLED_APP_ID: self.installed_app_id,
        }
        # Use the location's display name as the config entry title.
        location = await self.api.location(data[CONF_LOCATION_ID])
        return self.async_create_entry(title=location.name, data=data)
| apache-2.0 |
HybridF5/nova | nova/tests/unit/virt/libvirt/fake_imagebackend.py | 14 | 2649 | # Copyright 2012 Grid Dynamics
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import os
from nova.virt.libvirt import config
from nova.virt.libvirt import imagebackend
class Backend(object):
    """Test double for nova.virt.libvirt.imagebackend.Backend.

    Produces inert image objects so libvirt driver tests can run without
    touching real disk image files.
    """

    def __init__(self, use_cow):
        # use_cow is accepted only to match the real Backend's signature.
        pass

    def image(self, instance, name, image_type=''):
        """Return a FakeImage: a no-op imagebackend.Image implementation."""

        class FakeImage(imagebackend.Image):
            def __init__(self, instance, name):
                # Path is fabricated from the instance name; no file is created.
                # NOTE(review): the imagebackend.Image base __init__ is
                # deliberately not called here.
                self.path = os.path.join(instance['name'], name)

            def create_image(self, prepare_template, base,
                             size, *args, **kwargs):
                pass

            def resize_image(self, size):
                pass

            def cache(self, fetch_func, filename, size=None, *args, **kwargs):
                pass

            def snapshot(self, name):
                pass

            def libvirt_info(self, disk_bus, disk_dev, device_type,
                             cache_mode, extra_specs, hypervisor_version):
                # Build a minimal file-backed raw-format guest disk config.
                info = config.LibvirtConfigGuestDisk()
                info.source_type = 'file'
                info.source_device = device_type
                info.target_bus = disk_bus
                info.target_dev = disk_dev
                info.driver_cache = cache_mode
                info.driver_format = 'raw'
                info.source_path = self.path
                return info

        return FakeImage(instance, name)

    def snapshot(self, instance, disk_path, image_type=''):
        # NOTE(bfilippov): this is done in favor for
        # snapshot tests in test_libvirt.LibvirtConnTestCase
        return imagebackend.Backend(True).snapshot(instance,
                                                   disk_path,
                                                   image_type=image_type)
class Raw(imagebackend.Image):
    # NOTE(spandhe) Added for test_rescue and test_rescue_config_drive
    """Inert Raw image stand-in; every operation is a no-op."""

    def __init__(self, instance=None, disk_name=None, path=None):
        pass

    def _get_driver_format(self):
        pass

    def correct_format(self):
        pass

    def create_image(self, prepare_template, base, size, *args, **kwargs):
        pass

    def resize_image(self, size):
        pass
| apache-2.0 |
SingTel-DataCo/incubator-superset | superset/migrations/versions/956a063c52b3_adjusting_key_length.py | 17 | 4649 | """adjusting key length
Revision ID: 956a063c52b3
Revises: f0fbf6129e13
Create Date: 2016-05-11 17:28:32.407340
"""
# revision identifiers, used by Alembic.
revision = '956a063c52b3'
down_revision = 'f0fbf6129e13'
from alembic import op
import sqlalchemy as sa
def upgrade():
    """Narrow several VARCHAR(256) key columns down to String(255).

    Each (table, columns) entry is altered inside its own batch context,
    in the same order as the original hand-written migration.
    """
    targets = [
        ('clusters', ['broker_endpoint', 'broker_host',
                      'coordinator_endpoint', 'coordinator_host']),
        ('columns', ['column_name']),
        ('datasources', ['datasource_name']),
        ('table_columns', ['column_name']),
        ('tables', ['schema']),
    ]
    for table_name, column_names in targets:
        with op.batch_alter_table(table_name, schema=None) as batch_op:
            for column_name in column_names:
                batch_op.alter_column(
                    column_name,
                    existing_type=sa.VARCHAR(length=256),
                    type_=sa.String(length=255),
                    existing_nullable=True)
def downgrade():
    """Restore the key columns widened back to VARCHAR(256).

    Mirrors upgrade() exactly, applying the alterations in the reverse
    table/column order used by the original migration.
    """
    targets = [
        ('tables', ['schema']),
        ('table_columns', ['column_name']),
        ('datasources', ['datasource_name']),
        ('columns', ['column_name']),
        ('clusters', ['coordinator_host', 'coordinator_endpoint',
                      'broker_host', 'broker_endpoint']),
    ]
    for table_name, column_names in targets:
        with op.batch_alter_table(table_name, schema=None) as batch_op:
            for column_name in column_names:
                batch_op.alter_column(
                    column_name,
                    existing_type=sa.String(length=255),
                    type_=sa.VARCHAR(length=256),
                    existing_nullable=True)
| apache-2.0 |
pandeyop/tempest | tempest/api/volume/test_volumes_get.py | 11 | 6285 | # Copyright 2012 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from tempest_lib.common.utils import data_utils
from testtools import matchers
from tempest.api.volume import base
from tempest import config
from tempest import test
CONF = config.CONF
class VolumesV2GetTest(base.BaseVolumeTest):
    """Create/show/update/delete lifecycle checks for the v2 volumes API."""

    @classmethod
    def setup_clients(cls):
        super(VolumesV2GetTest, cls).setup_clients()
        cls.client = cls.volumes_client

    @classmethod
    def resource_setup(cls):
        super(VolumesV2GetTest, cls).resource_setup()
        # v1 and v2 name these fields differently; resolve the names once.
        cls.name_field = cls.special_fields['name_field']
        cls.descrip_field = cls.special_fields['descrip_field']

    def _delete_volume(self, volume_id):
        # Delete and block until the volume is actually gone.
        self.client.delete_volume(volume_id)
        self.client.wait_for_resource_deletion(volume_id)

    def _volume_create_get_update_delete(self, **kwargs):
        # Create a volume, Get it's details and Delete the volume
        volume = {}
        v_name = data_utils.rand_name('Volume')
        metadata = {'Type': 'Test'}
        # Create a volume
        kwargs[self.name_field] = v_name
        kwargs['metadata'] = metadata
        volume = self.client.create_volume(**kwargs)
        self.assertIn('id', volume)
        self.addCleanup(self._delete_volume, volume['id'])
        self.client.wait_for_volume_status(volume['id'], 'available')
        self.assertIn(self.name_field, volume)
        self.assertEqual(volume[self.name_field], v_name,
                         "The created volume name is not equal "
                         "to the requested name")
        self.assertTrue(volume['id'] is not None,
                        "Field volume id is empty or not found.")
        # Get Volume information
        fetched_volume = self.client.show_volume(volume['id'])
        self.assertEqual(v_name,
                         fetched_volume[self.name_field],
                         'The fetched Volume name is different '
                         'from the created Volume')
        self.assertEqual(volume['id'],
                         fetched_volume['id'],
                         'The fetched Volume id is different '
                         'from the created Volume')
        self.assertThat(fetched_volume['metadata'].items(),
                        matchers.ContainsAll(metadata.items()),
                        'The fetched Volume metadata misses data '
                        'from the created Volume')

        # Volumes built from an image must report bootable='true'.
        if 'imageRef' in kwargs:
            self.assertEqual('true', fetched_volume['bootable'])
        if 'imageRef' not in kwargs:
            self.assertEqual('false', fetched_volume['bootable'])

        # Update Volume
        # Test volume update when display_name is same with original value
        params = {self.name_field: v_name}
        self.client.update_volume(volume['id'], **params)
        # Test volume update when display_name is new
        new_v_name = data_utils.rand_name('new-Volume')
        new_desc = 'This is the new description of volume'
        params = {self.name_field: new_v_name,
                  self.descrip_field: new_desc}
        update_volume = self.client.update_volume(volume['id'], **params)
        # Assert response body for update_volume method
        self.assertEqual(new_v_name, update_volume[self.name_field])
        self.assertEqual(new_desc, update_volume[self.descrip_field])
        # Assert response body for show_volume method
        updated_volume = self.client.show_volume(volume['id'])
        self.assertEqual(volume['id'], updated_volume['id'])
        self.assertEqual(new_v_name, updated_volume[self.name_field])
        self.assertEqual(new_desc, updated_volume[self.descrip_field])
        self.assertThat(updated_volume['metadata'].items(),
                        matchers.ContainsAll(metadata.items()),
                        'The fetched Volume metadata misses data '
                        'from the created Volume')
        # Test volume create when display_name is none and display_description
        # contains specific characters,
        # then test volume update if display_name is duplicated
        new_volume = {}
        new_v_desc = data_utils.rand_name('@#$%^* description')
        params = {self.descrip_field: new_v_desc,
                  'availability_zone': volume['availability_zone']}
        new_volume = self.client.create_volume(**params)
        self.assertIn('id', new_volume)
        self.addCleanup(self._delete_volume, new_volume['id'])
        self.client.wait_for_volume_status(new_volume['id'], 'available')

        # Duplicating the first volume's name/description must be accepted.
        params = {self.name_field: volume[self.name_field],
                  self.descrip_field: volume[self.descrip_field]}
        self.client.update_volume(new_volume['id'], **params)

        # The bootable flag must survive the updates above.
        if 'imageRef' in kwargs:
            self.assertEqual('true', updated_volume['bootable'])
        if 'imageRef' not in kwargs:
            self.assertEqual('false', updated_volume['bootable'])

    @test.attr(type='smoke')
    @test.idempotent_id('27fb0e9f-fb64-41dd-8bdb-1ffa762f0d51')
    def test_volume_create_get_update_delete(self):
        self._volume_create_get_update_delete()

    @test.attr(type='smoke')
    @test.idempotent_id('54a01030-c7fc-447c-86ee-c1182beae638')
    @test.services('image')
    def test_volume_create_get_update_delete_from_image(self):
        self._volume_create_get_update_delete(imageRef=CONF.compute.image_ref)

    @test.idempotent_id('3f591b4a-7dc6-444c-bd51-77469506b3a1')
    def test_volume_create_get_update_delete_as_clone(self):
        origin = self.create_volume()
        self._volume_create_get_update_delete(source_volid=origin['id'])
class VolumesV1GetTest(VolumesV2GetTest):
    """Run the same lifecycle scenarios against the v1 volumes API."""
    # _api_version is consumed by the base class client selection.
    _api_version = 1
| apache-2.0 |
CruiseDevice/coala | tests/coalaDebugFlagTest.py | 13 | 5032 | import os
import re
import sys
import unittest
from unittest.mock import MagicMock, patch
from coalib import coala
from coala_utils.ContextManagers import prepare_file
from tests.TestUtilities import execute_coala, bear_test_module
# patch gets strangely lost when only defined in method or with context where
# actually needed
@patch('coalib.coala_modes.mode_json')
class coalaDebugFlagTest(unittest.TestCase):
    """Tests for ``coala --debug``: its ipdb requirement and exception hooks."""

    def setUp(self):
        # Save argv so execute_coala's mutations can be undone in tearDown.
        self.old_argv = sys.argv

    def pipReqIsInstalledMock(self):
        """
        Prepare a patch for ``PipRequirement.is_installed`` method that
        always returns ``True``, used for faking an installed ipdb.
        """
        return patch('dependency_management.requirements.PipRequirement.'
                     'PipRequirement.is_installed', lambda self: True)

    def pipReqIsNotInstalledMock(self):
        """
        Prepare a patch for ``PipRequirement.is_installed`` method that
        always returns ``False``, used for faking a not installed ipdb.
        """
        return patch('dependency_management.requirements.PipRequirement.'
                     'PipRequirement.is_installed', lambda self: False)

    def ipdbMock(self):
        """
        Prepare a mocked ``ipdb`` module with a mocked
        ``launch_ipdb_on_exception`` function, which is used in
        ``coala --debug`` mode to open and ``ipdb>`` prompt when unexpected
        exceptions occur
        """
        mock = MagicMock()

        def __exit__(self, *exc_info):
            """
            Make mocked ``ipdb.launch_ipdb_on_exception()`` context just
            reraise the exception.
            """
            raise

        mock.launch_ipdb_on_exception.__enter__ = None
        mock.launch_ipdb_on_exception.__exit__ = __exit__
        return mock

    def tearDown(self):
        sys.argv = self.old_argv

    def test_no_ipdb(self, mocked_mode_json):
        # Without ipdb installed, --debug must abort with exit code 13.
        mocked_mode_json.side_effect = None
        with bear_test_module(), \
                prepare_file(['#fixme '], None) as (lines, filename), \
                self.pipReqIsNotInstalledMock():
            # additionally use RaiseTestBear to verify independency from
            # failing bears
            status, stdout, stderr = execute_coala(
                coala.main, 'coala', '--debug', '--json',
                '-c', os.devnull,
                '-f', re.escape(filename),
                '-b', 'RaiseTestBear')
            assert status == 13
            assert not stdout
            assert '--debug flag requires ipdb.' in stderr

    def test_bear__init__raises(self, mocked_mode_json):
        # A bear whose requirements are unmet must re-raise under ipdb.
        mocked_mode_json.side_effect = None
        mocked_ipdb = self.ipdbMock()
        with bear_test_module(), \
                prepare_file(['#fixme '], None) as (lines, filename), \
                self.pipReqIsInstalledMock(), \
                patch.dict('sys.modules', ipdb=mocked_ipdb), \
                self.assertRaisesRegex(
                    RuntimeError,
                    r'^The bear ErrorTestBear does not fulfill all '
                    r"requirements\. 'I_do_not_exist' is not installed\.$"):
            execute_coala(
                coala.main, 'coala', '--debug',
                '-c', os.devnull,
                '-f', re.escape(filename),
                '-b', 'ErrorTestBear')

        mocked_ipdb.launch_ipdb_on_exception.assert_called_once_with()

    def test_bear_run_raises(self, mocked_mode_json):
        # An exception inside a bear's run() must reach ipdb's handler.
        mocked_mode_json.side_effect = None
        mocked_ipdb = self.ipdbMock()
        with bear_test_module(), \
                prepare_file(['#fixme '], None) as (lines, filename), \
                self.pipReqIsInstalledMock(), \
                patch.dict('sys.modules', ipdb=mocked_ipdb), \
                self.assertRaisesRegex(
                    RuntimeError, r"^That's all the RaiseTestBear can do\.$"):
            execute_coala(
                coala.main, 'coala', '--debug',
                '-c', os.devnull,
                '-f', re.escape(filename),
                '-b', 'RaiseTestBear')

        mocked_ipdb.launch_ipdb_on_exception.assert_called_once_with()

    def test_coala_main_mode_json_launches_ipdb(self, mocked_mode_json):
        # Failures inside coala's own mode function must also hit ipdb.
        mocked_mode_json.side_effect = RuntimeError('Mocked mode_json fails.')
        mocked_ipdb = self.ipdbMock()
        with bear_test_module(), \
                prepare_file(['#fixme '], None) as (lines, filename), \
                self.pipReqIsInstalledMock(), \
                patch.dict('sys.modules', ipdb=mocked_ipdb), \
                self.assertRaisesRegex(RuntimeError,
                                       r'^Mocked mode_json fails\.$'):
            # additionally use RaiseTestBear to verify independency from
            # failing bears
            execute_coala(
                coala.main, 'coala', '--debug', '--json',
                '-c', os.devnull,
                '-f', re.escape(filename),
                '-b', 'RaiseTestBear')

        mocked_ipdb.launch_ipdb_on_exception.assert_called_once_with()
| agpl-3.0 |
Yukarumya/Yukarum-Redfoxes | gfx/angle/src/libANGLE/renderer/angle_format.py | 2 | 1279 | #!/usr/bin/python
# Copyright 2016 The ANGLE Project Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
#
# angle_format.py:
# Utils for ANGLE formats.
import json
import os
def reject_duplicate_keys(pairs):
    """Build a dict from (key, value) pairs, refusing duplicate keys.

    Intended as json.loads' ``object_pairs_hook`` so that JSON objects
    containing a repeated key fail loudly instead of silently keeping
    only the last value.

    Raises:
        ValueError: if the same key appears more than once.
    """
    result = {}
    for key, value in pairs:
        if key in result:
            raise ValueError("duplicate key: %r" % (key,))
        result[key] = value
    return result
def load_json(path):
    """Parse the JSON file at ``path``, rejecting duplicate object keys.

    Returns the decoded JSON data; any JSON object encountered is built
    through reject_duplicate_keys, so repeated keys raise ValueError.
    """
    # The with-statement closes the file; the original's explicit
    # map_file.close() inside the block was redundant and is removed.
    with open(path) as map_file:
        file_data = map_file.read()
    return json.loads(file_data, object_pairs_hook=reject_duplicate_keys)
def load_forward_table(path):
    """Load a JSON list of [gl, angle] pairs as a gl -> angle dict."""
    pairs = load_json(path)
    # load_json's hook only validates JSON *objects*; re-check the
    # top-level array of pairs for duplicates as well.
    reject_duplicate_keys(pairs)
    table = {}
    for gl_format, angle_format in pairs:
        table[gl_format] = angle_format
    return table
def load_inverse_table(path):
    """Load a JSON list of [gl, angle] pairs as an angle -> gl dict."""
    pairs = load_json(path)
    # Validate the top-level pair list for duplicates too.
    reject_duplicate_keys(pairs)
    table = {}
    for gl_format, angle_format in pairs:
        table[angle_format] = gl_format
    return table
def load_with_override(override_path):
    """Load the base ANGLE format map and apply overrides on top of it.

    The base table is angle_format_map.json next to this script; entries
    from the JSON object at ``override_path`` replace base entries.
    """
    map_path = os.path.join(os.path.dirname(os.path.realpath(__file__)), 'angle_format_map.json')
    results = load_forward_table(map_path)
    overrides = load_json(override_path)
    # items() (not the Python-2-only iteritems()) so this runs under
    # both Python 2 and Python 3.
    for k, v in overrides.items():
        results[k] = v
    return results
| mpl-2.0 |
atmark-techno/atmark-dist | user/python/Demo/sockets/udpecho.py | 4 | 1284 | #! /usr/bin/env python
# Client and server for udp (datagram) echo.
#
# Usage: udpecho -s [port] (to start a server)
# or: udpecho -c host [port] <file (client)
import sys
from socket import *
ECHO_PORT = 50000 + 7
BUFSIZE = 1024
def main():
    # Dispatch on the first argument: -s runs the echo server,
    # -c runs the echo client; anything else prints usage and exits.
    if len(sys.argv) < 2:
        usage()
    if sys.argv[1] == '-s':
        server()
    elif sys.argv[1] == '-c':
        client()
    else:
        usage()
def usage():
    # Print the usage message to stderr and exit with status 2.
    # Redirecting sys.stdout makes the plain print statements go to stderr.
    sys.stdout = sys.stderr
    print 'Usage: udpecho -s [port] (server)'
    print 'or: udpecho -c host [port] <file (client)'
    sys.exit(2)
def server():
if len(sys.argv) > 2:
port = eval(sys.argv[2])
else:
port = ECHO_PORT
s = socket(AF_INET, SOCK_DGRAM)
s.bind(('', port))
print 'udp echo server ready'
while 1:
data, addr = s.recvfrom(BUFSIZE)
print 'server received', `data`, 'from', `addr`
s.sendto(data, addr)
def client():
if len(sys.argv) < 3:
usage()
host = sys.argv[2]
if len(sys.argv) > 3:
port = eval(sys.argv[3])
else:
port = ECHO_PORT
addr = host, port
s = socket(AF_INET, SOCK_DGRAM)
s.bind(('', 0))
print 'udp echo client ready, reading stdin'
while 1:
line = sys.stdin.readline()
if not line:
break
s.sendto(line, addr)
data, fromaddr = s.recvfrom(BUFSIZE)
print 'client received', `data`, 'from', `fromaddr`
# Guard the entry point so importing this module does not start the
# client/server loop.
if __name__ == '__main__':
    main()
| gpl-2.0 |
mariano/pyfire | pyfire/message.py | 2 | 5534 | import re
import types
from .entity import CampfireEntity
class Message(CampfireEntity):
    """ Campfire message """

    # Campfire wire-format message type identifiers.
    _TYPE_ENTER = "EnterMessage"
    _TYPE_LEAVE = "LeaveMessage"
    _TYPE_KICK = "KickMessage"
    _TYPE_PASTE = "PasteMessage"
    _TYPE_SOUND = "SoundMessage"
    _TYPE_TEXT = "TextMessage"
    _TYPE_TIMESTAMP = "TimestampMessage"
    _TYPE_TOPIC_CHANGE = "TopicChangeMessage"
    _TYPE_TWEET = "TweetMessage"
    _TYPE_UPLOAD = "UploadMessage"

    def __init__(self, campfire, data):
        """ Initialize.

        Args:
            campfire (:class:`Campfire`): Campfire instance
            data (dict or str): If string, message type will be set to either paste or text
        """
        dataType = type(data)
        # Python 2: accept both byte strings and unicode strings.
        if dataType == types.StringType or dataType == types.UnicodeType:
            # Multi-line bodies become pastes, single lines plain text.
            messageType = self._TYPE_PASTE if data.find("\n") >= 0 else self._TYPE_TEXT
            if messageType == self._TYPE_TEXT:
                # A bare Twitter status URL is sent as a tweet message.
                matches = re.match("^https?://(www\.)?twitter\.com/([^/]+)/status/(\d+)", data)
                if matches:
                    messageType = self._TYPE_TWEET
            data = {
                "type": messageType,
                "body": data
            }

        super(Message, self).__init__(campfire)
        self.set_data(data, ["created_at"])

        self.user = None
        self.room = None

        # Resolve referenced user/room objects lazily from their ids.
        if "user_id" in data and data["user_id"]:
            self.user = self._campfire.get_user(data["user_id"])
        if "room_id" in data and data["room_id"]:
            self.room = self._campfire.get_room(data["room_id"])

        if self.is_upload():
            # Fetch upload metadata and normalize "full_url" to "url".
            self.upload = self._connection.get("room/%s/messages/%s/upload" % (self.room.id, self.id), key="upload")
            if "full_url" in self.upload:
                self.upload["url"] = self.upload["full_url"]
                del self.upload["full_url"]

        if self.is_tweet():
            # Tweet formats may be different if the streaming is line, or transcript based (I know, I know...)
            # Line format: "<tweet> -- @<user>, <url>"
            matches = re.match("(.+)\s+--\s+@([^,]+),\s*(.+)$", self.body)
            if matches:
                self.tweet = {
                    "tweet": matches.group(1),
                    "user": matches.group(2),
                    "url": matches.group(3)
                }
            else:
                # Transcript format: a "---" header followed by
                # YAML-like ':key: value' lines.
                tweet_data = {}
                if re.match("^---", self.body):
                    for line in self.body.split("\n")[1:]:
                        matches = re.match('^:([^:]+):\s*"?(.+)"?$', line)
                        if matches:
                            tweet_data[matches.group(1)] = matches.group(2)
                if tweet_data and "author_username" in tweet_data and "message" in tweet_data and "id" in tweet_data:
                    self.tweet = {
                        "tweet": tweet_data["message"],
                        "user": tweet_data["author_username"],
                        "url": "http://twitter.com/%s/status/%s" % (tweet_data["author_username"], tweet_data["id"])
                    }
                else:
                    # Unparseable tweet payload: downgrade to plain text.
                    self.type = self._TYPE_TEXT

    def is_by_current_user(self):
        """ Tells if this message was written by the current user.

        Returns:
            bool. Success
        """
        return self.user.id == self._campfire.get_user().id

    def is_joining(self):
        """ Tells if this message is a room join message.

        Returns:
            bool. Success
        """
        return self.type == self._TYPE_ENTER

    def is_kick(self):
        """ Tells if this message is a room kick message.

        Returns:
            bool. Success
        """
        return self.type == self._TYPE_KICK

    def is_leaving(self):
        """ Tells if this message is a room leave message.

        Returns:
            bool. Success
        """
        return self.type == self._TYPE_LEAVE

    def is_paste(self):
        """ Tells if this message is a paste.

        Returns:
            bool. Success
        """
        return self.type == self._TYPE_PASTE

    def is_text(self):
        """ Tells if this message is a text message.

        Note: pastes and tweets count as text as well.

        Returns:
            bool. Success
        """
        return self.type in [
            self._TYPE_PASTE,
            self._TYPE_TEXT,
            self._TYPE_TWEET
        ]

    def is_timestamp(self):
        """ Tells if this message is a timestamp.

        Returns:
            bool. Success
        """
        return self.type == self._TYPE_TIMESTAMP

    def is_topic_change(self):
        """ Tells if this message is a topic change.

        Returns:
            bool. Success
        """
        return self.type == self._TYPE_TOPIC_CHANGE

    def is_tweet(self):
        """ Tells if this message is a tweet.

        Returns:
            bool. Success
        """
        return self.type == self._TYPE_TWEET

    def is_upload(self):
        """ Tells if this message is an upload message.

        Returns:
            bool. Success
        """
        return self.type == self._TYPE_UPLOAD

    def highlight(self):
        """ Highlights a message.

        Returns:
            bool. Success
        """
        return self._connection.post("messages/%s/star" % self.id)["success"]

    def remove_highlight(self):
        """ Removes the highlight of a message.

        Returns:
            bool. Success
        """
        return self._connection.delete("messages/%s/star" % self.id)["success"]
| mit |
cleahcim/go9p | lib/codereview/codereview.py | 8 | 106676 | # coding=utf-8
# (The line above is necessary so that I can use 世界 in the
# *comment* below without Python getting all bent out of shape.)
# Copyright 2007-2009 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
'''Mercurial interface to codereview.appspot.com.
To configure, set the following options in
your repository's .hg/hgrc file.
[extensions]
codereview = /path/to/codereview.py
[codereview]
server = codereview.appspot.com
The server should be running Rietveld; see http://code.google.com/p/rietveld/.
In addition to the new commands, this extension introduces
the file pattern syntax @nnnnnn, where nnnnnn is a change list
number, to mean the files included in that change list, which
must be associated with the current client.
For example, if change 123456 contains the files x.go and y.go,
"hg diff @123456" is equivalent to"hg diff x.go y.go".
'''
import sys
if __name__ == "__main__":
print >>sys.stderr, "This is a Mercurial extension and should not be invoked directly."
sys.exit(2)
# We require Python 2.6 for the json package.
if sys.version < '2.6':
print >>sys.stderr, "The codereview extension requires Python 2.6 or newer."
print >>sys.stderr, "You are running Python " + sys.version
sys.exit(2)
import json
import os
import re
import stat
import subprocess
import threading
import time
from mercurial import commands as hg_commands
from mercurial import util as hg_util
defaultcc = None
codereview_disabled = None
real_rollback = None
releaseBranch = None
server = "codereview.appspot.com"
server_url_base = None
#######################################################################
# Normally I would split this into multiple files, but it simplifies
# import path headaches to keep it all in one file. Sorry.
# The different parts of the file are separated by banners like this one.
#######################################################################
# Helpers
def RelativePath(path, cwd):
    """Return path relative to cwd when path lies inside cwd, else unchanged.

    Fixes an IndexError in the original: when path == cwd exactly,
    path[len(cwd)] was read without a bounds check.
    """
    prefix = cwd + '/'
    if path.startswith(prefix):
        return path[len(prefix):]
    return path
def Sub(l1, l2):
    """Return the elements of l1 that are not in l2, preserving order."""
    result = []
    for item in l1:
        if item not in l2:
            result.append(item)
    return result
def Add(l1, l2):
    """Return the sorted union of l1 and l2.

    Elements of l2 already present in l1 are not added twice; the
    combined list is then sorted in place and returned.
    """
    merged = l1 + [item for item in l2 if item not in l1]
    merged.sort()
    return merged
def Intersect(l1, l2):
    """Return the elements of l1 that also appear in l2, in l1 order."""
    result = []
    for item in l1:
        if item in l2:
            result.append(item)
    return result
#######################################################################
# RE: UNICODE STRING HANDLING
#
# Python distinguishes between the str (string of bytes)
# and unicode (string of code points) types. Most operations
# work on either one just fine, but some (like regexp matching)
# require unicode, and others (like write) require str.
#
# As befits the language, Python hides the distinction between
# unicode and str by converting between them silently, but
# *only* if all the bytes/code points involved are 7-bit ASCII.
# This means that if you're not careful, your program works
# fine on "hello, world" and fails on "hello, 世界". And of course,
# the obvious way to be careful - use static types - is unavailable.
# So the only way is trial and error to find where to put explicit
# conversions.
#
# Because more functions do implicit conversion to str (string of bytes)
# than do implicit conversion to unicode (string of code points),
# the convention in this module is to represent all text as str,
# converting to unicode only when calling a unicode-only function
# and then converting back to str as soon as possible.
def typecheck(s, t):
    """Abort unless s is exactly of type t (subclasses do not count)."""
    if type(s) == t:
        return
    raise hg_util.Abort("type check failed: %s has type %s != %s" % (repr(s), type(s), t))
# If we have to pass unicode instead of str, ustr does that conversion clearly.
def ustr(s):
    # Convert a UTF-8 byte string to unicode (Python 2 semantics:
    # str.decode). Aborts if s is not a plain str.
    typecheck(s, str)
    return s.decode("utf-8")
# Even with those, Mercurial still sometimes turns unicode into str
# and then tries to use it as ascii. Change Mercurial's default.
def set_mercurial_encoding_to_utf8():
    # Force Mercurial's global text encoding to UTF-8 so non-ASCII
    # descriptions/authors round-trip correctly.
    from mercurial import encoding
    encoding.encoding = 'utf-8'
set_mercurial_encoding_to_utf8()
# Even with those we still run into problems.
# I tried to do things by the book but could not convince
# Mercurial to let me check in a change with UTF-8 in the
# CL description or author field, no matter how many conversions
# between str and unicode I inserted and despite changing the
# default encoding. I'm tired of this game, so set the default
# encoding for all of Python to 'utf-8', not 'ascii'.
def default_to_utf8():
    # Set Python 2's process-wide default encoding to UTF-8.
    # reload(sys) restores sys.setdefaultencoding (deleted by site.py),
    # but it also resets the stdout references, so save and restore them.
    import sys
    stdout, __stdout__ = sys.stdout, sys.__stdout__
    reload(sys)  # site.py deleted setdefaultencoding; get it back
    sys.stdout, sys.__stdout__ = stdout, __stdout__
    sys.setdefaultencoding('utf-8')
default_to_utf8()
#######################################################################
# Status printer for long-running commands
global_status = None
def set_status(s):
    """Record the current long-running-operation status message.

    StatusThread picks this value up periodically and prints it.
    """
    # print >>sys.stderr, "\t", time.asctime(), s
    global global_status
    global_status = s
class StatusThread(threading.Thread):
    # Background thread that periodically prints global_status to stderr
    # while a long-running command is in progress.

    def __init__(self):
        threading.Thread.__init__(self)

    def run(self):
        # pause a reasonable amount of time before
        # starting to display status messages, so that
        # most hg commands won't ever see them.
        time.sleep(30)

        # now show status every 15 seconds
        while True:
            # Sleep to the next 15-second boundary.
            time.sleep(15 - time.time() % 15)
            s = global_status
            if s is None:
                continue
            if s == "":
                s = "(unknown status)"
            print >>sys.stderr, time.asctime(), s
def start_status_thread():
    """Start the background status-reporting thread."""
    t = StatusThread()
    # Assign the daemon attribute instead of the deprecated setDaemon()
    # (works on Python 2.6+ and 3); the thread must not block exit.
    t.daemon = True  # allowed to exit if t is still running
    t.start()
#######################################################################
# Change list parsing.
#
# Change lists are stored in .hg/codereview/cl.nnnnnn
# where nnnnnn is the number assigned by the code review server.
# Most data about a change list is stored on the code review server
# too: the description, reviewer, and cc list are all stored there.
# The only thing in the cl.nnnnnn file is the list of relevant files.
# Also, the existence of the cl.nnnnnn file marks this repository
# as the one where the change list lives.
emptydiff = """Index: ~rietveld~placeholder~
===================================================================
diff --git a/~rietveld~placeholder~ b/~rietveld~placeholder~
new file mode 100644
"""
class CL(object):
def __init__(self, name):
typecheck(name, str)
self.name = name
self.desc = ''
self.files = []
self.reviewer = []
self.cc = []
self.url = ''
self.local = False
self.web = False
self.copied_from = None # None means current user
self.mailed = False
self.private = False
self.lgtm = []
def DiskText(self):
cl = self
s = ""
if cl.copied_from:
s += "Author: " + cl.copied_from + "\n\n"
if cl.private:
s += "Private: " + str(self.private) + "\n"
s += "Mailed: " + str(self.mailed) + "\n"
s += "Description:\n"
s += Indent(cl.desc, "\t")
s += "Files:\n"
for f in cl.files:
s += "\t" + f + "\n"
typecheck(s, str)
return s
def EditorText(self):
cl = self
s = _change_prolog
s += "\n"
if cl.copied_from:
s += "Author: " + cl.copied_from + "\n"
if cl.url != '':
s += 'URL: ' + cl.url + ' # cannot edit\n\n'
if cl.private:
s += "Private: True\n"
s += "Reviewer: " + JoinComma(cl.reviewer) + "\n"
s += "CC: " + JoinComma(cl.cc) + "\n"
s += "\n"
s += "Description:\n"
if cl.desc == '':
s += "\t<enter description here>\n"
else:
s += Indent(cl.desc, "\t")
s += "\n"
if cl.local or cl.name == "new":
s += "Files:\n"
for f in cl.files:
s += "\t" + f + "\n"
s += "\n"
typecheck(s, str)
return s
def PendingText(self, quick=False):
cl = self
s = cl.name + ":" + "\n"
s += Indent(cl.desc, "\t")
s += "\n"
if cl.copied_from:
s += "\tAuthor: " + cl.copied_from + "\n"
if not quick:
s += "\tReviewer: " + JoinComma(cl.reviewer) + "\n"
for (who, line) in cl.lgtm:
s += "\t\t" + who + ": " + line + "\n"
s += "\tCC: " + JoinComma(cl.cc) + "\n"
s += "\tFiles:\n"
for f in cl.files:
s += "\t\t" + f + "\n"
typecheck(s, str)
return s
def Flush(self, ui, repo):
if self.name == "new":
self.Upload(ui, repo, gofmt_just_warn=True, creating=True)
dir = CodeReviewDir(ui, repo)
path = dir + '/cl.' + self.name
f = open(path+'!', "w")
f.write(self.DiskText())
f.close()
if sys.platform == "win32" and os.path.isfile(path):
os.remove(path)
os.rename(path+'!', path)
if self.web and not self.copied_from:
EditDesc(self.name, desc=self.desc,
reviewers=JoinComma(self.reviewer), cc=JoinComma(self.cc),
private=self.private)
def Delete(self, ui, repo):
dir = CodeReviewDir(ui, repo)
os.unlink(dir + "/cl." + self.name)
def Subject(self):
s = line1(self.desc)
if len(s) > 60:
s = s[0:55] + "..."
if self.name != "new":
s = "code review %s: %s" % (self.name, s)
typecheck(s, str)
return s
def Upload(self, ui, repo, send_mail=False, gofmt=True, gofmt_just_warn=False, creating=False, quiet=False):
    """Upload this CL's metadata and diffs to the code review server.

    Args:
        send_mail: also trigger review mail after upload.
        gofmt: run the gofmt check first (subject to config override).
        gofmt_just_warn: demote gofmt failures to warnings.
        creating: this is the initial upload of a brand-new CL.
        quiet: suppress the "Issue updated." status line.

    On success, sets self.name/self.url from the server response and
    flushes the CL to disk.  Raises hg_util.Abort on server errors.
    """
    if not self.files and not creating:
        ui.warn("no files in change list\n")
    if ui.configbool("codereview", "force_gofmt", True) and gofmt:
        CheckFormat(ui, repo, self.files, just_warn=gofmt_just_warn)
    set_status("uploading CL metadata + diffs")
    os.chdir(repo.root)
    form_fields = [
        ("content_upload", "1"),
        ("reviewers", JoinComma(self.reviewer)),
        ("cc", JoinComma(self.cc)),
        ("description", self.desc),
        ("base_hashes", ""),
    ]
    if self.name != "new":
        form_fields.append(("issue", self.name))
    vcs = None
    # We do not include files when creating the issue,
    # because we want the patch sets to record the repository
    # and base revision they are diffs against. We use the patch
    # set message for that purpose, but there is no message with
    # the first patch set. Instead the message gets used as the
    # new CL's overall subject. So omit the diffs when creating
    # and then we'll run an immediate upload.
    # This has the effect that every CL begins with an empty "Patch set 1".
    if self.files and not creating:
        vcs = MercurialVCS(upload_options, ui, repo)
        data = vcs.GenerateDiff(self.files)
        files = vcs.GetBaseFiles(data)
        if len(data) > MAX_UPLOAD_SIZE:
            # Oversized diffs go up one patch at a time later.
            uploaded_diff_file = []
            form_fields.append(("separate_patches", "1"))
        else:
            uploaded_diff_file = [("data", "data.diff", data)]
    else:
        uploaded_diff_file = [("data", "data.diff", emptydiff)]
    if vcs and self.name != "new":
        # Subject of the patch-set message records the base revision.
        form_fields.append(("subject", "diff -r " + vcs.base_rev + " " + ui.expandpath("default")))
    else:
        # First upload sets the subject for the CL itself.
        form_fields.append(("subject", self.Subject()))
    ctype, body = EncodeMultipartFormData(form_fields, uploaded_diff_file)
    response_body = MySend("/upload", body, content_type=ctype)
    # Response: status line, then patchset id, then per-file upload info.
    patchset = None
    msg = response_body
    lines = msg.splitlines()
    if len(lines) >= 2:
        msg = lines[0]
        patchset = lines[1].strip()
        patches = [x.split(" ", 1) for x in lines[2:]]
    if response_body.startswith("Issue updated.") and quiet:
        pass
    else:
        ui.status(msg + "\n")
    set_status("uploaded CL metadata + diffs")
    if not response_body.startswith("Issue created.") and not response_body.startswith("Issue updated."):
        raise hg_util.Abort("failed to update issue: " + response_body)
    # Issue number is the last path segment of the reported URL.
    issue = msg[msg.rfind("/")+1:]
    self.name = issue
    if not self.url:
        self.url = server_url_base + self.name
    if not uploaded_diff_file:
        # Oversized diff: upload each patch separately now.
        set_status("uploading patches")
        patches = UploadSeparatePatches(issue, rpc, patchset, data, upload_options)
    if vcs:
        set_status("uploading base files")
        vcs.UploadBaseFiles(issue, rpc, patches, patchset, upload_options, files)
    if send_mail:
        set_status("sending mail")
        MySend("/" + issue + "/mail", payload="")
    self.web = True
    set_status("flushing changes to disk")
    self.Flush(ui, repo)
    return
def Mail(self, ui, repo):
    """Send review-request mail for this CL and mark it as mailed.

    First mail asks for a review of the change; subsequent mails ask
    for another look.  Flushes the updated mailed flag to disk.
    """
    pmsg = "Hello " + JoinComma(self.reviewer)
    if self.cc:
        pmsg += " (cc: %s)" % (', '.join(self.cc),)
    pmsg += ",\n"
    pmsg += "\n"
    repourl = ui.expandpath("default")
    if not self.mailed:
        pmsg += "I'd like you to review this change to\n" + repourl + "\n"
    else:
        pmsg += "Please take another look.\n"
    typecheck(pmsg, str)
    PostMessage(ui, self.name, pmsg, subject=self.Subject())
    self.mailed = True
    self.Flush(ui, repo)
def GoodCLName(name):
    """Report whether name is a well-formed CL name (all decimal digits).

    Returns the re match object (truthy) or None, so callers use it
    in boolean context.
    """
    typecheck(name, str)
    return re.match("^[0-9]+$", name)
def ParseCL(text, name):
    """Parse the on-disk / editor text form of a CL (inverse of EditorText).

    Returns (cl, 0, '') on success or (None, lineno, errmsg) on a parse
    error, where lineno is the offending 1-based line.
    """
    typecheck(text, str)
    typecheck(name, str)
    sname = None        # name of the section currently being accumulated
    lineno = 0
    sections = {
        'Author': '',
        'Description': '',
        'Files': '',
        'URL': '',
        'Reviewer': '',
        'CC': '',
        'Mailed': '',
        'Private': '',
    }
    for line in text.split('\n'):
        lineno += 1
        line = line.rstrip()
        # '#' lines are comments from the edit template.
        if line != '' and line[0] == '#':
            continue
        # Indented or blank lines continue the current section.
        if line == '' or line[0] == ' ' or line[0] == '\t':
            if sname == None and line != '':
                return None, lineno, 'text outside section'
            if sname != None:
                sections[sname] += line + '\n'
            continue
        # Unindented 'Name: value' starts a new section.
        p = line.find(':')
        if p >= 0:
            s, val = line[:p].strip(), line[p+1:].strip()
            if s in sections:
                sname = s
                if val != '':
                    sections[sname] += val + '\n'
                continue
        return None, lineno, 'malformed section header'
    for k in sections:
        sections[k] = StripCommon(sections[k]).rstrip()
    cl = CL(name)
    if sections['Author']:
        cl.copied_from = sections['Author']
    cl.desc = sections['Description']
    for line in sections['Files'].split('\n'):
        # Strip trailing '# ...' annotations on file lines.
        i = line.find('#')
        if i >= 0:
            line = line[0:i].rstrip()
        line = line.strip()
        if line == '':
            continue
        cl.files.append(line)
    cl.reviewer = SplitCommaSpace(sections['Reviewer'])
    cl.cc = SplitCommaSpace(sections['CC'])
    cl.url = sections['URL']
    if sections['Mailed'] != 'False':
        # Odd default, but avoids spurious mailings when
        # reading old CLs that do not have a Mailed: line.
        # CLs created with this update will always have
        # Mailed: False on disk.
        cl.mailed = True
    if sections['Private'] in ('True', 'true', 'Yes', 'yes'):
        cl.private = True
    if cl.desc == '<enter description here>':
        cl.desc = ''
    return cl, 0, ''
def SplitCommaSpace(s):
    """Split a comma-separated string into a list, tolerating spaces
    after the commas.  An empty/whitespace-only input yields []."""
    typecheck(s, str)
    trimmed = s.strip()
    if not trimmed:
        return []
    return re.split(", *", trimmed)
def CutDomain(s):
    """Strip the domain part of an email address: 'a@b' -> 'a'.
    Strings without '@' are returned unchanged."""
    typecheck(s, str)
    at = s.find('@')
    return s[:at] if at >= 0 else s
def JoinComma(l):
    """Join a list of strings with ', ', type-checking each element first."""
    for elem in l:
        typecheck(elem, str)
    return ", ".join(l)
def ExceptionDetail():
    """Format the exception currently being handled as 'TypeName: message'.

    Strips the "<type '...'>" / "<class '...'>" wrappers from the type's
    string form; the message is omitted when empty.
    """
    exc_type, exc_value = sys.exc_info()[:2]
    detail = str(exc_type)
    for prefix, suffix in (("<type '", "'>"), ("<class '", "'>")):
        if detail.startswith(prefix) and detail.endswith(suffix):
            detail = detail[len(prefix):-len(suffix)]
            break
    message = str(exc_value)
    if message:
        detail += ": " + message
    return detail
def IsLocalCL(ui, repo, name):
    """Report whether a valid CL name has a record in the local codereview dir."""
    return GoodCLName(name) and os.access(CodeReviewDir(ui, repo) + "/cl." + name, 0)
# Load CL from disk and/or the web.
def LoadCL(ui, repo, name, web=True):
    """Load CL `name` from disk and, if web is True, merge server metadata.

    Returns (cl, '') on success or (None, errmsg) on failure.
    The local description wins over the server's for CLs copied from
    another author, so committers can edit before submitting.
    """
    typecheck(name, str)
    set_status("loading CL " + name)
    if not GoodCLName(name):
        return None, "invalid CL name"
    dir = CodeReviewDir(ui, repo)
    path = dir + "cl." + name
    if os.access(path, 0):
        ff = open(path)
        text = ff.read()
        ff.close()
        cl, lineno, err = ParseCL(text, name)
        if err != "":
            return None, "malformed CL data: "+err
        cl.local = True
    else:
        cl = CL(name)
    if web:
        set_status("getting issue metadata from web")
        d = JSONGet(ui, "/api/" + name + "?messages=true")
        set_status(None)
        if d is None:
            return None, "cannot load CL %s from server" % (name,)
        # Sanity-check that the server returned the issue we asked for.
        if 'owner_email' not in d or 'issue' not in d or str(d['issue']) != name:
            return None, "malformed response loading CL data from code review server"
        cl.dict = d
        cl.reviewer = d.get('reviewers', [])
        cl.cc = d.get('cc', [])
        if cl.local and cl.copied_from and cl.desc:
            # local copy of CL written by someone else
            # and we saved a description. use that one,
            # so that committers can edit the description
            # before doing hg submit.
            pass
        else:
            cl.desc = d.get('description', "")
        cl.url = server_url_base + name
        cl.web = True
        cl.private = d.get('private', False) != False
        # Collect LGTM / NOT LGTM votes from the message stream.
        cl.lgtm = []
        for m in d.get('messages', []):
            if m.get('approval', False) == True or m.get('disapproval', False) == True:
                who = re.sub('@.*', '', m.get('sender', ''))
                text = re.sub("\n(.|\n)*", '', m.get('text', ''))
                cl.lgtm.append((who, text))
    set_status("loaded CL " + name)
    return cl, ''
class LoadCLThread(threading.Thread):
    """Worker thread that loads a single CL file (used by LoadAllCL).

    On success the loaded CL is left in self.cl; on failure self.cl
    stays None and a warning is printed.
    """
    def __init__(self, ui, repo, dir, f, web):
        threading.Thread.__init__(self)
        self.ui = ui
        self.repo = repo
        self.dir = dir
        self.f = f          # file name of form 'cl.NNNN'
        self.web = web      # whether to also fetch server metadata
        self.cl = None      # result, set by run()
    def run(self):
        # f[3:] strips the 'cl.' prefix to get the CL name.
        cl, err = LoadCL(self.ui, self.repo, self.f[3:], web=self.web)
        if err != '':
            self.ui.warn("loading "+self.dir+self.f+": " + err + "\n")
            return
        self.cl = cl
# Load all the CLs from this repository.
def LoadAllCL(ui, repo, web=True):
    """Load every CL recorded in this repository; returns {name: CL}.

    CLs are loaded in parallel threads, except that the first web
    request runs alone so authentication prompts do not interleave.
    """
    dir = CodeReviewDir(ui, repo)
    m = {}
    files = [f for f in os.listdir(dir) if f.startswith('cl.')]
    if not files:
        return m
    active = []
    first = True
    for f in files:
        t = LoadCLThread(ui, repo, dir, f, web)
        t.start()
        if web and first:
            # first request: wait in case it needs to authenticate
            # otherwise we get lots of user/password prompts
            # running in parallel.
            t.join()
            if t.cl:
                m[t.cl.name] = t.cl
            first = False
        else:
            active.append(t)
    for t in active:
        t.join()
        if t.cl:
            m[t.cl.name] = t.cl
    return m
# Find repository root. On error, ui.warn and return None
def RepoDir(ui, repo):
    """Return the repository root as a local path, or None (with a warning)
    if the repository is not on the local file system."""
    url = repo.url();
    if not url.startswith('file:'):
        ui.warn("repository %s is not in local file system\n" % (url,))
        return None
    url = url[5:]       # strip the 'file:' scheme
    if url.endswith('/'):
        url = url[:-1]
    typecheck(url, str)
    return url
# Find (or make) code review directory. On error, ui.warn and return None
def CodeReviewDir(ui, repo):
    """Return (creating if necessary) the .hg/codereview/ directory path.

    Note the returned path ends with a trailing '/'.  Returns None
    (with a warning) if the repo is non-local or mkdir fails.
    """
    dir = RepoDir(ui, repo)
    if dir == None:
        return None
    dir += '/.hg/codereview/'
    if not os.path.isdir(dir):
        try:
            # 0700: CL files may mention private issues; keep them owner-only.
            os.mkdir(dir, 0700)
        except:
            ui.warn('cannot mkdir %s: %s\n' % (dir, ExceptionDetail()))
            return None
    typecheck(dir, str)
    return dir
# Turn leading tabs into spaces, so that the common white space
# prefix doesn't get confused when people's editors write out
# some lines with spaces, some with tabs. Only a heuristic
# (some editors don't use 8 spaces either) but a useful one.
def TabsToSpaces(line):
    """Expand each leading tab into 8 spaces; the rest of the line is
    untouched.  Helps StripCommon compare mixed tab/space indents."""
    tabs = 0
    while tabs < len(line) and line[tabs] == '\t':
        tabs += 1
    return ' ' * (8 * tabs) + line[tabs:]
# Strip maximal common leading white space prefix from text
def StripCommon(text):
    """Strip the maximal common leading whitespace prefix from text.

    Blank lines are ignored when computing the prefix, leading tabs are
    normalized via TabsToSpaces, leading blank lines are dropped, and
    trailing blank lines are collapsed to at most one newline.
    """
    typecheck(text, str)
    ws = None               # common whitespace prefix seen so far (None = no lines yet)
    for line in text.split('\n'):
        line = line.rstrip()
        if line == '':
            continue
        line = TabsToSpaces(line)
        # Leading whitespace of this line.
        white = line[:len(line)-len(line.lstrip())]
        if ws == None:
            ws = white
        else:
            # Shrink ws to the longest prefix it shares with this line.
            common = ''
            for i in range(min(len(white), len(ws))+1):
                if white[0:i] == ws[0:i]:
                    common = white[0:i]
            ws = common
        if ws == '':
            break
    if ws == None:
        return text
    t = ''
    for line in text.split('\n'):
        line = line.rstrip()
        line = TabsToSpaces(line)
        if line.startswith(ws):
            line = line[len(ws):]
        # Drop blank lines before any content has been emitted.
        if line == '' and t == '':
            continue
        t += line + '\n'
    # Collapse trailing blank lines to a single final newline.
    while len(t) >= 2 and t[-2:] == '\n\n':
        t = t[:-1]
    typecheck(t, str)
    return t
# Indent text with indent.
def Indent(text, indent):
    """Prefix every line of text with indent.  The result always ends
    with a newline (even for empty input, which yields indent+'\\n')."""
    typecheck(text, str)
    typecheck(indent, str)
    out = ''.join(indent + line + '\n' for line in text.split('\n'))
    typecheck(out, str)
    return out
# Return the first line of l
def line1(text):
    """Return text up to (not including) the first newline."""
    typecheck(text, str)
    return text.partition('\n')[0]
# Comment template prepended to the CL text opened in the user's editor
# (see CL.EditorText); '#' lines are discarded again by ParseCL.
_change_prolog = """# Change list.
# Lines beginning with # are ignored.
# Multi-line values should be indented.
"""
# Regexp that conventional CL description first lines must match:
# a "package: summary" prefix, a release/weekly tag line, "fix build",
# or "undo CL".  Raw string so the '\.' escape is explicit rather than
# relying on Python passing unknown string escapes through unchanged.
desc_re = r'^(.+: |(tag )?(release|weekly)\.|fix build|undo CL)'

# Message shown when a description does not match desc_re.
desc_msg = '''Your CL description appears not to use the standard form.
The first line of your change description is conventionally a
one-line summary of the change, prefixed by the primary affected package,
and is used as the subject for code review mail; the rest of the description
elaborates.
Examples:
	encoding/rot13: new package
	math: add IsInf, IsNaN
	net: fix cname in LookupHost
	unicode: update to Unicode 5.0.2
'''
def promptyesno(ui, msg):
    """Ask the user a yes/no question; True means yes (the default)."""
    choice = ui.promptchoice(msg, ["&yes", "&no"], 0)
    return choice == 0
def promptremove(ui, repo, f):
    """Offer to 'hg remove' file f; warn if the removal fails."""
    if promptyesno(ui, "hg remove %s (y/n)?" % (f,)):
        if hg_commands.remove(ui, repo, 'path:'+f) != 0:
            ui.warn("error removing %s" % (f,))
def promptadd(ui, repo, f):
    """Offer to 'hg add' file f; warn if the add fails."""
    if promptyesno(ui, "hg add %s (y/n)?" % (f,)):
        if hg_commands.add(ui, repo, 'path:'+f) != 0:
            ui.warn("error adding %s" % (f,))
def EditCL(ui, repo, cl):
    """Open cl in the user's editor and fold the edits back into cl.

    Loops until the edited text parses and passes the description and
    file-list checks (or the user declines to re-edit).  Returns '' on
    success or an error string when the CL was left unmodified.
    """
    set_status(None)	# do not show status
    s = cl.EditorText()
    while True:
        s = ui.edit(s, ui.username())
        
        # We can't trust Mercurial + Python not to die before making the change,
        # so, by popular demand, just scribble the most recent CL edit into
        # $(hg root)/last-change so that if Mercurial does die, people
        # can look there for their work.
        try:
            f = open(repo.root+"/last-change", "w")
            f.write(s)
            f.close()
        except:
            pass

        clx, line, err = ParseCL(s, cl.name)
        if err != '':
            if not promptyesno(ui, "error parsing change list: line %d: %s\nre-edit (y/n)?" % (line, err)):
                return "change list not modified"
            continue
        
        # Check description.
        if clx.desc == '':
            if promptyesno(ui, "change list should have a description\nre-edit (y/n)?"):
                continue
        elif re.search('<enter reason for undo>', clx.desc):
            if promptyesno(ui, "change list description omits reason for undo\nre-edit (y/n)?"):
                continue
        elif not re.match(desc_re, clx.desc.split('\n')[0]):
            if promptyesno(ui, desc_msg + "re-edit (y/n)?"):
                continue

        # Check file list for files that need to be hg added or hg removed
        # or simply aren't understood.
        # Classify each listed file against Mercurial's view of the tree.
        pats = ['path:'+f for f in clx.files]
        changed = hg_matchPattern(ui, repo, *pats, modified=True, added=True, removed=True)
        deleted = hg_matchPattern(ui, repo, *pats, deleted=True)
        unknown = hg_matchPattern(ui, repo, *pats, unknown=True)
        ignored = hg_matchPattern(ui, repo, *pats, ignored=True)
        clean = hg_matchPattern(ui, repo, *pats, clean=True)
        files = []
        for f in clx.files:
            if f in changed:
                files.append(f)
                continue
            if f in deleted:
                promptremove(ui, repo, f)
                files.append(f)
                continue
            if f in unknown:
                promptadd(ui, repo, f)
                files.append(f)
                continue
            if f in ignored:
                ui.warn("error: %s is excluded by .hgignore; omitting\n" % (f,))
                continue
            if f in clean:
                ui.warn("warning: %s is listed in the CL but unchanged\n" % (f,))
                files.append(f)
                continue
            p = repo.root + '/' + f
            if os.path.isfile(p):
                ui.warn("warning: %s is a file but not known to hg\n" % (f,))
                files.append(f)
                continue
            if os.path.isdir(p):
                ui.warn("error: %s is a directory, not a file; omitting\n" % (f,))
                continue
            ui.warn("error: %s does not exist; omitting\n" % (f,))
        clx.files = files

        # Edits validated: copy the parsed fields into the real CL.
        cl.desc = clx.desc
        cl.reviewer = clx.reviewer
        cl.cc = clx.cc
        cl.files = clx.files
        cl.private = clx.private
        break
    return ""
# For use by submit, etc. (NOT by change)
# Get change list number or list of files from command line.
# If files are given, make a new change list.
def CommandLineCL(ui, repo, pats, opts, op="verb", defaultcc=None):
    """Resolve command-line arguments to a CL (for submit etc., NOT change).

    If pats names an existing CL number, load it; otherwise create a new
    CL from the changed files matching pats.  Applies -r/--reviewer,
    --cc and defaultcc options.  Returns (cl, '') or (None, errmsg).
    """
    if len(pats) > 0 and GoodCLName(pats[0]):
        if len(pats) != 1:
            return None, "cannot specify change number and file names"
        if opts.get('message'):
            return None, "cannot use -m with existing CL"
        cl, err = LoadCL(ui, repo, pats[0], web=True)
        if err != "":
            return None, err
    else:
        cl = CL("new")
        cl.local = True
        cl.files = ChangedFiles(ui, repo, pats, taken=Taken(ui, repo))
        if not cl.files:
            return None, "no files changed (use hg %s <number> to use existing CL)" % op
    if opts.get('reviewer'):
        cl.reviewer = Add(cl.reviewer, SplitCommaSpace(opts.get('reviewer')))
    if opts.get('cc'):
        cl.cc = Add(cl.cc, SplitCommaSpace(opts.get('cc')))
    if defaultcc:
        cl.cc = Add(cl.cc, defaultcc)
    if cl.name == "new":
        # New CL: take the description from -m or open the editor.
        if opts.get('message'):
            cl.desc = opts.get('message')
        else:
            err = EditCL(ui, repo, cl)
            if err != '':
                return None, err
    return cl, ""
#######################################################################
# Change list file management
# Return list of changed files in repository that match pats.
# The patterns came from the command line, so we warn
# if they have no effect or cannot be understood.
def ChangedFiles(ui, repo, pats, taken=None):
    """Return the sorted list of changed files matching pats, minus any
    already claimed by other CLs (taken maps file -> CL).

    Also prompts to 'hg add'/'hg remove' unknown/removed matches and
    warns about patterns that matched nothing.
    """
    taken = taken or {}
    # Run each pattern separately so that we can warn about
    # patterns that didn't do anything useful.
    for p in pats:
        for f in hg_matchPattern(ui, repo, p, unknown=True):
            promptadd(ui, repo, f)
        for f in hg_matchPattern(ui, repo, p, removed=True):
            promptremove(ui, repo, f)
        files = hg_matchPattern(ui, repo, p, modified=True, added=True, removed=True)
        for f in files:
            if f in taken:
                ui.warn("warning: %s already in CL %s\n" % (f, taken[f].name))
        if not files:
            ui.warn("warning: %s did not match any modified files\n" % (p,))

    # Again, all at once (eliminates duplicates)
    l = hg_matchPattern(ui, repo, *pats, modified=True, added=True, removed=True)
    l.sort()
    if taken:
        l = Sub(l, taken.keys())
    return l
# Return list of changed files in repository that match pats and still exist.
def ChangedExistingFiles(ui, repo, pats, opts):
    """Return sorted changed files matching pats that still exist
    (modified or added; removed files are excluded)."""
    l = hg_matchPattern(ui, repo, *pats, modified=True, added=True)
    l.sort()
    return l
# Return list of files claimed by existing CLs
def Taken(ui, repo):
    """Map each file claimed by an existing local CL to that CL."""
    clmap = LoadAllCL(ui, repo, web=False)
    return dict((f, cl) for cl in clmap.values() for f in cl.files)
# Return list of changed files that are not claimed by other CLs
def DefaultFiles(ui, repo, pats):
    """Return changed files matching pats that no other CL has claimed."""
    return ChangedFiles(ui, repo, pats, taken=Taken(ui, repo))
#######################################################################
# File format checking.
def CheckFormat(ui, repo, files, just_warn=False):
    """Run the formatting checks (gofmt for Go, tab indentation for C/asm)
    over files; with just_warn, failures warn instead of aborting."""
    set_status("running gofmt")
    CheckGofmt(ui, repo, files, just_warn)
    CheckTabfmt(ui, repo, files, just_warn)
# Check that gofmt run on the list of files does not change them
def CheckGofmt(ui, repo, files, just_warn):
    """Check that gofmt run on the list of files does not change them.

    Runs 'gofmt -l' over the Go files that exist locally; raises
    hg_util.Abort (or warns, with just_warn) when any file needs
    formatting or gofmt reports errors.
    """
    files = gofmt_required(files)
    if not files:
        return
    cwd = os.getcwd()
    files = [RelativePath(repo.root + '/' + f, cwd) for f in files]
    files = [f for f in files if os.access(f, 0)]
    if not files:
        return
    try:
        cmd = subprocess.Popen(["gofmt", "-l"] + files, shell=False, stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE, close_fds=sys.platform != "win32")
        cmd.stdin.close()
    except:
        raise hg_util.Abort("gofmt: " + ExceptionDetail())
    # Drain stdout and stderr concurrently and wait for exit.
    # The previous code read stdout to EOF before touching stderr, which
    # can deadlock if gofmt fills the stderr pipe buffer first.
    data, errors = cmd.communicate()
    set_status("done with gofmt")
    if len(errors) > 0:
        ui.warn("gofmt errors:\n" + errors.rstrip() + "\n")
        return
    if len(data) > 0:
        msg = "gofmt needs to format these files (run hg gofmt):\n" + Indent(data, "\t").rstrip()
        if just_warn:
            ui.warn("warning: " + msg + "\n")
        else:
            raise hg_util.Abort(msg)
    return
# Check that *.[chys] files indent using tabs.
def CheckTabfmt(ui, repo, files, just_warn):
    """Check that *.[chys] files under src/ indent using tabs.

    Raises hg_util.Abort (or warns, with just_warn) listing any file
    whose lines start with four spaces.
    """
    files = [f for f in files if f.startswith('src/') and re.search(r"\.[chys]$", f) and not re.search(r"\.tab\.[ch]$", f)]
    if not files:
        return
    cwd = os.getcwd()
    files = [RelativePath(repo.root + '/' + f, cwd) for f in files]
    files = [f for f in files if os.access(f, 0)]
    badfiles = []
    for f in files:
        try:
            # Open explicitly and close in finally: the old code iterated
            # the open() result directly and leaked the handle on break.
            fp = open(f, 'r')
            try:
                for line in fp:
                    # Four leading spaces is enough to complain about,
                    # except that some Plan 9 code uses four spaces as the label indent,
                    # so allow that.
                    if line.startswith('    ') and not re.match('    [A-Za-z0-9_]+:', line):
                        badfiles.append(f)
                        break
            finally:
                fp.close()
        except:
            # ignore cannot open file, etc.
            pass
    if len(badfiles) > 0:
        msg = "these files use spaces for indentation (use tabs instead):\n\t" + "\n\t".join(badfiles)
        if just_warn:
            ui.warn("warning: " + msg + "\n")
        else:
            raise hg_util.Abort(msg)
    return
#######################################################################
# CONTRIBUTORS file parsing
# Cache of the parsed CONTRIBUTORS file; None until ReadContributors runs.
contributorsCache = None
# Optional URL to fetch CONTRIBUTORS from instead of the repository root.
contributorsURL = None
def ReadContributors(ui, repo):
    """Parse the CONTRIBUTORS file (from contributorsURL or the repo root).

    Returns a dict mapping each lower-cased email address (primary and
    alternates) to (name, primary_email), cached across calls.  On open
    failure a warning is written and None is returned.
    """
    global contributorsCache
    if contributorsCache is not None:
        return contributorsCache

    try:
        if contributorsURL is not None:
            opening = contributorsURL
            f = urllib2.urlopen(contributorsURL)
        else:
            opening = repo.root + '/CONTRIBUTORS'
            f = open(opening, 'r')
    except:
        ui.write("warning: cannot open %s: %s\n" % (opening, ExceptionDetail()))
        return

    try:
        contributors = {}
        for line in f:
            # CONTRIBUTORS is a list of lines like:
            #	Person <email>
            #	Person <email> <alt-email>
            # The first email address is the one used in commit logs.
            if line.startswith('#'):
                continue
            m = re.match(r"([^<>]+\S)\s+(<[^<>\s]+>)((\s+<[^<>\s]+>)*)\s*$", line)
            if m:
                name = m.group(1)
                email = m.group(2)[1:-1]
                contributors[email.lower()] = (name, email)
                for extra in m.group(3).split():
                    contributors[extra[1:-1].lower()] = (name, email)
    finally:
        # The previous code never closed the file/URL handle.
        f.close()

    contributorsCache = contributors
    return contributors
def CheckContributor(ui, repo, user=None):
    """Return the canonical 'Name <email>' line for user, aborting if the
    user is not listed in CONTRIBUTORS."""
    set_status("checking CONTRIBUTORS file")
    user, userline = FindContributor(ui, repo, user, warn=False)
    if not userline:
        raise hg_util.Abort("cannot find %s in CONTRIBUTORS" % (user,))
    return userline
def FindContributor(ui, repo, user=None, warn=True):
    """Look up user (default: the configured [ui] username) in CONTRIBUTORS.

    Returns (email, 'Name <email>') when found, or (user, None) when not,
    optionally warning.  Accepts either a bare address or 'Name <email>'.
    """
    if not user:
        user = ui.config("ui", "username")
        if not user:
            raise hg_util.Abort("[ui] username is not configured in .hgrc")
    user = user.lower()
    # Reduce 'Name <email>' to just the email address.
    m = re.match(r".*<(.*)>", user)
    if m:
        user = m.group(1)
    contributors = ReadContributors(ui, repo)
    if user not in contributors:
        if warn:
            ui.warn("warning: cannot find %s in CONTRIBUTORS\n" % (user,))
        return user, None
    user, email = contributors[user]
    return email, "%s <%s>" % (user, email)
#######################################################################
# Mercurial helper functions.
# Read http://mercurial.selenic.com/wiki/MercurialApi before writing any of these.
# We use the ui.pushbuffer/ui.popbuffer + hg_commands.xxx tricks for all interaction
# with Mercurial. It has proved the most stable as they make changes.
# Version of the running Mercurial, as a string (e.g. "2.0.1").
hgversion = hg_util.version()

# We require Mercurial 1.9 and suggest Mercurial 2.0.
# The details of the scmutil package changed then,
# so allowing earlier versions would require extra band-aids below.
# Ubuntu 11.10 ships with Mercurial 1.9.1 as the default version.
hg_required = "1.9"
hg_suggested = "2.0"
# Message shown (and Abort raised) when the running Mercurial is too old.
old_message = """
The code review extension requires Mercurial """+hg_required+""" or newer.
You are using Mercurial """+hgversion+""".
To install a new Mercurial, visit http://mercurial.selenic.com/downloads/.
"""

# Extra advice appended on systems with a distro-managed Mercurial.
linux_message = """
You may need to clear your current Mercurial installation by running:
sudo apt-get remove mercurial mercurial-common
sudo rm -rf /etc/mercurial
"""

# NOTE(review): this is a lexicographic string comparison, not a numeric
# version compare (e.g. "1.10" < "1.9" is True), so unusual version
# strings could be misclassified — TODO confirm acceptable for the
# Mercurial versions this extension supports.
if hgversion < hg_required:
    msg = old_message
    if os.access("/etc/mercurial", 0):
        msg += linux_message
    raise hg_util.Abort(msg)
from mercurial.hg import clean as hg_clean
from mercurial import cmdutil as hg_cmdutil
from mercurial import error as hg_error
from mercurial import match as hg_match
from mercurial import node as hg_node
class uiwrap(object):
    """Capture Mercurial ui output while forcing quiet=True, verbose=False.

    Construct to start capturing; call output() exactly once to restore
    the previous flags and retrieve the buffered text.
    """
    def __init__(self, ui):
        self.ui = ui
        ui.pushbuffer()
        # Save the old flags so output() can restore them; RHS of each
        # tuple assignment is evaluated before the store.
        self.oldQuiet, ui.quiet = ui.quiet, True
        self.oldVerbose, ui.verbose = ui.verbose, False
    def output(self):
        """Restore the saved ui flags and return the captured output."""
        ui = self.ui
        ui.quiet = self.oldQuiet
        ui.verbose = self.oldVerbose
        return ui.popbuffer()
def to_slash(path):
    """Normalize a path to forward slashes on Windows; no-op elsewhere."""
    if sys.platform != "win32":
        return path
    return path.replace('\\', '/')
def hg_matchPattern(ui, repo, *pats, **opts):
    """Run 'hg status' with the given patterns/options and return the
    matched file names, always relative to the repository root.

    opts are status flags such as modified=True, unknown=True, etc.
    """
    w = uiwrap(ui)
    hg_commands.status(ui, repo, *pats, **opts)
    text = w.output()
    ret = []
    prefix = to_slash(os.path.realpath(repo.root))+'/'
    for line in text.split('\n'):
        # Each status line is "<flag> <path>".
        f = line.split()
        if len(f) > 1:
            if len(pats) > 0:
                # Given patterns, Mercurial shows relative to cwd
                p = to_slash(os.path.realpath(f[1]))
                if not p.startswith(prefix):
                    print >>sys.stderr, "File %s not in repo root %s.\n" % (p, prefix)
                else:
                    ret.append(p[len(prefix):])
            else:
                # Without patterns, Mercurial shows relative to root (what we want)
                ret.append(to_slash(f[1]))
    return ret
def hg_heads(ui, repo):
    """Return the captured text output of 'hg heads'."""
    w = uiwrap(ui)
    hg_commands.heads(ui, repo)
    return w.output()
# Routine status lines from Mercurial that pull/push output filtering drops.
noise = [
    "",
    "resolving manifests",
    "searching for changes",
    "couldn't find merge tool hgmerge",
    "adding changesets",
    "adding manifests",
    "adding file changes",
    "all local heads known remotely",
]

def isNoise(line):
    """Report whether line is one of the known Mercurial noise lines."""
    return str(line) in noise
def hg_incoming(ui, repo):
    """Return the captured text of 'hg incoming'; abort on real errors.

    Return code 1 means "no incoming changes" and is not an error.
    """
    w = uiwrap(ui)
    ret = hg_commands.incoming(ui, repo, force=False, bundle="")
    if ret and ret != 1:
        raise hg_util.Abort(ret)
    return w.output()
def hg_log(ui, repo, **opts):
    """Return the captured text of 'hg log' run with opts; abort on error."""
    # hg_commands.log requires these keys to be present even when unused;
    # setdefault replaces the Python-2-only opts.has_key() check.
    for k in ['date', 'keyword', 'rev', 'user']:
        opts.setdefault(k, "")
    w = uiwrap(ui)
    ret = hg_commands.log(ui, repo, **opts)
    if ret:
        raise hg_util.Abort(ret)
    return w.output()
def hg_outgoing(ui, repo, **opts):
    """Return the captured text of 'hg outgoing'; abort on real errors.

    Return code 1 means "no outgoing changes" and is not an error.
    """
    w = uiwrap(ui)
    ret = hg_commands.outgoing(ui, repo, **opts)
    if ret and ret != 1:
        raise hg_util.Abort(ret)
    return w.output()
def hg_pull(ui, repo, **opts):
    """Run 'hg pull', rewriting its verbose output into a compact
    mv/+/- file listing and dropping known noise lines."""
    w = uiwrap(ui)
    ui.quiet = False
    ui.verbose = True  # for file list
    err = hg_commands.pull(ui, repo, **opts)
    for line in w.output().split('\n'):
        if isNoise(line):
            continue
        # Rewrite hg's verbose phrasing; the 'getting ... to' test must
        # run before the plain 'getting' test.
        if line.startswith('moving '):
            line = 'mv ' + line[len('moving '):]
        if line.startswith('getting ') and line.find(' to ') >= 0:
            line = 'mv ' + line[len('getting '):]
        if line.startswith('getting '):
            line = '+ ' + line[len('getting '):]
        if line.startswith('removing '):
            line = '- ' + line[len('removing '):]
        ui.write(line + '\n')
    return err
def hg_push(ui, repo, **opts):
    """Run 'hg push', echoing its output minus known noise lines."""
    w = uiwrap(ui)
    ui.quiet = False
    ui.verbose = True
    err = hg_commands.push(ui, repo, **opts)
    for line in w.output().split('\n'):
        if isNoise(line):
            continue
        ui.write(line + '\n')
    return err
def hg_commit(ui, repo, *pats, **opts):
    """Thin wrapper over 'hg commit'; returns its result code."""
    return hg_commands.commit(ui, repo, *pats, **opts)
#######################################################################
# Mercurial precommit hook to disable commit except through this interface.
# Set True by this extension's own commands while they commit, so the
# precommit hook below lets those commits through.
commit_okay = False

def precommithook(ui, repo, **opts):
    """Mercurial precommit hook: block direct 'hg commit' while the
    codereview extension is active.  Returning False means "okay"."""
    if commit_okay:
        return False  # False means okay.
    ui.write("\ncodereview extension enabled; use mail, upload, or submit instead of commit\n\n")
    return True
#######################################################################
# @clnumber file pattern support
# We replace scmutil.match with the MatchAt wrapper to add the @clnumber pattern.
# State for the MatchAt wrapper installed over scmutil.match.
match_repo = None     # repo used to resolve @clnumber patterns
match_ui = None       # ui used for messages during resolution
match_orig = None     # original scmutil.match, called after expansion

def InstallMatch(ui, repo):
    """Monkey-patch mercurial.scmutil.match with MatchAt so that file
    patterns of the form @clnumber expand to that CL's files."""
    global match_repo
    global match_ui
    global match_orig

    match_ui = ui
    match_repo = repo

    from mercurial import scmutil
    match_orig = scmutil.match
    scmutil.match = MatchAt
def MatchAt(ctx, pats=None, opts=None, globbed=False, default='relpath'):
    """Replacement for scmutil.match that expands '@clnumber' patterns
    into 'path:' patterns for that CL's files before delegating to the
    original matcher.  '@default' means the unclaimed changed files.
    """
    taken = []      # the @... patterns consumed here
    files = []      # files those patterns expand to
    pats = pats or []
    opts = opts or {}
    
    for p in pats:
        if p.startswith('@'):
            taken.append(p)
            clname = p[1:]
            if clname == "default":
                files = DefaultFiles(match_ui, match_repo, [])
            else:
                if not GoodCLName(clname):
                    raise hg_util.Abort("invalid CL name " + clname)
                cl, err = LoadCL(match_repo.ui, match_repo, clname, web=False)
                if err != '':
                    raise hg_util.Abort("loading CL " + clname + ": " + err)
                if not cl.files:
                    raise hg_util.Abort("no files in CL " + clname)
                files = Add(files, cl.files)
    pats = Sub(pats, taken) + ['path:'+f for f in files]

    # work-around for http://selenic.com/hg/rev/785bbc8634f8
    if not hasattr(ctx, 'match'):
        ctx = ctx[None]
    return match_orig(ctx, pats=pats, opts=opts, globbed=globbed, default=default)
#######################################################################
# Commands added by code review extension.
def hgcommand(f):
    # Decorator marking f as a command added by the code review extension.
    # Currently a no-op (identity); kept as a single registration point.
    return f
#######################################################################
# hg change
@hgcommand
def change(ui, repo, *pats, **opts):
    """create, edit or delete a change list

    Create, edit or delete a change list.
    A change list is a group of files to be reviewed and submitted together,
    plus a textual description of the change.
    Change lists are referred to by simple alphanumeric names.

    Changes must be reviewed before they can be submitted.

    In the absence of options, the change command opens the
    change list for editing in the default editor.

    Deleting a change with the -d or -D flag does not affect
    the contents of the files listed in that change. To revert
    the files listed in a change, use

        hg revert @123456

    before running hg change -d 123456.
    """
    if codereview_disabled:
        raise hg_util.Abort(codereview_disabled)
    
    # dirty tracks CLs whose state must be flushed to disk at the end.
    dirty = {}
    if len(pats) > 0 and GoodCLName(pats[0]):
        # Editing an existing CL named on the command line.
        name = pats[0]
        if len(pats) != 1:
            raise hg_util.Abort("cannot specify CL name and file patterns")
        pats = pats[1:]
        cl, err = LoadCL(ui, repo, name, web=True)
        if err != '':
            raise hg_util.Abort(err)
        if not cl.local and (opts["stdin"] or not opts["stdout"]):
            raise hg_util.Abort("cannot change non-local CL " + name)
    else:
        # Creating a new CL from the given file patterns.
        name = "new"
        cl = CL("new")
        if repo[None].branch() != "default":
            raise hg_util.Abort("cannot create CL outside default branch; switch with 'hg update default'")
        dirty[cl] = True
        files = ChangedFiles(ui, repo, pats, taken=Taken(ui, repo))

    if opts["delete"] or opts["deletelocal"]:
        # -d deletes on the server too; -D only removes the local record.
        if opts["delete"] and opts["deletelocal"]:
            raise hg_util.Abort("cannot use -d and -D together")
        flag = "-d"
        if opts["deletelocal"]:
            flag = "-D"
        if name == "new":
            raise hg_util.Abort("cannot use "+flag+" with file patterns")
        if opts["stdin"] or opts["stdout"]:
            raise hg_util.Abort("cannot use "+flag+" with -i or -o")
        if not cl.local:
            raise hg_util.Abort("cannot change non-local CL " + name)
        if opts["delete"]:
            if cl.copied_from:
                raise hg_util.Abort("original author must delete CL; hg change -D will remove locally")
            PostMessage(ui, cl.name, "*** Abandoned ***", send_mail=cl.mailed)
            EditDesc(cl.name, closed=True, private=cl.private)
        cl.Delete(ui, repo)
        return

    if opts["stdin"]:
        # Non-interactive edit: read the CL text from standard input and
        # merge only the fields it supplies.
        s = sys.stdin.read()
        clx, line, err = ParseCL(s, name)
        if err != '':
            raise hg_util.Abort("error parsing change list: line %d: %s" % (line, err))
        if clx.desc is not None:
            cl.desc = clx.desc;
            dirty[cl] = True
        if clx.reviewer is not None:
            cl.reviewer = clx.reviewer
            dirty[cl] = True
        if clx.cc is not None:
            cl.cc = clx.cc
            dirty[cl] = True
        if clx.files is not None:
            cl.files = clx.files
            dirty[cl] = True
        if clx.private != cl.private:
            cl.private = clx.private
            dirty[cl] = True

    if not opts["stdin"] and not opts["stdout"]:
        # Interactive edit in the user's editor.
        if name == "new":
            cl.files = files
        err = EditCL(ui, repo, cl)
        if err != "":
            raise hg_util.Abort(err)
        dirty[cl] = True

    for d, _ in dirty.items():
        name = d.name
        d.Flush(ui, repo)
        if name == "new":
            d.Upload(ui, repo, quiet=True)

    if opts["stdout"]:
        ui.write(cl.EditorText())
    elif opts["pending"]:
        ui.write(cl.PendingText())
    elif name == "new":
        if ui.quiet:
            ui.write(cl.name)
        else:
            ui.write("CL created: " + cl.url + "\n")
    return
#######################################################################
# hg code-login (broken?)
@hgcommand
def code_login(ui, repo, **opts):
    """log in to code review server

    Logs in to the code review server, saving a cookie in
    a file in your home directory.
    """
    if codereview_disabled:
        raise hg_util.Abort(codereview_disabled)

    # MySend(None) performs the authentication round-trip without
    # issuing a real request.
    MySend(None)
#######################################################################
# hg clpatch / undo / release-apply / download
# All concerned with applying or unapplying patches to the repository.
@hgcommand
def clpatch(ui, repo, clname, **opts):
    """import a patch from the code review server

    Imports a patch from the code review server into the local client.
    If the local client has already modified any of the files that the
    patch modifies, this command will refuse to apply the patch.

    Submitting an imported patch will keep the original author's
    name as the Author: line but add your own name to a Committer: line.
    """
    if repo[None].branch() != "default":
        raise hg_util.Abort("cannot run hg clpatch outside default branch")
    err = clpatch_or_undo(ui, repo, clname, opts, mode="clpatch")
    if err:
        raise hg_util.Abort(err)
@hgcommand
def undo(ui, repo, clname, **opts):
    """undo the effect of a CL

    Creates a new CL that undoes an earlier CL.
    After creating the CL, opens the CL text for editing so that
    you can add the reason for the undo to the description.
    """
    if repo[None].branch() != "default":
        raise hg_util.Abort("cannot run hg undo outside default branch")
    err = clpatch_or_undo(ui, repo, clname, opts, mode="undo")
    if err:
        raise hg_util.Abort(err)
@hgcommand
def release_apply(ui, repo, clname, **opts):
    """apply a CL to the release branch

    Creates a new CL copying a previously committed change
    from the main branch to the release branch.
    The current client must either be clean or already be in
    the release branch.

    The release branch must be created by starting with a
    clean client, disabling the code review plugin, and running:

        hg update weekly.YYYY-MM-DD
        hg branch release-branch.rNN
        hg commit -m 'create release-branch.rNN'
        hg push --new-branch

    Then re-enable the code review plugin.

    People can test the release branch by running

        hg update release-branch.rNN

    in a clean client. To return to the normal tree,

        hg update default

    Move changes since the weekly into the release branch
    using hg release-apply followed by the usual code review
    process and hg submit.

    When it comes time to tag the release, record the
    final long-form tag of the release-branch.rNN
    in the *default* branch's .hgtags file. That is, run

        hg update default

    and then edit .hgtags as you would for a weekly.
    """
    c = repo[None]
    if not releaseBranch:
        raise hg_util.Abort("no active release branches")
    if c.branch() != releaseBranch:
        # Switch to the release branch, but only from a clean client.
        if c.modified() or c.added() or c.removed():
            raise hg_util.Abort("uncommitted local changes - cannot switch branches")
        err = hg_clean(repo, releaseBranch)
        if err:
            raise hg_util.Abort(err)
    try:
        err = clpatch_or_undo(ui, repo, clname, opts, mode="backport")
        if err:
            raise hg_util.Abort(err)
    except Exception, e:
        # On any failure, return the client to the default branch
        # before propagating the error.
        hg_clean(repo, "default")
        raise e
def rev2clname(rev):
    # Extract the CL number from a revision description.
    # The last line that is a codereview URL is the authoritative one;
    # earlier matches may simply be quoted in the user-written text.
    matches = re.findall('(?m)^https?://codereview.appspot.com/([0-9]+)$', rev.description())
    if not matches:
        return ""
    return matches[-1]
# Description templates wrapped around a copied CL description.
# The guillemet markers delimit the quoted original text; EditCL later
# shows this to the user for editing.
# undoHeader %s slots: CL number, short revision hash.
undoHeader = """undo CL %s / %s
<enter reason for undo>
««« original CL description
"""
undoFooter = """
»»»
"""
# backportHeader %s slots: release branch, first line of the original
# description, CL number, short revision hash.
backportHeader = """[%s] %s
««« CL %s / %s
"""
backportFooter = """
»»»
"""
# Implementation of clpatch/undo.
def clpatch_or_undo(ui, repo, clname, opts, mode):
    # Shared engine behind hg clpatch / hg undo / hg release-apply.
    # mode is one of "clpatch", "undo", "backport".
    # Returns an error string; "" / falsy means success.  Callers Abort on it.
    if codereview_disabled:
        return codereview_disabled
    if mode == "undo" or mode == "backport":
        # Find revision in Mercurial repository.
        # Assume CL number is 7+ decimal digits.
        # Otherwise is either change log sequence number (fewer decimal digits),
        # hexadecimal hash, or tag name.
        # Mercurial will fall over long before the change log
        # sequence numbers get to be 7 digits long.
        if re.match('^[0-9]{7,}$', clname):
            found = False
            for r in hg_log(ui, repo, keyword="codereview.appspot.com/"+clname, limit=100, template="{node}\n").split():
                rev = repo[r]
                # Last line with a code review URL is the actual review URL.
                # Earlier ones might be part of the CL description.
                n = rev2clname(rev)
                if n == clname:
                    found = True
                    break
            if not found:
                return "cannot find CL %s in local repository" % clname
        else:
            rev = repo[clname]
            if not rev:
                return "unknown revision %s" % clname
            clname = rev2clname(rev)
            if clname == "":
                return "cannot find CL name in revision description"
        # Create fresh CL and start with patch that would reverse the change.
        vers = hg_node.short(rev.node())
        cl = CL("new")
        desc = str(rev.description())
        if mode == "undo":
            cl.desc = (undoHeader % (clname, vers)) + desc + undoFooter
        else:
            # NOTE(review): the backport path closes with undoFooter rather
            # than backportFooter; the two strings are currently identical,
            # so behavior matches — confirm intent before "fixing".
            cl.desc = (backportHeader % (releaseBranch, line1(desc), clname, vers)) + desc + undoFooter
        v1 = vers
        v0 = hg_node.short(rev.parents()[0].node())
        if mode == "undo":
            # Reverse diff: from the change back to its parent.
            arg = v1 + ":" + v0
        else:
            # Forward diff from the parent; record the parent as the base.
            vers = v0
            arg = v0 + ":" + v1
        patch = RunShell(["hg", "diff", "--git", "-r", arg])
    else: # clpatch
        cl, vers, patch, err = DownloadCL(ui, repo, clname)
        if err != "":
            return err
        if patch == emptydiff:
            return "codereview issue %s has no diff" % clname
    # find current hg version (hg identify)
    ctx = repo[None]
    parents = ctx.parents()
    id = '+'.join([hg_node.short(p.node()) for p in parents])
    # if version does not match the patch version,
    # try to update the patch line numbers.
    if vers != "" and id != vers:
        # "vers in repo" gives the wrong answer
        # on some versions of Mercurial.  Instead, do the actual
        # lookup and catch the exception.
        try:
            repo[vers].description()
        except:
            return "local repository is out of date; sync to get %s" % (vers)
        patch1, err = portPatch(repo, patch, vers, id)
        if err != "":
            if not opts["ignore_hgapplydiff_failure"]:
                return "codereview issue %s is out of date: %s (%s->%s)" % (clname, err, vers, id)
        else:
            patch = patch1
    argv = ["hgapplydiff"]
    if opts["no_incoming"] or mode == "backport":
        argv += ["--checksync=false"]
    try:
        cmd = subprocess.Popen(argv, shell=False, stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=None, close_fds=sys.platform != "win32")
    except:
        return "hgapplydiff: " + ExceptionDetail() + "\nInstall hgapplydiff with:\n$ go get code.google.com/p/go.codereview/cmd/hgapplydiff\n"
    # hgapplydiff prints the list of files it touched on stdout.
    out, err = cmd.communicate(patch)
    if cmd.returncode != 0 and not opts["ignore_hgapplydiff_failure"]:
        return "hgapplydiff failed"
    cl.local = True
    cl.files = out.strip().split()
    if not cl.files and not opts["ignore_hgapplydiff_failure"]:
        return "codereview issue %s has no changed files" % clname
    files = ChangedFiles(ui, repo, [])
    extra = Sub(cl.files, files)
    if extra:
        ui.warn("warning: these files were listed in the patch but not changed:\n\t" + "\n\t".join(extra) + "\n")
    cl.Flush(ui, repo)
    if mode == "undo":
        # Let the user record the reason for the undo in the description.
        err = EditCL(ui, repo, cl)
        if err != "":
            return "CL created, but error editing: " + err
        cl.Flush(ui, repo)
    else:
        ui.write(cl.PendingText() + "\n")
# portPatch rewrites patch from being a patch against
# oldver to being a patch against newver.
def portPatch(repo, patch, oldver, newver):
    """Rewrite hunk line numbers in patch from oldver to newver.

    Returns (newpatch, err); err is "" on success.  On failure the first
    element is "" (fix: the parse-error path used to return None, unlike
    the lineDelta-error path — callers only check err, so unifying the
    error contract to ("", err) is safe and consistent).
    """
    lines = patch.splitlines(True) # True = keep \n
    delta = None
    for i in range(len(lines)):
        line = lines[i]
        if line.startswith('--- a/'):
            # File header names the file; strip the prefix and trailing newline.
            file = line[6:-1]
            delta = fileDeltas(repo, file, oldver, newver)
        if not delta or not line.startswith('@@ '):
            continue
        # @@ -x,y +z,w @@ means the patch chunk replaces
        # the original file's line numbers x up to x+y with the
        # line numbers z up to z+w in the new file.
        # Find the delta from x in the original to the same
        # line in the current version and add that delta to both
        # x and z.
        m = re.match('@@ -([0-9]+),([0-9]+) \+([0-9]+),([0-9]+) @@', line)
        if not m:
            return "", "error parsing patch line numbers"
        n1, len1, n2, len2 = int(m.group(1)), int(m.group(2)), int(m.group(3)), int(m.group(4))
        d, err = lineDelta(delta, n1, len1)
        if err != "":
            return "", err
        n1 += d
        n2 += d
        lines[i] = "@@ -%d,%d +%d,%d @@\n" % (n1, len1, n2, len2)
    newpatch = ''.join(lines)
    return newpatch, ""
# fileDelta returns the line number deltas for the given file's
# changes from oldver to newver.
# The deltas are a list of (n, len, newdelta) triples that say
# lines [n, n+len) were modified, and after that range the
# line numbers are +newdelta from what they were before.
def fileDeltas(repo, file, oldver, newver):
    # Ask Mercurial for the unified diff and keep only the hunk headers.
    diff_cmd = ["hg", "diff", "--git", "-r", oldver + ":" + newver, "path:" + file]
    output = RunShell(diff_cmd, silent_ok=True)
    hunk_pat = re.compile('@@ -([0-9]+),([0-9]+) \+([0-9]+),([0-9]+) @@')
    result = []
    for text_line in output.splitlines():
        hit = hunk_pat.match(text_line)
        if not hit:
            continue
        old_start, old_len, new_start, new_len = [int(g) for g in hit.groups()]
        # newdelta = shift of line numbers after this modified range.
        result.append((old_start, old_len, new_start + new_len - (old_start + old_len)))
    return result
# lineDelta finds the appropriate line number delta to apply to the lines [n, n+len).
# It returns an error if those lines were rewritten by the patch.
def lineDelta(deltas, n, len):
    shift = 0
    for (start, length, after) in deltas:
        if start >= n + len:
            # Delta lies entirely past the chunk; earlier entries decide.
            break
        # NOTE(review): this overlap test uses the chunk's len rather than
        # the delta's own length — presumably intentional, but confirm.
        if start + len > n:
            return 0, "patch and recent changes conflict"
        shift = after
    return shift, ""
@hgcommand
def download(ui, repo, clname, **opts):
    """download a change from the code review server
    Download prints a description of the given change list
    followed by its diff, downloaded from the code review server.
    """
    if codereview_disabled:
        raise hg_util.Abort(codereview_disabled)
    # Fetch the CL and its newest patch set; report any failure as-is.
    change, _, diff, problem = DownloadCL(ui, repo, clname)
    if problem != "":
        return problem
    ui.write(change.EditorText() + "\n")
    ui.write(diff + "\n")
    return
#######################################################################
# hg file

@hgcommand
def file(ui, repo, clname, pat, *pats, **opts):
    """assign files to or remove files from a change list
    Assign files to or (with -d) remove files from a change list.
    The -d option only removes files from the change list.
    It does not edit them or remove them from the repository.
    """
    if codereview_disabled:
        raise hg_util.Abort(codereview_disabled)
    pats = tuple([pat] + list(pats))
    if not GoodCLName(clname):
        return "invalid CL name " + clname
    # CLs whose file lists have been edited and still need flushing.
    dirty = {}
    cl, err = LoadCL(ui, repo, clname, web=False)
    if err != '':
        return err
    if not cl.local:
        return "cannot change non-local CL " + clname
    files = ChangedFiles(ui, repo, pats)
    if opts["delete"]:
        # -d: drop the matching files from this CL only.
        oldfiles = Intersect(files, cl.files)
        if oldfiles:
            if not ui.quiet:
                ui.status("# Removing files from CL. To undo:\n")
                ui.status("# cd %s\n" % (repo.root))
                for f in oldfiles:
                    ui.status("# hg file %s %s\n" % (cl.name, f))
            cl.files = Sub(cl.files, oldfiles)
            cl.Flush(ui, repo)
        else:
            ui.status("no such files in CL")
        return
    if not files:
        return "no such modified files"
    files = Sub(files, cl.files)
    # Files may already belong to other CLs; steal them, warning once.
    taken = Taken(ui, repo)
    warned = False
    for f in files:
        if f in taken:
            if not warned and not ui.quiet:
                ui.status("# Taking files from other CLs. To undo:\n")
                ui.status("# cd %s\n" % (repo.root))
                warned = True
            ocl = taken[f]
            if not ui.quiet:
                ui.status("# hg file %s %s\n" % (ocl.name, f))
            if ocl not in dirty:
                # Remove every transferred file from the other CL at once.
                ocl.files = Sub(ocl.files, files)
                dirty[ocl] = True
    cl.files = Add(cl.files, files)
    dirty[cl] = True
    # Persist every CL that was modified.
    for d, _ in dirty.items():
        d.Flush(ui, repo)
    return
#######################################################################
# hg gofmt
@hgcommand
def gofmt(ui, repo, *pats, **opts):
"""apply gofmt to modified files
Applies gofmt to the modified files in the repository that match
the given patterns.
"""
if codereview_disabled:
raise hg_util.Abort(codereview_disabled)
files = ChangedExistingFiles(ui, repo, pats, opts)
files = gofmt_required(files)
if not files:
return "no modified go files"
cwd = os.getcwd()
files = [RelativePath(repo.root + '/' + f, cwd) for f in files]
try:
cmd = ["gofmt", "-l"]
if not opts["list"]:
cmd += ["-w"]
if subprocess.call(cmd + files) != 0:
raise hg_util.Abort("gofmt did not exit cleanly")
except hg_error.Abort, e:
raise
except:
raise hg_util.Abort("gofmt: " + ExceptionDetail())
return
def gofmt_required(files):
    # A file needs gofmt when it is a .go file outside test/,
    # except that files under test/bench/ are still formatted.
    keep = []
    for f in files:
        if not f.endswith('.go'):
            continue
        if f.startswith('test/') and not f.startswith('test/bench/'):
            continue
        keep.append(f)
    return keep
#######################################################################
# hg mail

@hgcommand
def mail(ui, repo, *pats, **opts):
    """mail a change for review
    Uploads a patch to the code review server and then sends mail
    to the reviewer and CC list asking for a review.
    """
    if codereview_disabled:
        raise hg_util.Abort(codereview_disabled)
    cl, err = CommandLineCL(ui, repo, pats, opts, op="mail", defaultcc=defaultcc)
    if err != "":
        raise hg_util.Abort(err)
    # Sync the patch to the server before mailing.
    cl.Upload(ui, repo, gofmt_just_warn=True)
    if not cl.reviewer:
        # If no reviewer is listed, assign the review to defaultcc.
        # This makes sure that it appears in the
        # codereview.appspot.com/user/defaultcc
        # page, so that it doesn't get dropped on the floor.
        if not defaultcc:
            raise hg_util.Abort("no reviewers listed in CL")
        # Move defaultcc from the CC list to the reviewer slot.
        cl.cc = Sub(cl.cc, defaultcc)
        cl.reviewer = defaultcc
        cl.Flush(ui, repo)
    if cl.files == []:
        raise hg_util.Abort("no changed files, not sending mail")
    cl.Mail(ui, repo)
#######################################################################
# hg p / hg pq / hg ps / hg pending

@hgcommand
def ps(ui, repo, *pats, **opts):
    """alias for hg p --short
    """
    # Force the short flag and delegate to pending.
    opts.update(short=True)
    return pending(ui, repo, *pats, **opts)
@hgcommand
def pq(ui, repo, *pats, **opts):
    """alias for hg p --quick
    """
    # Force the quick flag and delegate to pending.
    opts.update(quick=True)
    return pending(ui, repo, *pats, **opts)
@hgcommand
def pending(ui, repo, *pats, **opts):
    """show pending changes
    Lists pending changes followed by a list of unassigned but modified files.
    """
    if codereview_disabled:
        raise hg_util.Abort(codereview_disabled)
    quick = opts.get('quick', False)
    short = opts.get('short', False)
    # Either fast mode skips the (slow) codereview-server fetch.
    cls = LoadAllCL(ui, repo, web=not quick and not short)
    for name in sorted(cls.keys()):
        entry = cls[name]
        if short:
            ui.write(name + "\t" + line1(entry.desc) + "\n")
        else:
            ui.write(entry.PendingText(quick=quick) + "\n")
    if short:
        return 0
    # Follow up with modified files that belong to no CL.
    unassigned = DefaultFiles(ui, repo, [])
    if len(unassigned) > 0:
        text = "Changed files not in any CL:\n"
        for f in unassigned:
            text += "\t" + f + "\n"
        ui.write(text)
#######################################################################
# hg submit

def need_sync():
    # Uniform abort used whenever the local repository lags the remote.
    raise hg_util.Abort("local repository out of date; must sync before submit")
@hgcommand
def submit(ui, repo, *pats, **opts):
    """submit change to remote repository
    Submits change to remote repository.
    Bails out if the local repository is not in sync with the remote one.
    """
    if codereview_disabled:
        raise hg_util.Abort(codereview_disabled)
    # We already called this on startup but sometimes Mercurial forgets.
    set_mercurial_encoding_to_utf8()
    if not opts["no_incoming"] and hg_incoming(ui, repo):
        need_sync()
    cl, err = CommandLineCL(ui, repo, pats, opts, op="submit", defaultcc=defaultcc)
    if err != "":
        raise hg_util.Abort(err)
    # For copied CLs, commit on behalf of the original owner.
    user = None
    if cl.copied_from:
        user = cl.copied_from
    userline = CheckContributor(ui, repo, user)
    typecheck(userline, str)
    # Build the R=/TBR=/CC= trailer appended to the commit message.
    about = ""
    if cl.reviewer:
        about += "R=" + JoinComma([CutDomain(s) for s in cl.reviewer]) + "\n"
    if opts.get('tbr'):
        tbr = SplitCommaSpace(opts.get('tbr'))
        cl.reviewer = Add(cl.reviewer, tbr)
        about += "TBR=" + JoinComma([CutDomain(s) for s in tbr]) + "\n"
    if cl.cc:
        about += "CC=" + JoinComma([CutDomain(s) for s in cl.cc]) + "\n"
    if not cl.reviewer:
        raise hg_util.Abort("no reviewers listed in CL")
    if not cl.local:
        raise hg_util.Abort("cannot submit non-local CL")
    # upload, to sync current patch and also get change number if CL is new.
    if not cl.copied_from:
        cl.Upload(ui, repo, gofmt_just_warn=True)
    # check gofmt for real; allowed upload to warn in order to save CL.
    cl.Flush(ui, repo)
    CheckFormat(ui, repo, cl.files)
    about += "%s%s\n" % (server_url_base, cl.name)
    if cl.copied_from:
        about += "\nCommitter: " + CheckContributor(ui, repo, None) + "\n"
    typecheck(about, str)
    if not cl.mailed and not cl.copied_from: # in case this is TBR
        cl.Mail(ui, repo)
    # submit changes locally
    message = cl.desc.rstrip() + "\n\n" + about
    typecheck(message, str)
    set_status("pushing " + cl.name + " to remote server")
    if hg_outgoing(ui, repo):
        raise hg_util.Abort("local repository corrupt or out-of-phase with remote: found outgoing changes")
    old_heads = len(hg_heads(ui, repo).split())
    # Temporarily allow commits; the precommit hook normally blocks them.
    global commit_okay
    commit_okay = True
    ret = hg_commit(ui, repo, *['path:'+f for f in cl.files], message=message, user=userline)
    commit_okay = False
    if ret:
        raise hg_util.Abort("nothing changed")
    node = repo["-1"].node()
    # push to remote; if it fails for any reason, roll back
    try:
        new_heads = len(hg_heads(ui, repo).split())
        if old_heads != new_heads and not (old_heads == 0 and new_heads == 1):
            # Created new head, so we weren't up to date.
            need_sync()
        # Push changes to remote. If it works, we're committed. If not, roll back.
        try:
            if hg_push(ui, repo):
                raise hg_util.Abort("push error")
        except hg_error.Abort, e:
            if e.message.find("push creates new heads") >= 0:
                # Remote repository had changes we missed.
                need_sync()
            raise
    except:
        # Any failure undoes the local commit before re-raising.
        real_rollback()
        raise
    # We're committed. Upload final patch, close review, add commit message.
    changeURL = hg_node.short(node)
    url = ui.expandpath("default")
    # Recognize Google Code hosting URLs so the "Submitted as" message
    # can link to the change detail page.
    m = re.match("(^https?://([^@/]+@)?([^.]+)\.googlecode\.com/hg/?)" + "|" +
        "(^https?://([^@/]+@)?code\.google\.com/p/([^/.]+)(\.[^./]+)?/?)", url)
    if m:
        if m.group(1): # prj.googlecode.com/hg/ case
            changeURL = "https://code.google.com/p/%s/source/detail?r=%s" % (m.group(3), changeURL)
        elif m.group(4) and m.group(7): # code.google.com/p/prj.subrepo/ case
            changeURL = "https://code.google.com/p/%s/source/detail?r=%s&repo=%s" % (m.group(6), changeURL, m.group(7)[1:])
        elif m.group(4): # code.google.com/p/prj/ case
            changeURL = "https://code.google.com/p/%s/source/detail?r=%s" % (m.group(6), changeURL)
        else:
            print >>sys.stderr, "URL: ", url
    else:
        print >>sys.stderr, "URL: ", url
    pmsg = "*** Submitted as " + changeURL + " ***\n\n" + message
    # When posting, move reviewers to CC line,
    # so that the issue stops showing up in their "My Issues" page.
    PostMessage(ui, cl.name, pmsg, reviewers="", cc=JoinComma(cl.reviewer+cl.cc))
    if not cl.copied_from:
        EditDesc(cl.name, closed=True, private=cl.private)
    cl.Delete(ui, repo)
    c = repo[None]
    if c.branch() == releaseBranch and not c.modified() and not c.added() and not c.removed():
        # A clean client on the release branch returns to default.
        ui.write("switching from %s to default branch.\n" % releaseBranch)
        err = hg_clean(repo, "default")
        if err:
            return err
    return 0
#######################################################################
# hg sync

@hgcommand
def sync(ui, repo, **opts):
    """synchronize with remote repository
    Incorporates recent changes from the remote repository
    into the local repository.
    """
    if codereview_disabled:
        raise hg_util.Abort(codereview_disabled)
    # Unless --local was given, pull and update from the remote first.
    if not opts["local"]:
        pull_err = hg_pull(ui, repo, update=True)
        if pull_err:
            return pull_err
    sync_changes(ui, repo)
def sync_changes(ui, repo):
    # Housekeeping after a pull: close CLs that the pull shows as
    # submitted, then prune unmodified files from the remaining CLs.
    # Look through recent change log descriptions to find
    # potential references to http://.*/our-CL-number.
    # Double-check them by looking at the Rietveld log.
    for rev in hg_log(ui, repo, limit=100, template="{node}\n").split():
        desc = repo[rev].description().strip()
        for clname in re.findall('(?m)^https?://(?:[^\n]+)/([0-9]+)$', desc):
            if IsLocalCL(ui, repo, clname) and IsRietveldSubmitted(ui, clname, repo[rev].hex()):
                ui.warn("CL %s submitted as %s; closing\n" % (clname, repo[rev]))
                cl, err = LoadCL(ui, repo, clname, web=False)
                if err != "":
                    ui.warn("loading CL %s: %s\n" % (clname, err))
                    continue
                if not cl.copied_from:
                    # Only close reviews we own on the server.
                    EditDesc(cl.name, closed=True, private=cl.private)
                cl.Delete(ui, repo)
    # Remove files that are not modified from the CLs in which they appear.
    all = LoadAllCL(ui, repo, web=False)
    changed = ChangedFiles(ui, repo, [])
    for cl in all.values():
        extra = Sub(cl.files, changed)
        if extra:
            ui.warn("Removing unmodified files from CL %s:\n" % (cl.name,))
            for f in extra:
                ui.warn("\t%s\n" % (f,))
            cl.files = Sub(cl.files, extra)
            cl.Flush(ui, repo)
        if not cl.files:
            # Empty CLs are only reported; deleting is left to the user.
            if not cl.copied_from:
                ui.warn("CL %s has no files; delete (abandon) with hg change -d %s\n" % (cl.name, cl.name))
            else:
                ui.warn("CL %s has no files; delete locally with hg change -D %s\n" % (cl.name, cl.name))
    return 0
#######################################################################
# hg upload
@hgcommand
def upload(ui, repo, name, **opts):
"""upload diffs to the code review server
Uploads the current modifications for a given change to the server.
"""
if codereview_disabled:
raise hg_util.Abort(codereview_disabled)
repo.ui.quiet = True
cl, err = LoadCL(ui, repo, name, web=True)
if err != "":
raise hg_util.Abort(err)
if not cl.local:
raise hg_util.Abort("cannot upload non-local change")
cl.Upload(ui, repo)
print "%s%s\n" % (server_url_base, cl.name)
return 0
#######################################################################
# Table of commands, supplied to Mercurial for installation.

# Options shared by the commands that create or mail a review
# (mail, submit).
review_opts = [
    ('r', 'reviewer', '', 'add reviewer'),
    ('', 'cc', '', 'add cc'),
    ('', 'tbr', '', 'add future reviewer'),
    ('m', 'message', '', 'change description (for new change)'),
]

# Mercurial command table: command name -> (function, options, synopsis).
cmdtable = {
    # The ^ means to show this command in the help text that
    # is printed when running hg with no arguments.
    "^change": (
        change,
        [
            ('d', 'delete', None, 'delete existing change list'),
            ('D', 'deletelocal', None, 'delete locally, but do not change CL on server'),
            ('i', 'stdin', None, 'read change list from standard input'),
            ('o', 'stdout', None, 'print change list to standard output'),
            ('p', 'pending', None, 'print pending summary to standard output'),
        ],
        "[-d | -D] [-i] [-o] change# or FILE ..."
    ),
    "^clpatch": (
        clpatch,
        [
            ('', 'ignore_hgapplydiff_failure', None, 'create CL metadata even if hgapplydiff fails'),
            ('', 'no_incoming', None, 'disable check for incoming changes'),
        ],
        "change#"
    ),
    # Would prefer to call this codereview-login, but then
    # hg help codereview prints the help for this command
    # instead of the help for the extension.
    "code-login": (
        code_login,
        [],
        "",
    ),
    "^download": (
        download,
        [],
        "change#"
    ),
    "^file": (
        file,
        [
            ('d', 'delete', None, 'delete files from change list (but not repository)'),
        ],
        "[-d] change# FILE ..."
    ),
    "^gofmt": (
        gofmt,
        [
            ('l', 'list', None, 'list files that would change, but do not edit them'),
        ],
        "FILE ..."
    ),
    "^pending|p": (
        pending,
        [
            ('s', 'short', False, 'show short result form'),
            ('', 'quick', False, 'do not consult codereview server'),
        ],
        "[FILE ...]"
    ),
    "^ps": (
        ps,
        [],
        "[FILE ...]"
    ),
    "^pq": (
        pq,
        [],
        "[FILE ...]"
    ),
    "^mail": (
        mail,
        review_opts + [
        ] + hg_commands.walkopts,
        "[-r reviewer] [--cc cc] [change# | file ...]"
    ),
    "^release-apply": (
        release_apply,
        [
            ('', 'ignore_hgapplydiff_failure', None, 'create CL metadata even if hgapplydiff fails'),
            ('', 'no_incoming', None, 'disable check for incoming changes'),
        ],
        "change#"
    ),
    # TODO: release-start, release-tag, weekly-tag
    "^submit": (
        submit,
        review_opts + [
            ('', 'no_incoming', None, 'disable initial incoming check (for testing)'),
        ] + hg_commands.walkopts + hg_commands.commitopts + hg_commands.commitopts2,
        "[-r reviewer] [--cc cc] [change# | file ...]"
    ),
    "^sync": (
        sync,
        [
            ('', 'local', None, 'do not pull changes from remote repository')
        ],
        "[--local]",
    ),
    "^undo": (
        undo,
        [
            ('', 'ignore_hgapplydiff_failure', None, 'create CL metadata even if hgapplydiff fails'),
            ('', 'no_incoming', None, 'disable check for incoming changes'),
        ],
        "change#"
    ),
    "^upload": (
        upload,
        [],
        "change#"
    ),
}
#######################################################################
# Mercurial extension initialization

def norollback(*pats, **opts):
    """(disabled when using this extension)"""
    # Installed in place of repo.rollback by reposetup: rollback would
    # silently remove a commit this extension believes was submitted.
    raise hg_util.Abort("codereview extension enabled; use undo instead of rollback")
# Guard so that reposetup's work runs only once per process.
codereview_init = False

def reposetup(ui, repo):
    # Mercurial extension hook: configure the codereview machinery for
    # the local repository, or record why it is disabled.
    global codereview_disabled
    global defaultcc
    # reposetup gets called both for the local repository
    # and also for any repository we are pulling or pushing to.
    # Only initialize the first time.
    global codereview_init
    if codereview_init:
        return
    codereview_init = True
    # Read repository-specific options from lib/codereview/codereview.cfg or codereview.cfg.
    root = ''
    try:
        root = repo.root
    except:
        # Yes, repo might not have root; see issue 959.
        codereview_disabled = 'codereview disabled: repository has no root'
        return
    repo_config_path = ''
    # Prefer the Go-tree location; fall back to the repository root.
    p1 = root + '/lib/codereview/codereview.cfg'
    p2 = root + '/codereview.cfg'
    if os.access(p1, os.F_OK):
        repo_config_path = p1
    else:
        repo_config_path = p2
    try:
        f = open(repo_config_path)
        for line in f:
            if line.startswith('defaultcc:'):
                defaultcc = SplitCommaSpace(line[len('defaultcc:'):])
            if line.startswith('contributors:'):
                global contributorsURL
                contributorsURL = line[len('contributors:'):].strip()
    except:
        # Without a config file the extension stays disabled.
        codereview_disabled = 'codereview disabled: cannot open ' + repo_config_path
        return
    remote = ui.config("paths", "default", "")
    if remote.find("://") < 0:
        raise hg_util.Abort("codereview: default path '%s' is not a URL" % (remote,))
    InstallMatch(ui, repo)
    RietveldSetup(ui, repo)
    # Disable the Mercurial commands that might change the repository.
    # Only commands in this extension are supposed to do that.
    ui.setconfig("hooks", "precommit.codereview", precommithook)
    # Rollback removes an existing commit. Don't do that either.
    global real_rollback
    real_rollback = repo.rollback
    repo.rollback = norollback
#######################################################################
# Wrappers around upload.py for interacting with Rietveld
from HTMLParser import HTMLParser
# HTML form parser
class FormParser(HTMLParser):
    # Scrapes <input> and <textarea> name/value pairs out of an HTML
    # page into self.map; used by GetForm on Rietveld's form pages.
    def __init__(self):
        self.map = {}       # field name -> field value
        self.curtag = None  # name of the <textarea> currently open
        self.curdata = None # text accumulated for that textarea
        HTMLParser.__init__(self)
    def handle_starttag(self, tag, attrs):
        if tag == "input":
            # <input name=... value=...>: recorded immediately.
            key = None
            value = ''
            for a in attrs:
                if a[0] == 'name':
                    key = a[1]
                if a[0] == 'value':
                    value = a[1]
            if key is not None:
                self.map[key] = value
        if tag == "textarea":
            # <textarea name=...>: start accumulating character data.
            key = None
            for a in attrs:
                if a[0] == 'name':
                    key = a[1]
            if key is not None:
                self.curtag = key
                self.curdata = ''
    def handle_endtag(self, tag):
        # Closing </textarea> commits the accumulated text.
        if tag == "textarea" and self.curtag is not None:
            self.map[self.curtag] = self.curdata
            self.curtag = None
            self.curdata = None
    def handle_charref(self, name):
        # Numeric entity, e.g. &#65;
        self.handle_data(unichr(int(name)))
    def handle_entityref(self, name):
        # Named entity, e.g. &amp;; unknown names pass through literally.
        import htmlentitydefs
        if name in htmlentitydefs.entitydefs:
            self.handle_data(htmlentitydefs.entitydefs[name])
        else:
            self.handle_data("&" + name + ";")
    def handle_data(self, data):
        # Text only matters while inside a <textarea>.
        if self.curdata is not None:
            self.curdata += data
def JSONGet(ui, path):
    # Fetch path from the codereview server and decode the JSON reply.
    # Any failure (network, auth, malformed JSON) is reported as a warning
    # and converted to None so callers can degrade gracefully.
    try:
        raw = MySend(path, force_auth=False)
        typecheck(raw, str)
        return fix_json(json.loads(raw))
    except:
        ui.warn("JSONGet %s: %s\n" % (path, ExceptionDetail()))
        return None
# Clean up json parser output to match our expectations:
#   * all strings are UTF-8-encoded str, not unicode.
#   * missing fields are missing, not None,
#     so that d.get("foo", defaultvalue) works.
def fix_json(x):
    if type(x) in (str, int, float, bool, type(None)):
        pass
    elif type(x) is unicode:
        x = x.encode("utf-8")
    elif type(x) is list:
        # Recurse in place over every element.
        for i, elem in enumerate(x):
            x[i] = fix_json(elem)
    elif type(x) is dict:
        # Recurse over values, dropping keys whose value is None.
        stale = []
        for k in x:
            if x[k] is None:
                stale.append(k)
            else:
                x[k] = fix_json(x[k])
        for k in stale:
            del x[k]
    else:
        raise hg_util.Abort("unknown type " + str(type(x)) + " in fix_json")
    if type(x) is str:
        # Normalize server line endings.
        x = x.replace('\r\n', '\n')
    return x
def IsRietveldSubmitted(ui, clname, hex):
    """Report whether Rietveld records CL clname as submitted at revision hex.

    Scans the issue's message log for the "*** Submitted as <rev> ***"
    marker and accepts it when the recorded hash is at least 8 hex digits
    and is a prefix of hex.  Returns False if the issue cannot be loaded.
    """
    # Named 'resp' (the original used 'dict', shadowing the builtin).
    resp = JSONGet(ui, "/api/" + clname + "?messages=true")
    if resp is None:
        return False
    for msg in resp.get("messages", []):
        text = msg.get("text", "")
        m = re.match('\*\*\* Submitted as [^*]*?([0-9a-f]+) \*\*\*', text)
        if m is not None and len(m.group(1)) >= 8 and hex.startswith(m.group(1)):
            return True
    return False
def IsRietveldMailed(cl):
    # A CL counts as mailed once any of its review messages contains
    # the standard "please review" boilerplate.
    for msg in cl.dict.get("messages", []):
        text = msg.get("text", "")
        if text.find("I'd like you to review this change") >= 0:
            return True
    return False
def DownloadCL(ui, repo, clname):
    # Fetch CL metadata and its most recent patch set from Rietveld.
    # Returns (cl, vers, diffdata, err); on failure the first three are
    # None and err describes the problem.  vers is the base revision the
    # diff was made against ("" when unknown).
    set_status("downloading CL " + clname)
    cl, err = LoadCL(ui, repo, clname, web=True)
    if err != "":
        return None, None, None, "error loading CL %s: %s" % (clname, err)
    # Find most recent diff
    diffs = cl.dict.get("patchsets", [])
    if not diffs:
        return None, None, None, "CL has no patch sets"
    patchid = diffs[-1]
    patchset = JSONGet(ui, "/api/" + clname + "/" + str(patchid))
    if patchset is None:
        return None, None, None, "error loading CL patchset %s/%d" % (clname, patchid)
    if patchset.get("patchset", 0) != patchid:
        return None, None, None, "malformed patchset information"
    vers = ""
    # A patchset message of the form "diff -r <node> ..." records the
    # base revision the diff applies to.
    msg = patchset.get("message", "").split()
    if len(msg) >= 3 and msg[0] == "diff" and msg[1] == "-r":
        vers = msg[2]
    diff = "/download/issue" + clname + "_" + str(patchid) + ".diff"
    diffdata = MySend(diff, force_auth=False)
    # Print warning if email is not in CONTRIBUTORS file.
    email = cl.dict.get("owner_email", "")
    if not email:
        return None, None, None, "cannot find owner for %s" % (clname)
    him = FindContributor(ui, repo, email)
    me = FindContributor(ui, repo, None)
    if him == me:
        # Downloading my own CL: remember whether it was already mailed.
        cl.mailed = IsRietveldMailed(cl)
    else:
        # Someone else's CL: record provenance for later commits.
        cl.copied_from = email
    return cl, vers, diffdata, ""
def MySend(request_path, payload=None,
content_type="application/octet-stream",
timeout=None, force_auth=True,
**kwargs):
"""Run MySend1 maybe twice, because Rietveld is unreliable."""
try:
return MySend1(request_path, payload, content_type, timeout, force_auth, **kwargs)
except Exception, e:
if type(e) != urllib2.HTTPError or e.code != 500: # only retry on HTTP 500 error
raise
print >>sys.stderr, "Loading "+request_path+": "+ExceptionDetail()+"; trying again in 2 seconds."
time.sleep(2)
return MySend1(request_path, payload, content_type, timeout, force_auth, **kwargs)
# Like upload.py Send but only authenticates when the
# redirect is to www.google.com/accounts. This keeps
# unnecessary redirects from happening during testing.
def MySend1(request_path, payload=None,
        content_type="application/octet-stream",
        timeout=None, force_auth=True,
        **kwargs):
    """Sends an RPC and returns the response.

    Args:
    request_path: The path to send the request to, eg /api/appversion/create.
    payload: The body of the request, or None to send an empty request.
    content_type: The Content-Type header to use.
    timeout: timeout in seconds; default None i.e. no timeout.
    (Note: for large requests on OS X, the timeout doesn't work right.)
    kwargs: Any keyword arguments are converted into query string parameters.

    Returns:
    The response body, as a string.
    """
    # TODO: Don't require authentication.  Let the server say
    # whether it is necessary.
    global rpc
    # Lazily create the shared RPC server on first use.
    if rpc == None:
        rpc = GetRpcServer(upload_options)
    self = rpc
    if not self.authenticated and force_auth:
        self._Authenticate()
    if request_path is None:
        return
    if timeout is None:
        timeout = 30 # seconds
    # Install the requested timeout for the duration of the call and
    # restore the previous default afterwards.
    old_timeout = socket.getdefaulttimeout()
    socket.setdefaulttimeout(timeout)
    try:
        tries = 0
        while True:
            tries += 1
            args = dict(kwargs)
            url = "https://%s%s" % (self.host, request_path)
            if args:
                url += "?" + urllib.urlencode(args)
            req = self._CreateRequest(url=url, data=payload)
            req.add_header("Content-Type", content_type)
            try:
                f = self.opener.open(req)
                response = f.read()
                f.close()
                # Translate \r\n into \n, because Rietveld doesn't.
                response = response.replace('\r\n', '\n')
                # who knows what urllib will give us
                if type(response) == unicode:
                    response = response.encode("utf-8")
                typecheck(response, str)
                return response
            except urllib2.HTTPError, e:
                if tries > 3:
                    raise
                elif e.code == 401:
                    # Stale credentials: authenticate and retry.
                    self._Authenticate()
                elif e.code == 302:
                    # Only a redirect to the Google login page means
                    # "authenticate"; any other redirect is treated as
                    # success with an empty body.
                    loc = e.info()["location"]
                    if not loc.startswith('https://www.google.com/a') or loc.find('/ServiceLogin') < 0:
                        return ''
                    self._Authenticate()
                else:
                    raise
    finally:
        socket.setdefaulttimeout(old_timeout)
def GetForm(url):
    # Parse the HTML form at url into a field-name -> field-value dict.
    parser = FormParser()
    parser.feed(ustr(MySend(url))) # f.feed wants unicode
    parser.close()
    # convert back to utf-8 to restore sanity
    fields = {}
    for name, value in parser.map.items():
        fields[name.encode("utf-8")] = value.replace("\r\n", "\n").encode("utf-8")
    return fields
def EditDesc(issue, subject=None, desc=None, reviewers=None, cc=None, closed=False, private=False):
    # Update an existing issue's metadata through its /edit form.
    # Only the arguments that are not None (or True, for the flags)
    # change the corresponding form field.
    set_status("uploading change to description")
    fields = GetForm("/" + issue + "/edit")
    for key, val in (('subject', subject), ('description', desc),
            ('reviewers', reviewers), ('cc', cc)):
        if val is not None:
            fields[key] = val
    if closed:
        fields['closed'] = "checked"
    if private:
        fields['private'] = "checked"
    ctype, body = EncodeMultipartFormData(fields.items(), [])
    response = MySend("/" + issue + "/edit", body, content_type=ctype)
    if response != "":
        print >>sys.stderr, "Error editing description:\n" + "Sent form: \n", fields, "\n", response
        sys.exit(2)
def PostMessage(ui, issue, message, reviewers=None, cc=None, send_mail=True, subject=None):
set_status("uploading message")
form_fields = GetForm("/" + issue + "/publish")
if reviewers is not None:
form_fields['reviewers'] = reviewers
if cc is not None:
form_fields['cc'] = cc
if send_mail:
form_fields['send_mail'] = "checked"
else:
del form_fields['send_mail']
if subject is not None:
form_fields['subject'] = subject
form_fields['message'] = message
form_fields['message_only'] = '1' # Don't include draft comments
if reviewers is not None or cc is not None:
form_fields['message_only'] = '' # Must set '' in order to override cc/reviewer
ctype = "applications/x-www-form-urlencoded"
body = urllib.urlencode(form_fields)
response = MySend("/" + issue + "/publish", body, content_type=ctype)
if response != "":
print response
sys.exit(2)
# Plain attribute bag used as the options object handed to upload.py's
# GetRpcServer; RietveldSetup fills in its fields.
class opt(object):
    pass
def RietveldSetup(ui, repo):
    # Initialize the upload.py globals from the hgrc [codereview]
    # section; called from reposetup once per process.
    global force_google_account
    global rpc
    global server
    global server_url_base
    global upload_options
    global verbosity
    if not ui.verbose:
        verbosity = 0
    # Config options.
    x = ui.config("codereview", "server")
    if x is not None:
        server = x
    # TODO(rsc): Take from ui.username?
    email = None
    x = ui.config("codereview", "email")
    if x is not None:
        email = x
    server_url_base = "https://" + server + "/"
    testing = ui.config("codereview", "testing")
    force_google_account = ui.configbool("codereview", "force_google_account", False)
    # Build the options object upload.py's GetRpcServer expects.
    upload_options = opt()
    upload_options.email = email
    upload_options.host = None
    upload_options.verbose = 0
    upload_options.description = None
    upload_options.description_file = None
    upload_options.reviewers = None
    upload_options.cc = None
    upload_options.message = None
    upload_options.issue = None
    upload_options.download_base = False
    upload_options.revision = None
    upload_options.send_mail = False
    upload_options.vcs = None
    upload_options.server = server
    upload_options.save_cookies = True
    if testing:
        upload_options.save_cookies = False
        upload_options.email = "test@example.com"
    # The RPC server is created lazily by MySend1 on first use.
    rpc = None
    global releaseBranch
    tags = repo.branchtags().keys()
    if 'release-branch.go10' in tags:
        # NOTE(rsc): This tags.sort is going to get the wrong
        # answer when comparing release-branch.go9 with
        # release-branch.go10. It will be a while before we care.
        raise hg_util.Abort('tags.sort needs to be fixed for release-branch.go10')
    tags.sort()
    for t in tags:
        if t.startswith('release-branch.go'):
            # The highest-sorting release branch wins.
            releaseBranch = t
#######################################################################
# http://codereview.appspot.com/static/upload.py, heavily edited.
#!/usr/bin/env python
#
# Copyright 2007 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tool for uploading diffs from a version control system to the codereview app.
Usage summary: upload.py [options] [-- diff_options]
Diff options are passed to the diff command of the underlying system.
Supported version control systems:
Git
Mercurial
Subversion
It is important for Git/Mercurial users to specify a tree/node/branch to diff
against by using the '--rev' option.
"""
# This code is derived from appcfg.py in the App Engine SDK (open source),
# and from ASPN recipe #146306.
import cookielib
import getpass
import logging
import mimetypes
import optparse
import os
import re
import socket
import subprocess
import sys
import urllib
import urllib2
import urlparse
# The md5 module was deprecated in Python 2.5.
try:
from hashlib import md5
except ImportError:
from md5 import md5
try:
import readline
except ImportError:
pass
# Module-level configuration for the uploader.
# The logging verbosity:
#  0: Errors only.
#  1: Status messages.
#  2: Info logs.
#  3: Debug logs.
# NOTE(review): verbosity appears to be overridden from command-line options
# elsewhere in this script -- confirm before relying on the default.
verbosity = 1
# Max size of patch or base file.
MAX_UPLOAD_SIZE = 900 * 1024
# whitelist for non-binary filetypes which do not start with "text/"
# .mm (Objective-C) shows up as application/x-freemind on my Linux box.
TEXT_MIMETYPES = [
    'application/javascript',
    'application/x-javascript',
    'application/x-freemind'
]
def GetEmail(prompt):
    """Prompts the user for their email address and returns it.

    The last used email address is saved to a file and offered up as a
    suggestion to the user. If the user presses enter without typing in
    anything the last used email address is used. If the user enters a new
    address, it is saved for next time we prompt.
    """
    last_email_file_name = os.path.expanduser("~/.last_codereview_email_address")
    last_email = ""
    if os.path.exists(last_email_file_name):
        try:
            # 'with' guarantees the file is closed even if readline() raises;
            # the original leaked the handle on error.
            with open(last_email_file_name, "r") as last_email_file:
                last_email = last_email_file.readline().strip("\n")
            prompt += " [%s]" % last_email
        except IOError:
            # Best-effort: a missing/unreadable cache file just means no
            # suggestion is offered.
            pass
    email = raw_input(prompt + ": ").strip()
    if email:
        try:
            with open(last_email_file_name, "w") as last_email_file:
                last_email_file.write(email)
        except IOError:
            # Failing to persist the address is not fatal.
            pass
    else:
        email = last_email
    return email
def StatusUpdate(msg):
    """Print a status message to stdout.

    The message is suppressed when the module-level ``verbosity`` is 0.

    Args:
        msg: The string to print.
    """
    if verbosity <= 0:
        return
    print(msg)
def ErrorExit(msg):
    """Write *msg* (plus a newline) to stderr and exit with status 1."""
    sys.stderr.write("%s\n" % (msg,))
    sys.exit(1)
class ClientLoginError(urllib2.HTTPError):
    """Raised to indicate there was an error authenticating with ClientLogin.

    Carries the parsed key/value response body from the failed ClientLogin
    call in ``args``; the server-supplied error code replaces ``msg``.
    """

    def __init__(self, url, code, msg, headers, args):
        urllib2.HTTPError.__init__(self, url, code, msg, headers, None)
        self.args = args
        # .reason is now a read-only property based on .msg
        # this means we ignore 'msg', but that seems to work fine.
        # NOTE(review): raises KeyError if the response body lacked an
        # "Error" entry -- confirm all ClientLogin 403 bodies include one.
        self.msg = args["Error"]
class AbstractRpcServer(object):
"""Provides a common interface for a simple RPC server."""
def __init__(self, host, auth_function, host_override=None, extra_headers={}, save_cookies=False):
"""Creates a new HttpRpcServer.
Args:
host: The host to send requests to.
auth_function: A function that takes no arguments and returns an
(email, password) tuple when called. Will be called if authentication
is required.
host_override: The host header to send to the server (defaults to host).
extra_headers: A dict of extra headers to append to every request.
save_cookies: If True, save the authentication cookies to local disk.
If False, use an in-memory cookiejar instead. Subclasses must
implement this functionality. Defaults to False.
"""
self.host = host
self.host_override = host_override
self.auth_function = auth_function
self.authenticated = False
self.extra_headers = extra_headers
self.save_cookies = save_cookies
self.opener = self._GetOpener()
if self.host_override:
logging.info("Server: %s; Host: %s", self.host, self.host_override)
else:
logging.info("Server: %s", self.host)
def _GetOpener(self):
"""Returns an OpenerDirector for making HTTP requests.
Returns:
A urllib2.OpenerDirector object.
"""
raise NotImplementedError()
def _CreateRequest(self, url, data=None):
"""Creates a new urllib request."""
logging.debug("Creating request for: '%s' with payload:\n%s", url, data)
req = urllib2.Request(url, data=data)
if self.host_override:
req.add_header("Host", self.host_override)
for key, value in self.extra_headers.iteritems():
req.add_header(key, value)
return req
def _GetAuthToken(self, email, password):
"""Uses ClientLogin to authenticate the user, returning an auth token.
Args:
email: The user's email address
password: The user's password
Raises:
ClientLoginError: If there was an error authenticating with ClientLogin.
HTTPError: If there was some other form of HTTP error.
Returns:
The authentication token returned by ClientLogin.
"""
account_type = "GOOGLE"
if self.host.endswith(".google.com") and not force_google_account:
# Needed for use inside Google.
account_type = "HOSTED"
req = self._CreateRequest(
url="https://www.google.com/accounts/ClientLogin",
data=urllib.urlencode({
"Email": email,
"Passwd": password,
"service": "ah",
"source": "rietveld-codereview-upload",
"accountType": account_type,
}),
)
try:
response = self.opener.open(req)
response_body = response.read()
response_dict = dict(x.split("=") for x in response_body.split("\n") if x)
return response_dict["Auth"]
except urllib2.HTTPError, e:
if e.code == 403:
body = e.read()
response_dict = dict(x.split("=", 1) for x in body.split("\n") if x)
raise ClientLoginError(req.get_full_url(), e.code, e.msg, e.headers, response_dict)
else:
raise
def _GetAuthCookie(self, auth_token):
"""Fetches authentication cookies for an authentication token.
Args:
auth_token: The authentication token returned by ClientLogin.
Raises:
HTTPError: If there was an error fetching the authentication cookies.
"""
# This is a dummy value to allow us to identify when we're successful.
continue_location = "http://localhost/"
args = {"continue": continue_location, "auth": auth_token}
req = self._CreateRequest("https://%s/_ah/login?%s" % (self.host, urllib.urlencode(args)))
try:
response = self.opener.open(req)
except urllib2.HTTPError, e:
response = e
if (response.code != 302 or
response.info()["location"] != continue_location):
raise urllib2.HTTPError(req.get_full_url(), response.code, response.msg, response.headers, response.fp)
self.authenticated = True
def _Authenticate(self):
"""Authenticates the user.
The authentication process works as follows:
1) We get a username and password from the user
2) We use ClientLogin to obtain an AUTH token for the user
(see http://code.google.com/apis/accounts/AuthForInstalledApps.html).
3) We pass the auth token to /_ah/login on the server to obtain an
authentication cookie. If login was successful, it tries to redirect
us to the URL we provided.
If we attempt to access the upload API without first obtaining an
authentication cookie, it returns a 401 response (or a 302) and
directs us to authenticate ourselves with ClientLogin.
"""
for i in range(3):
credentials = self.auth_function()
try:
auth_token = self._GetAuthToken(credentials[0], credentials[1])
except ClientLoginError, e:
if e.msg == "BadAuthentication":
print >>sys.stderr, "Invalid username or password."
continue
if e.msg == "CaptchaRequired":
print >>sys.stderr, (
"Please go to\n"
"https://www.google.com/accounts/DisplayUnlockCaptcha\n"
"and verify you are a human. Then try again.")
break
if e.msg == "NotVerified":
print >>sys.stderr, "Account not verified."
break
if e.msg == "TermsNotAgreed":
print >>sys.stderr, "User has not agreed to TOS."
break
if e.msg == "AccountDeleted":
print >>sys.stderr, "The user account has been deleted."
break
if e.msg == "AccountDisabled":
print >>sys.stderr, "The user account has been disabled."
break
if e.msg == "ServiceDisabled":
print >>sys.stderr, "The user's access to the service has been disabled."
break
if e.msg == "ServiceUnavailable":
print >>sys.stderr, "The service is not available; try again later."
break
raise
self._GetAuthCookie(auth_token)
return
def Send(self, request_path, payload=None,
content_type="application/octet-stream",
timeout=None,
**kwargs):
"""Sends an RPC and returns the response.
Args:
request_path: The path to send the request to, eg /api/appversion/create.
payload: The body of the request, or None to send an empty request.
content_type: The Content-Type header to use.
timeout: timeout in seconds; default None i.e. no timeout.
(Note: for large requests on OS X, the timeout doesn't work right.)
kwargs: Any keyword arguments are converted into query string parameters.
Returns:
The response body, as a string.
"""
# TODO: Don't require authentication. Let the server say
# whether it is necessary.
if not self.authenticated:
self._Authenticate()
old_timeout = socket.getdefaulttimeout()
socket.setdefaulttimeout(timeout)
try:
tries = 0
while True:
tries += 1
args = dict(kwargs)
url = "https://%s%s" % (self.host, request_path)
if args:
url += "?" + urllib.urlencode(args)
req = self._CreateRequest(url=url, data=payload)
req.add_header("Content-Type", content_type)
try:
f = self.opener.open(req)
response = f.read()
f.close()
return response
except urllib2.HTTPError, e:
if tries > 3:
raise
elif e.code == 401 or e.code == 302:
self._Authenticate()
else:
raise
finally:
socket.setdefaulttimeout(old_timeout)
class HttpRpcServer(AbstractRpcServer):
    """Provides a simplified RPC-style interface for HTTP requests."""

    def _Authenticate(self):
        """Save the cookie jar after authentication."""
        super(HttpRpcServer, self)._Authenticate()
        if self.save_cookies:
            StatusUpdate("Saving authentication cookies to %s" % self.cookie_file)
            self.cookie_jar.save()

    def _GetOpener(self):
        """Returns an OpenerDirector that supports cookies and ignores redirects.

        Returns:
            A urllib2.OpenerDirector object.
        """
        # Built by hand (instead of urllib2.build_opener) so that no redirect
        # handler is installed: _GetAuthCookie relies on seeing the raw 302.
        opener = urllib2.OpenerDirector()
        opener.add_handler(urllib2.ProxyHandler())
        opener.add_handler(urllib2.UnknownHandler())
        opener.add_handler(urllib2.HTTPHandler())
        opener.add_handler(urllib2.HTTPDefaultErrorHandler())
        opener.add_handler(urllib2.HTTPSHandler())
        opener.add_handler(urllib2.HTTPErrorProcessor())
        if self.save_cookies:
            # NOTE(review): 'server' is a module-level global set elsewhere in
            # this script -- confirm it is defined before first use.
            self.cookie_file = os.path.expanduser("~/.codereview_upload_cookies_" + server)
            self.cookie_jar = cookielib.MozillaCookieJar(self.cookie_file)
            if os.path.exists(self.cookie_file):
                try:
                    self.cookie_jar.load()
                    self.authenticated = True
                    StatusUpdate("Loaded authentication cookies from %s" % self.cookie_file)
                except (cookielib.LoadError, IOError):
                    # Failed to load cookies - just ignore them.
                    pass
            else:
                # Create an empty cookie file with mode 600
                # (0600 is the Python 2 octal literal syntax).
                fd = os.open(self.cookie_file, os.O_CREAT, 0600)
                os.close(fd)
            # Always chmod the cookie file
            os.chmod(self.cookie_file, 0600)
        else:
            # Don't save cookies across runs of update.py.
            self.cookie_jar = cookielib.CookieJar()
        opener.add_handler(urllib2.HTTPCookieProcessor(self.cookie_jar))
        return opener
def GetRpcServer(options):
    """Returns an instance of an AbstractRpcServer.

    Returns:
        A new AbstractRpcServer, on which RPC calls can be made.
    """
    rpc_server_class = HttpRpcServer

    def GetUserCredentials():
        """Prompts the user for a username and password."""
        # Disable status prints so they don't obscure the password prompt.
        global global_status
        st = global_status
        global_status = None
        email = options.email
        if email is None:
            email = GetEmail("Email (login for uploading to %s)" % options.server)
        password = getpass.getpass("Password for %s: " % email)
        # Put status back.
        global_status = st
        return (email, password)

    # If this is the dev_appserver, use fake authentication.
    host = (options.host or options.server).lower()
    if host == "localhost" or host.startswith("localhost:"):
        email = options.email
        if email is None:
            email = "test@example.com"
            logging.info("Using debug user %s. Override with --email" % email)
        # The lambda captures 'email' so the dev server accepts any password.
        server = rpc_server_class(
            options.server,
            lambda: (email, "password"),
            host_override=options.host,
            extra_headers={"Cookie": 'dev_appserver_login="%s:False"' % email},
            save_cookies=options.save_cookies)
        # Don't try to talk to ClientLogin.
        server.authenticated = True
        return server

    return rpc_server_class(options.server, GetUserCredentials,
                            host_override=options.host, save_cookies=options.save_cookies)
def EncodeMultipartFormData(fields, files):
    """Encode form fields for multipart/form-data.

    Args:
        fields: A sequence of (name, value) elements for regular form fields.
        files: A sequence of (name, filename, value) elements for data to be
            uploaded as files.

    Returns:
        (content_type, body) ready for httplib.HTTP instance.

    Source:
        http://aspn.activestate.com/ASPN/Cookbook/Python/Recipe/146306
    """
    BOUNDARY = '-M-A-G-I-C---B-O-U-N-D-A-R-Y-'
    CRLF = '\r\n'
    parts = []
    # Plain form fields first, each in its own boundary-delimited section.
    for (name, value) in fields:
        typecheck(name, str)
        typecheck(value, str)
        parts.extend([
            '--' + BOUNDARY,
            'Content-Disposition: form-data; name="%s"' % name,
            '',
            value,
        ])
    # Then the file payloads, which additionally carry a filename and type.
    for (name, filename, value) in files:
        typecheck(name, str)
        typecheck(filename, str)
        typecheck(value, str)
        parts.extend([
            '--' + BOUNDARY,
            'Content-Disposition: form-data; name="%s"; filename="%s"' % (name, filename),
            'Content-Type: %s' % GetContentType(filename),
            '',
            value,
        ])
    # Closing boundary plus a trailing CRLF.
    parts.extend(['--' + BOUNDARY + '--', ''])
    content_type = 'multipart/form-data; boundary=%s' % BOUNDARY
    return content_type, CRLF.join(parts)
def GetContentType(filename):
    """Guess a MIME content-type from *filename*, defaulting to octet-stream."""
    guessed, _ = mimetypes.guess_type(filename)
    return guessed if guessed else 'application/octet-stream'
# Use a shell for subcommands on Windows to get a PATH search.
# Passed as the shell= argument to subprocess.Popen in RunShellWithReturnCode.
use_shell = sys.platform.startswith("win")
def RunShellWithReturnCode(command, print_output=False,
                           universal_newlines=True, env=os.environ):
    """Executes a command and returns the output from stdout and the return code.

    Args:
        command: Command to execute.
        print_output: If True, the output is printed to stdout.
            If False, both stdout and stderr are ignored.
        universal_newlines: Use universal_newlines flag (default: True).
        env: Environment mapping for the child process (default: os.environ).

    Returns:
        Tuple (output, return code)
    """
    logging.info("Running %s", command)
    p = subprocess.Popen(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE,
                         shell=use_shell, universal_newlines=universal_newlines, env=env)
    if print_output:
        # Echo stdout line by line while collecting it.
        output_array = []
        while True:
            line = p.stdout.readline()
            if not line:
                break
            print line.strip("\n")
            output_array.append(line)
        output = "".join(output_array)
    else:
        output = p.stdout.read()
    p.wait()
    # stderr is drained only after the process exits.
    # NOTE(review): a child that fills its stderr pipe before closing stdout
    # could deadlock here -- confirm expected commands produce little stderr.
    errout = p.stderr.read()
    if print_output and errout:
        print >>sys.stderr, errout
    p.stdout.close()
    p.stderr.close()
    return output, p.returncode
def RunShell(command, silent_ok=False, universal_newlines=True,
             print_output=False, env=os.environ):
    """Run *command* and return its stdout, aborting the program on failure.

    A non-zero exit status, or empty output when silent_ok is False, calls
    ErrorExit (which terminates the process).
    """
    output, status = RunShellWithReturnCode(command, print_output,
                                            universal_newlines, env)
    if status:
        ErrorExit("Got error status from %s:\n%s" % (command, output))
    if not output and not silent_ok:
        ErrorExit("No output from %s" % command)
    return output
class VersionControlSystem(object):
    """Abstract base class providing an interface to the VCS."""

    def __init__(self, options):
        """Constructor.

        Args:
            options: Command line options.
        """
        self.options = options

    def GenerateDiff(self, args):
        """Return the current diff as a string.

        Args:
            args: Extra arguments to pass to the diff command.
        """
        raise NotImplementedError(
            "abstract method -- subclass %s must override" % self.__class__)

    def GetUnknownFiles(self):
        """Return a list of files unknown to the VCS."""
        raise NotImplementedError(
            "abstract method -- subclass %s must override" % self.__class__)

    def CheckForUnknownFiles(self):
        """Show an "are you sure?" prompt if there are unknown files."""
        unknown_files = self.GetUnknownFiles()
        if unknown_files:
            print "The following files are not added to version control:"
            for line in unknown_files:
                print line
            prompt = "Are you sure to continue?(y/N) "
            answer = raw_input(prompt).strip()
            if answer != "y":
                ErrorExit("User aborted")

    def GetBaseFile(self, filename):
        """Get the content of the upstream version of a file.

        Returns:
            A tuple (base_content, new_content, is_binary, status)
                base_content: The contents of the base file.
                new_content: For text files, this is empty. For binary files,
                    this is the contents of the new file, since the diff
                    output won't contain information to reconstruct the
                    current file.
                is_binary: True iff the file is binary.
                status: The status of the file.
        """
        raise NotImplementedError(
            "abstract method -- subclass %s must override" % self.__class__)

    def GetBaseFiles(self, diff):
        """Helper that calls GetBase file for each file in the patch.

        Returns:
            A dictionary that maps from filename to GetBaseFile's tuple.
            Filenames are retrieved based on lines that start with "Index:"
            or "Property changes on:".
        """
        files = {}
        for line in diff.splitlines(True):
            if line.startswith('Index:') or line.startswith('Property changes on:'):
                unused, filename = line.split(':', 1)
                # On Windows if a file has property changes its filename uses '\'
                # instead of '/'.
                filename = to_slash(filename.strip())
                files[filename] = self.GetBaseFile(filename)
        return files

    def UploadBaseFiles(self, issue, rpc_server, patch_list, patchset, options,
                        files):
        """Uploads the base files (and if necessary, the current ones as well)."""

        def UploadFile(filename, file_id, content, is_binary, status, is_base):
            """Uploads a file to the server."""
            set_status("uploading " + filename)
            file_too_large = False
            # 'type' here shadows the builtin; it labels the upload as the
            # base or the current revision of the file.
            if is_base:
                type = "base"
            else:
                type = "current"
            if len(content) > MAX_UPLOAD_SIZE:
                print ("Not uploading the %s file for %s because it's too large." %
                       (type, filename))
                file_too_large = True
                content = ""
            checksum = md5(content).hexdigest()
            if options.verbose > 0 and not file_too_large:
                print "Uploading %s file for %s" % (type, filename)
            url = "/%d/upload_content/%d/%d" % (int(issue), int(patchset), file_id)
            form_fields = [
                ("filename", filename),
                ("status", status),
                ("checksum", checksum),
                ("is_binary", str(is_binary)),
                ("is_current", str(not is_base)),
            ]
            if file_too_large:
                form_fields.append(("file_too_large", "1"))
            if options.email:
                form_fields.append(("user", options.email))
            ctype, body = EncodeMultipartFormData(form_fields, [("data", filename, content)])
            response_body = rpc_server.Send(url, body, content_type=ctype)
            if not response_body.startswith("OK"):
                StatusUpdate(" --> %s" % response_body)
                sys.exit(1)

        # Don't want to spawn too many threads, nor do we want to
        # hit Rietveld too hard, or it will start serving 500 errors.
        # When 8 works, it's no better than 4, and sometimes 8 is
        # too many for Rietveld to handle.
        MAX_PARALLEL_UPLOADS = 4

        # The semaphore bounds in-flight uploads; each worker thread releases
        # it when done and parks itself on finished_upload_threads so the
        # submitting thread can join it.
        sema = threading.BoundedSemaphore(MAX_PARALLEL_UPLOADS)
        upload_threads = []
        finished_upload_threads = []

        class UploadFileThread(threading.Thread):
            def __init__(self, args):
                threading.Thread.__init__(self)
                self.args = args
            def run(self):
                UploadFile(*self.args)
                finished_upload_threads.append(self)
                sema.release()

        def StartUploadFile(*args):
            # Blocks until an upload slot is free, then reaps any finished
            # workers before launching a new one.
            sema.acquire()
            while len(finished_upload_threads) > 0:
                t = finished_upload_threads.pop()
                upload_threads.remove(t)
                t.join()
            t = UploadFileThread(args)
            upload_threads.append(t)
            t.start()

        def WaitForUploads():
            for t in upload_threads:
                t.join()

        # Invert (file_id, filename) pairs into filename -> file_id; the list
        # comprehension is used only for its setdefault side effect.
        patches = dict()
        [patches.setdefault(v, k) for k, v in patch_list]
        for filename in patches.keys():
            base_content, new_content, is_binary, status = files[filename]
            file_id_str = patches.get(filename)
            # A "nobase" marker means the base revision is not uploaded.
            if file_id_str.find("nobase") != -1:
                base_content = None
                file_id_str = file_id_str[file_id_str.rfind("_") + 1:]
            file_id = int(file_id_str)
            if base_content != None:
                StartUploadFile(filename, file_id, base_content, is_binary, status, True)
            if new_content != None:
                StartUploadFile(filename, file_id, new_content, is_binary, status, False)
        WaitForUploads()

    def IsImage(self, filename):
        """Returns true if the filename has an image extension."""
        mimetype = mimetypes.guess_type(filename)[0]
        if not mimetype:
            return False
        return mimetype.startswith("image/")

    def IsBinary(self, filename):
        """Returns true if the guessed mimetype isn't in the text group."""
        mimetype = mimetypes.guess_type(filename)[0]
        if not mimetype:
            return False  # e.g. README, "real" binaries usually have an extension
        # special case for text files which don't start with text/
        if mimetype in TEXT_MIMETYPES:
            return False
        return not mimetype.startswith("text/")
class FakeMercurialUI(object):
    """Minimal stand-in for a Mercurial ui object.

    Captures command output in ``self.output`` instead of printing it, so hg
    commands can be driven in-process (see MercurialVCS.get_hg_status).
    Config-related calls are delegated to the module-level ``global_ui``.
    """

    def __init__(self):
        self.quiet = True
        self.output = ''

    def write(self, *args, **opts):
        # Accumulate everything hg would have printed.
        self.output += ' '.join(args)
    def copy(self):
        return self
    def status(self, *args, **opts):
        pass

    def formatter(self, topic, opts):
        # Imported lazily so the module loads even without Mercurial installed.
        from mercurial.formatter import plainformatter
        return plainformatter(self, topic, opts)

    def readconfig(self, *args, **opts):
        pass
    def expandpath(self, *args, **opts):
        return global_ui.expandpath(*args, **opts)
    def configitems(self, *args, **opts):
        return global_ui.configitems(*args, **opts)
    def config(self, *args, **opts):
        return global_ui.config(*args, **opts)
# When False, Mercurial is driven in-process via hg_commands with a
# FakeMercurialUI (see MercurialVCS.get_hg_status).
use_hg_shell = False  # set to True to shell out to hg always; slower
class MercurialVCS(VersionControlSystem):
    """Implementation of the VersionControlSystem interface for Mercurial."""

    def __init__(self, options, ui, repo):
        super(MercurialVCS, self).__init__(options)
        self.ui = ui
        self.repo = repo
        # Cached output of 'hg status -C' (see get_hg_status).
        self.status = None
        # Absolute path to repository (we can be in a subdir)
        self.repo_dir = os.path.normpath(repo.root)
        # Compute the subdir
        cwd = os.path.normpath(os.getcwd())
        assert cwd.startswith(self.repo_dir)
        self.subdir = cwd[len(self.repo_dir):].lstrip(r"\/")
        if self.options.revision:
            self.base_rev = self.options.revision
        else:
            # Prefer the MQ parent when patch queues are in use; otherwise
            # fall back to the working directory's parent revision.
            mqparent, err = RunShellWithReturnCode(['hg', 'log', '--rev', 'qparent', '--template={node}'])
            if not err and mqparent != "":
                self.base_rev = mqparent
            else:
                out = RunShell(["hg", "parents", "-q"], silent_ok=True).strip()
                if not out:
                    # No revisions; use 0 to mean a repository with nothing.
                    out = "0:0"
                self.base_rev = out.split(':')[1].strip()

    def _GetRelPath(self, filename):
        """Get relative path of a file according to the current directory,
        given its logical path in the repo."""
        assert filename.startswith(self.subdir), (filename, self.subdir)
        return filename[len(self.subdir):].lstrip(r"\/")

    def GenerateDiff(self, extra_args):
        """Return an svn-style diff built from 'hg diff --git' output."""
        # If no file specified, restrict to the current subdir
        extra_args = extra_args or ["."]
        cmd = ["hg", "diff", "--git", "-r", self.base_rev] + extra_args
        data = RunShell(cmd, silent_ok=True)
        svndiff = []
        filecount = 0
        for line in data.splitlines():
            m = re.match("diff --git a/(\S+) b/(\S+)", line)
            if m:
                # Modify line to make it look like as it comes from svn diff.
                # With this modification no changes on the server side are required
                # to make upload.py work with Mercurial repos.
                # NOTE: for proper handling of moved/copied files, we have to use
                # the second filename.
                filename = m.group(2)
                svndiff.append("Index: %s" % filename)
                svndiff.append("=" * 67)
                filecount += 1
                logging.info(line)
            else:
                svndiff.append(line)
        if not filecount:
            ErrorExit("No valid patches found in output from hg diff")
        return "\n".join(svndiff) + "\n"

    def GetUnknownFiles(self):
        """Return a list of files unknown to the VCS."""
        args = []
        # '?' status lines mark files hg does not track.
        status = RunShell(["hg", "status", "--rev", self.base_rev, "-u", "."],
                          silent_ok=True)
        unknown_files = []
        for line in status.splitlines():
            st, fn = line.split(" ", 1)
            if st == "?":
                unknown_files.append(fn)
        return unknown_files

    def get_hg_status(self, rev, path):
        """Return the cached 'hg status -C' line(s) describing *path*."""
        # We'd like to use 'hg status -C path', but that is buggy
        # (see http://mercurial.selenic.com/bts/issue3023).
        # Instead, run 'hg status -C' without a path
        # and skim the output for the path we want.
        if self.status is None:
            if use_hg_shell:
                out = RunShell(["hg", "status", "-C", "--rev", rev])
            else:
                # Drive hg in-process, capturing its output via the fake ui.
                fui = FakeMercurialUI()
                ret = hg_commands.status(fui, self.repo, *[], **{'rev': [rev], 'copies': True})
                if ret:
                    raise hg_util.Abort(ret)
                out = fui.output
            self.status = out.splitlines()
        for i in range(len(self.status)):
            # line is
            #   A path
            #   M path
            # etc
            line = to_slash(self.status[i])
            if line[2:] == path:
                # A following indented line is the copy source; include it.
                if i+1 < len(self.status) and self.status[i+1][:2] == ' ':
                    return self.status[i:i+2]
                return self.status[i:i+1]
        raise hg_util.Abort("no status for " + path)

    def GetBaseFile(self, filename):
        set_status("inspecting " + filename)
        # "hg status" and "hg cat" both take a path relative to the current subdir
        # rather than to the repo root, but "hg diff" has given us the full path
        # to the repo root.
        base_content = ""
        new_content = None
        is_binary = False
        oldrelpath = relpath = self._GetRelPath(filename)
        out = self.get_hg_status(self.base_rev, relpath)
        status, what = out[0].split(' ', 1)
        # An 'A' with a copy-source line is really a copy/move: treat as 'M'
        # and read the base content from the old path.
        if len(out) > 1 and status == "A" and what == relpath:
            oldrelpath = out[1].strip()
            status = "M"
        if ":" in self.base_rev:
            base_rev = self.base_rev.split(":", 1)[0]
        else:
            base_rev = self.base_rev
        if status != "A":
            if use_hg_shell:
                base_content = RunShell(["hg", "cat", "-r", base_rev, oldrelpath], silent_ok=True)
            else:
                base_content = str(self.repo[base_rev][oldrelpath].data())
            is_binary = "\0" in base_content  # Mercurial's heuristic
        if status != "R":
            new_content = open(relpath, "rb").read()
            is_binary = is_binary or "\0" in new_content
        if is_binary and base_content and use_hg_shell:
            # Fetch again without converting newlines
            base_content = RunShell(["hg", "cat", "-r", base_rev, oldrelpath],
                                    silent_ok=True, universal_newlines=False)
        # Only binary images need their new content uploaded alongside.
        if not is_binary or not self.IsImage(relpath):
            new_content = None
        return base_content, new_content, is_binary, status
# NOTE: The SplitPatch function is duplicated in engine.py, keep them in sync.
def SplitPatch(data):
    """Splits a patch into separate pieces for each file.

    Args:
        data: A string containing the output of svn diff.

    Returns:
        A list of 2-tuple (filename, text) where text is the svn diff output
        pertaining to filename.
    """
    patches = []
    current_name = None
    current_diff = []

    def flush():
        # Emit the accumulated diff for the current file, if any.
        if current_name and current_diff:
            patches.append((current_name, ''.join(current_diff)))

    for line in data.splitlines(True):
        started = None
        if line.startswith('Index:'):
            started = line.split(':', 1)[1].strip()
        elif line.startswith('Property changes on:'):
            # When a file is modified, paths use '/' between directories,
            # however when a property is modified '\' is used on Windows.
            # Make them the same, otherwise the file shows up twice.
            candidate = to_slash(line.split(':', 1)[1].strip())
            if candidate != current_name:
                # File has property changes but no modifications:
                # start a new diff for it.
                started = candidate
        if started:
            flush()
            current_name = started
            current_diff = [line]
        elif current_diff is not None:
            current_diff.append(line)
    flush()
    return patches
def UploadSeparatePatches(issue, rpc_server, patchset, data, options):
    """Uploads a separate patch for each file in the diff output.

    Returns a list of [patch_key, filename] for each file.
    """
    patches = SplitPatch(data)
    rv = []
    for patch in patches:
        set_status("uploading patch for " + patch[0])
        # Oversized patches are skipped entirely rather than truncated.
        if len(patch[1]) > MAX_UPLOAD_SIZE:
            print ("Not uploading the patch for " + patch[0] +
                   " because the file is too large.")
            continue
        form_fields = [("filename", patch[0])]
        if not options.download_base:
            form_fields.append(("content_upload", "1"))
        files = [("data", "data.diff", patch[1])]
        ctype, body = EncodeMultipartFormData(form_fields, files)
        url = "/%d/upload_patch/%d" % (int(issue), int(patchset))
        print "Uploading patch for " + patch[0]
        response_body = rpc_server.Send(url, body, content_type=ctype)
        # On success the server replies "OK" followed by the patch key.
        lines = response_body.splitlines()
        if not lines or lines[0] != "OK":
            StatusUpdate(" --> %s" % response_body)
            sys.exit(1)
        rv.append([lines[1], patch[0]])
    return rv
| bsd-3-clause |
lizardsystem/lizard5-apps | lizard_wms/migrations/0017_auto__add_field_wmssource_metadata.py | 2 | 4883 | # encoding: utf-8
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
    """South schema migration: adds the JSON ``metadata`` column to WMSSource."""

    def forwards(self, orm):
        # Adding field 'WMSSource.metadata'
        db.add_column('lizard_wms_wmssource', 'metadata', self.gf('jsonfield.fields.JSONField')(null=True, blank=True), keep_default=False)

    def backwards(self, orm):
        # Deleting field 'WMSSource.metadata'
        db.delete_column('lizard_wms_wmssource', 'metadata')

    # Frozen ORM model definitions auto-generated by South. This is a
    # historical snapshot of the schema at migration time -- do not edit.
    models = {
        'lizard_maptree.category': {
            'Meta': {'ordering': "('name',)", 'object_name': 'Category'},
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '80'}),
            'parent': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['lizard_maptree.Category']", 'null': 'True', 'blank': 'True'}),
            'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '20', 'db_index': 'True'})
        },
        'lizard_wms.featureline': {
            'Meta': {'object_name': 'FeatureLine'},
            'description': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'in_hover': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'order_using': ('django.db.models.fields.IntegerField', [], {'default': '1000'}),
            'render_as': ('django.db.models.fields.CharField', [], {'default': "u'T'", 'max_length': '1'}),
            'use_as_id': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'visible': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
            'wms_layer': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['lizard_wms.WMSSource']"})
        },
        'lizard_wms.wmsconnection': {
            'Meta': {'object_name': 'WMSConnection'},
            'category': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': "orm['lizard_maptree.Category']", 'null': 'True', 'blank': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'options': ('django.db.models.fields.TextField', [], {'default': 'u\'{"buffer": 0, "isBaseLayer": false, "opacity": 0.5}\''}),
            'params': ('django.db.models.fields.TextField', [], {'default': 'u\'{"height": "256", "width": "256", "layers": "%s", "styles": "", "format": "image/png", "tiled": "true", "transparent": "true"}\''}),
            'slug': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'title': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'url': ('django.db.models.fields.URLField', [], {'max_length': '200'}),
            'version': ('django.db.models.fields.CharField', [], {'default': "u'1.3.0'", 'max_length': '20'}),
            'xml': ('django.db.models.fields.TextField', [], {'default': "u''", 'blank': 'True'})
        },
        'lizard_wms.wmssource': {
            'Meta': {'ordering': "(u'name',)", 'object_name': 'WMSSource'},
            'bbox': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
            'category': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': "orm['lizard_maptree.Category']", 'null': 'True', 'blank': 'True'}),
            'connection': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['lizard_wms.WMSConnection']", 'null': 'True', 'blank': 'True'}),
            'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'legend_url': ('django.db.models.fields.CharField', [], {'max_length': '2048', 'null': 'True', 'blank': 'True'}),
            'metadata': ('jsonfield.fields.JSONField', [], {'null': 'True', 'blank': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '80'}),
            'old_metadata': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
            'options': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
            'params': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
            'show_legend': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
            'url': ('django.db.models.fields.URLField', [], {'max_length': '200'})
        }
    }

    complete_apps = ['lizard_wms']
| lgpl-3.0 |
mhdella/scikit-learn | sklearn/tests/test_kernel_ridge.py | 342 | 3027 | import numpy as np
import scipy.sparse as sp
from sklearn.datasets import make_regression
from sklearn.linear_model import Ridge
from sklearn.kernel_ridge import KernelRidge
from sklearn.metrics.pairwise import pairwise_kernels
from sklearn.utils.testing import ignore_warnings
from sklearn.utils.testing import assert_array_almost_equal
# Shared regression fixture: dense X plus sparse CSR/CSC copies of it, and a
# two-column target Y for the multi-output test.
X, y = make_regression(n_features=10)
Xcsr = sp.csr_matrix(X)
Xcsc = sp.csc_matrix(X)
Y = np.array([y, y]).T
def test_kernel_ridge():
    """Linear KernelRidge must match an intercept-free Ridge on dense data."""
    ridge_pred = Ridge(alpha=1, fit_intercept=False).fit(X, y).predict(X)
    kernel_pred = KernelRidge(kernel="linear", alpha=1).fit(X, y).predict(X)
    assert_array_almost_equal(ridge_pred, kernel_pred)
def test_kernel_ridge_csr():
    """Linear-kernel KernelRidge matches Ridge (cholesky solver) on CSR input."""
    baseline = Ridge(alpha=1, fit_intercept=False, solver="cholesky")
    expected = baseline.fit(Xcsr, y).predict(Xcsr)
    actual = KernelRidge(kernel="linear", alpha=1).fit(Xcsr, y).predict(Xcsr)
    assert_array_almost_equal(expected, actual)
def test_kernel_ridge_csc():
    """Linear-kernel KernelRidge matches Ridge (cholesky solver) on CSC input."""
    baseline = Ridge(alpha=1, fit_intercept=False, solver="cholesky")
    expected = baseline.fit(Xcsc, y).predict(Xcsc)
    actual = KernelRidge(kernel="linear", alpha=1).fit(Xcsc, y).predict(Xcsc)
    assert_array_almost_equal(expected, actual)
def test_kernel_ridge_singular_kernel():
    """With alpha=0 the dual system is singular (LinAlgError), so KernelRidge
    falls back to a least-squares solver; it must still match Ridge."""
    expected = Ridge(alpha=0, fit_intercept=False).fit(X, y).predict(X)
    model = KernelRidge(kernel="linear", alpha=0)
    # The singular solve emits a warning during fit; silence it.
    ignore_warnings(model.fit)(X, y)
    assert_array_almost_equal(expected, model.predict(X))
def test_kernel_ridge_precomputed():
    """Supplying a precomputed Gram matrix must give the same predictions as
    letting KernelRidge evaluate the kernel itself."""
    for kernel_name in ["linear", "rbf", "poly", "cosine"]:
        gram = pairwise_kernels(X, X, metric=kernel_name)
        direct = KernelRidge(kernel=kernel_name).fit(X, y).predict(X)
        via_gram = KernelRidge(kernel="precomputed").fit(gram, y).predict(gram)
        assert_array_almost_equal(direct, via_gram)
def test_kernel_ridge_precomputed_kernel_unchanged():
    """Fitting with kernel='precomputed' must not mutate the Gram matrix."""
    gram = np.dot(X, X.T)
    snapshot = gram.copy()
    KernelRidge(kernel="precomputed").fit(gram, y)
    assert_array_almost_equal(gram, snapshot)
def test_kernel_ridge_sample_weights():
    """Sample weights must be honored identically by Ridge and KernelRidge,
    both with a linear kernel and with a precomputed Gram matrix."""
    gram = np.dot(X, X.T)  # linear kernel, precomputed
    weights = np.random.RandomState(0).rand(X.shape[0])
    baseline = Ridge(alpha=1, fit_intercept=False).fit(
        X, y, sample_weight=weights).predict(X)
    linear_pred = KernelRidge(kernel="linear", alpha=1).fit(
        X, y, sample_weight=weights).predict(X)
    precomp_pred = KernelRidge(kernel="precomputed", alpha=1).fit(
        gram, y, sample_weight=weights).predict(gram)
    assert_array_almost_equal(baseline, linear_pred)
    assert_array_almost_equal(baseline, precomp_pred)
def test_kernel_ridge_multi_output():
    """Multi-target fits must match Ridge, and each output column must equal
    the prediction from the corresponding single-target fit."""
    expected = Ridge(alpha=1, fit_intercept=False).fit(X, Y).predict(X)
    multi_pred = KernelRidge(kernel="linear", alpha=1).fit(X, Y).predict(X)
    assert_array_almost_equal(expected, multi_pred)
    single_pred = KernelRidge(kernel="linear", alpha=1).fit(X, y).predict(X)
    stacked = np.array([single_pred, single_pred]).T
    assert_array_almost_equal(multi_pred, stacked)
| bsd-3-clause |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.