| repo_name (string, lengths 5-100) | path (string, lengths 4-375) | copies (string, 991 classes) | size (string, lengths 4-7) | content (string, lengths 666-1M) | license (string, 15 classes) |
|---|---|---|---|---|---|
Titulacion-Sistemas/PythonTitulacion-EV | Lib/site-packages/setuptools/command/upload.py | 12 | 6711 |
"""distutils.command.upload
Implements the Distutils 'upload' subcommand (upload package to PyPI)."""
from distutils import errors
from distutils import log
from distutils.core import Command
from distutils.spawn import spawn
try:
from hashlib import md5
except ImportError:
from md5 import md5
import os
import sys
import socket
import platform
import base64
from setuptools.compat import urlparse, StringIO, httplib, ConfigParser
class upload(Command):
description = "upload binary package to PyPI"
DEFAULT_REPOSITORY = 'https://pypi.python.org/pypi'
user_options = [
('repository=', 'r',
"url of repository [default: %s]" % DEFAULT_REPOSITORY),
('show-response', None,
'display full response text from server'),
('sign', 's',
'sign files to upload using gpg'),
('identity=', 'i', 'GPG identity used to sign files'),
]
boolean_options = ['show-response', 'sign']
def initialize_options(self):
self.username = ''
self.password = ''
self.repository = ''
self.show_response = 0
self.sign = False
self.identity = None
def finalize_options(self):
if self.identity and not self.sign:
raise errors.DistutilsOptionError(
"Must use --sign for --identity to have meaning"
)
if 'HOME' in os.environ:
rc = os.path.join(os.environ['HOME'], '.pypirc')
if os.path.exists(rc):
self.announce('Using PyPI login from %s' % rc)
config = ConfigParser.ConfigParser({
'username':'',
'password':'',
'repository':''})
config.read(rc)
if not self.repository:
self.repository = config.get('server-login', 'repository')
if not self.username:
self.username = config.get('server-login', 'username')
if not self.password:
self.password = config.get('server-login', 'password')
if not self.repository:
self.repository = self.DEFAULT_REPOSITORY
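# A hedged illustration (not part of the original setuptools source) of the
# ~/.pypirc layout that finalize_options reads above; the credentials are
# made up for the example:
#   [server-login]
#   repository = https://pypi.python.org/pypi
#   username = example-user
#   password = example-password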
def run(self):
if not self.distribution.dist_files:
raise errors.DistutilsOptionError("No dist file created in earlier command")
for command, pyversion, filename in self.distribution.dist_files:
self.upload_file(command, pyversion, filename)
def upload_file(self, command, pyversion, filename):
# Sign if requested
if self.sign:
gpg_args = ["gpg", "--detach-sign", "-a", filename]
if self.identity:
gpg_args[2:2] = ["--local-user", self.identity]
spawn(gpg_args,
dry_run=self.dry_run)
# Fill in the data
f = open(filename,'rb')
content = f.read()
f.close()
basename = os.path.basename(filename)
comment = ''
if command=='bdist_egg' and self.distribution.has_ext_modules():
comment = "built on %s" % platform.platform(terse=1)
data = {
':action':'file_upload',
'protocol_version':'1',
'name':self.distribution.get_name(),
'version':self.distribution.get_version(),
'content':(basename,content),
'filetype':command,
'pyversion':pyversion,
'md5_digest':md5(content).hexdigest(),
}
if command == 'bdist_rpm':
dist, version, id = platform.dist()
if dist:
comment = 'built for %s %s' % (dist, version)
elif command == 'bdist_dumb':
comment = 'built for %s' % platform.platform(terse=1)
data['comment'] = comment
if self.sign:
asc_file = open(filename + ".asc")
data['gpg_signature'] = (os.path.basename(filename) + ".asc", asc_file.read())
asc_file.close()
# set up the authentication
auth = "Basic " + base64.encodestring(self.username + ":" + self.password).strip()
# Build up the MIME payload for the POST data
boundary = '--------------GHSKFJDLGDS7543FJKLFHRE75642756743254'
sep_boundary = '\n--' + boundary
end_boundary = sep_boundary + '--'
body = StringIO()
for key, value in data.items():
# handle multiple entries for the same name
if not isinstance(value, list):
value = [value]
for value in value:
if type(value) is tuple:
fn = ';filename="%s"' % value[0]
value = value[1]
else:
fn = ""
value = str(value)
body.write(sep_boundary)
body.write('\nContent-Disposition: form-data; name="%s"'%key)
body.write(fn)
body.write("\n\n")
body.write(value)
if value and value[-1] == '\r':
body.write('\n') # write an extra newline (lurve Macs)
body.write(end_boundary)
body.write("\n")
body = body.getvalue()
self.announce("Submitting %s to %s" % (filename, self.repository), log.INFO)
# build the Request
# We can't use urllib2 since we need to send the Basic
# auth right with the first request
schema, netloc, url, params, query, fragments = \
urlparse(self.repository)
assert not params and not query and not fragments
if schema == 'http':
http = httplib.HTTPConnection(netloc)
elif schema == 'https':
http = httplib.HTTPSConnection(netloc)
else:
raise AssertionError("unsupported schema " + schema)
data = ''
try:
http.connect()
http.putrequest("POST", url)
http.putheader('Content-type',
'multipart/form-data; boundary=%s'%boundary)
http.putheader('Content-length', str(len(body)))
http.putheader('Authorization', auth)
http.endheaders()
http.send(body)
except socket.error:
e = sys.exc_info()[1]
self.announce(str(e), log.ERROR)
return
r = http.getresponse()
if r.status == 200:
self.announce('Server response (%s): %s' % (r.status, r.reason),
log.INFO)
else:
self.announce('Upload failed (%s): %s' % (r.status, r.reason),
log.ERROR)
if self.show_response:
print('-'*75, r.read(), '-'*75)
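# A hedged illustration of the multipart layout assembled in upload_file above;
# this helper is not part of the original setuptools source, and the field
# name/value used in the example call are made up.
def _example_multipart_field(boundary, name, value):
    """Formats one plain (non-file) form field the way upload_file writes it."""
    # Mirrors the body.write(...) sequence: boundary line, Content-Disposition
    # header, blank line, then the field value.
    return ('\n--' + boundary +
            '\nContent-Disposition: form-data; name="%s"' % name +
            '\n\n' + value)
# Example: _example_multipart_field(boundary, 'protocol_version', '1')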
| mit |
lyx2014/libvpx_c | tools/cpplint.py | 104 | 182862 |
#!/usr/bin/python
#
# Copyright (c) 2009 Google Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Does google-lint on c++ files.
The goal of this script is to identify places in the code that *may*
be in non-compliance with google style. It does not attempt to fix
up these problems -- the point is to educate. It also does not
attempt to find all problems, or to ensure that everything it does
find is legitimately a problem.
In particular, we can get very confused by /* and // inside strings!
We do a small hack, which is to ignore //'s with "'s after them on the
same line, but it is far from perfect (in either direction).
"""
import codecs
import copy
import getopt
import math # for log
import os
import re
import sre_compile
import string
import sys
import unicodedata
_USAGE = """
Syntax: cpplint.py [--verbose=#] [--output=vs7] [--filter=-x,+y,...]
[--counting=total|toplevel|detailed] [--root=subdir]
[--linelength=digits]
<file> [file] ...
The style guidelines this tries to follow are those in
http://google-styleguide.googlecode.com/svn/trunk/cppguide.xml
Every problem is given a confidence score from 1-5, with 5 meaning we are
certain of the problem, and 1 meaning it could be a legitimate construct.
This will miss some errors, and is not a substitute for a code review.
To suppress false-positive errors of a certain category, add a
'NOLINT(category)' comment to the line. NOLINT or NOLINT(*)
suppresses errors of all categories on that line.
The files passed in will be linted; at least one file must be provided.
Default linted extensions are .cc, .cpp, .cu, .cuh and .h. Change the
extensions with the --extensions flag.
Flags:
output=vs7
By default, the output is formatted to ease emacs parsing. Visual Studio
compatible output (vs7) may also be used. Other formats are unsupported.
verbose=#
Specify a number 0-5 to restrict errors to certain verbosity levels.
filter=-x,+y,...
Specify a comma-separated list of category-filters to apply: only
error messages whose category names pass the filters will be printed.
(Category names are printed with the message and look like
"[whitespace/indent]".) Filters are evaluated left to right.
"-FOO" and "FOO" means "do not print categories that start with FOO".
"+FOO" means "do print categories that start with FOO".
Examples: --filter=-whitespace,+whitespace/braces
--filter=whitespace,runtime/printf,+runtime/printf_format
--filter=-,+build/include_what_you_use
To see a list of all the categories used in cpplint, pass no arg:
--filter=
counting=total|toplevel|detailed
The total number of errors found is always printed. If
'toplevel' is provided, then the count of errors in each of
the top-level categories like 'build' and 'whitespace' will
also be printed. If 'detailed' is provided, then a count
is provided for each category like 'build/class'.
root=subdir
The root directory used for deriving header guard CPP variable.
By default, the header guard CPP variable is calculated as the relative
path to the directory that contains .git, .hg, or .svn. When this flag
is specified, the relative path is calculated from the specified
directory. If the specified directory does not exist, this flag is
ignored.
Examples:
Assuming that src/.git exists, the header guard CPP variables for
src/chrome/browser/ui/browser.h are:
No flag => CHROME_BROWSER_UI_BROWSER_H_
--root=chrome => BROWSER_UI_BROWSER_H_
--root=chrome/browser => UI_BROWSER_H_
linelength=digits
This is the allowed line length for the project. The default value is
80 characters.
Examples:
--linelength=120
extensions=extension,extension,...
The allowed file extensions that cpplint will check
Examples:
--extensions=hpp,cpp
"""
# We categorize each error message we print. Here are the categories.
# We want an explicit list so we can list them all in cpplint --filter=.
# If you add a new error message with a new category, add it to the list
# here! cpplint_unittest.py should tell you if you forget to do this.
_ERROR_CATEGORIES = [
'build/class',
'build/deprecated',
'build/endif_comment',
'build/explicit_make_pair',
'build/forward_decl',
'build/header_guard',
'build/include',
'build/include_alpha',
'build/include_order',
'build/include_what_you_use',
'build/namespaces',
'build/printf_format',
'build/storage_class',
'legal/copyright',
'readability/alt_tokens',
'readability/braces',
'readability/casting',
'readability/check',
'readability/constructors',
'readability/fn_size',
'readability/function',
'readability/multiline_comment',
'readability/multiline_string',
'readability/namespace',
'readability/nolint',
'readability/nul',
'readability/streams',
'readability/todo',
'readability/utf8',
'runtime/arrays',
'runtime/casting',
'runtime/explicit',
'runtime/int',
'runtime/init',
'runtime/invalid_increment',
'runtime/member_string_references',
'runtime/memset',
'runtime/operator',
'runtime/printf',
'runtime/printf_format',
'runtime/references',
'runtime/sizeof',
'runtime/string',
'runtime/threadsafe_fn',
'runtime/vlog',
'whitespace/blank_line',
'whitespace/braces',
'whitespace/comma',
'whitespace/comments',
'whitespace/empty_conditional_body',
'whitespace/empty_loop_body',
'whitespace/end_of_line',
'whitespace/ending_newline',
'whitespace/forcolon',
'whitespace/indent',
'whitespace/line_length',
'whitespace/newline',
'whitespace/operators',
'whitespace/parens',
'whitespace/semicolon',
'whitespace/tab',
'whitespace/todo'
]
# The default state of the category filter. This is overridden by the --filter=
# flag. By default all errors are on, so only add categories here that should be
# off by default (i.e., categories that must be enabled by the --filter= flags).
# All entries here should start with a '-' or '+', as in the --filter= flag.
_DEFAULT_FILTERS = ['-build/include_alpha']
# We used to check for high-bit characters, but after much discussion we
# decided those were OK, as long as they were in UTF-8 and didn't represent
# hard-coded international strings, which belong in a separate i18n file.
# C++ headers
_CPP_HEADERS = frozenset([
# Legacy
'algobase.h',
'algo.h',
'alloc.h',
'builtinbuf.h',
'bvector.h',
'complex.h',
'defalloc.h',
'deque.h',
'editbuf.h',
'fstream.h',
'function.h',
'hash_map',
'hash_map.h',
'hash_set',
'hash_set.h',
'hashtable.h',
'heap.h',
'indstream.h',
'iomanip.h',
'iostream.h',
'istream.h',
'iterator.h',
'list.h',
'map.h',
'multimap.h',
'multiset.h',
'ostream.h',
'pair.h',
'parsestream.h',
'pfstream.h',
'procbuf.h',
'pthread_alloc',
'pthread_alloc.h',
'rope',
'rope.h',
'ropeimpl.h',
'set.h',
'slist',
'slist.h',
'stack.h',
'stdiostream.h',
'stl_alloc.h',
'stl_relops.h',
'streambuf.h',
'stream.h',
'strfile.h',
'strstream.h',
'tempbuf.h',
'tree.h',
'type_traits.h',
'vector.h',
# 17.6.1.2 C++ library headers
'algorithm',
'array',
'atomic',
'bitset',
'chrono',
'codecvt',
'complex',
'condition_variable',
'deque',
'exception',
'forward_list',
'fstream',
'functional',
'future',
'initializer_list',
'iomanip',
'ios',
'iosfwd',
'iostream',
'istream',
'iterator',
'limits',
'list',
'locale',
'map',
'memory',
'mutex',
'new',
'numeric',
'ostream',
'queue',
'random',
'ratio',
'regex',
'set',
'sstream',
'stack',
'stdexcept',
'streambuf',
'string',
'strstream',
'system_error',
'thread',
'tuple',
'typeindex',
'typeinfo',
'type_traits',
'unordered_map',
'unordered_set',
'utility',
'valarray',
'vector',
# 17.6.1.2 C++ headers for C library facilities
'cassert',
'ccomplex',
'cctype',
'cerrno',
'cfenv',
'cfloat',
'cinttypes',
'ciso646',
'climits',
'clocale',
'cmath',
'csetjmp',
'csignal',
'cstdalign',
'cstdarg',
'cstdbool',
'cstddef',
'cstdint',
'cstdio',
'cstdlib',
'cstring',
'ctgmath',
'ctime',
'cuchar',
'cwchar',
'cwctype',
])
# Assertion macros. These are defined in base/logging.h and
# testing/base/gunit.h. Note that the _M versions need to come first
# for substring matching to work.
_CHECK_MACROS = [
'DCHECK', 'CHECK',
'EXPECT_TRUE_M', 'EXPECT_TRUE',
'ASSERT_TRUE_M', 'ASSERT_TRUE',
'EXPECT_FALSE_M', 'EXPECT_FALSE',
'ASSERT_FALSE_M', 'ASSERT_FALSE',
]
# Replacement macros for CHECK/DCHECK/EXPECT_TRUE/EXPECT_FALSE
_CHECK_REPLACEMENT = dict([(m, {}) for m in _CHECK_MACROS])
for op, replacement in [('==', 'EQ'), ('!=', 'NE'),
('>=', 'GE'), ('>', 'GT'),
('<=', 'LE'), ('<', 'LT')]:
_CHECK_REPLACEMENT['DCHECK'][op] = 'DCHECK_%s' % replacement
_CHECK_REPLACEMENT['CHECK'][op] = 'CHECK_%s' % replacement
_CHECK_REPLACEMENT['EXPECT_TRUE'][op] = 'EXPECT_%s' % replacement
_CHECK_REPLACEMENT['ASSERT_TRUE'][op] = 'ASSERT_%s' % replacement
_CHECK_REPLACEMENT['EXPECT_TRUE_M'][op] = 'EXPECT_%s_M' % replacement
_CHECK_REPLACEMENT['ASSERT_TRUE_M'][op] = 'ASSERT_%s_M' % replacement
for op, inv_replacement in [('==', 'NE'), ('!=', 'EQ'),
('>=', 'LT'), ('>', 'LE'),
('<=', 'GT'), ('<', 'GE')]:
_CHECK_REPLACEMENT['EXPECT_FALSE'][op] = 'EXPECT_%s' % inv_replacement
_CHECK_REPLACEMENT['ASSERT_FALSE'][op] = 'ASSERT_%s' % inv_replacement
_CHECK_REPLACEMENT['EXPECT_FALSE_M'][op] = 'EXPECT_%s_M' % inv_replacement
_CHECK_REPLACEMENT['ASSERT_FALSE_M'][op] = 'ASSERT_%s_M' % inv_replacement
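# A hedged illustration (not part of the original cpplint source) of what the
# two loops above produce, e.g.:
#   _CHECK_REPLACEMENT['CHECK']['==']          == 'CHECK_EQ'
#   _CHECK_REPLACEMENT['DCHECK']['<']          == 'DCHECK_LT'
#   _CHECK_REPLACEMENT['EXPECT_FALSE']['<']    == 'EXPECT_GE'     # inverted sense
#   _CHECK_REPLACEMENT['ASSERT_FALSE_M']['!='] == 'ASSERT_EQ_M'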
# Alternative tokens and their replacements. For full list, see section 2.5
# Alternative tokens [lex.digraph] in the C++ standard.
#
# Digraphs (such as '%:') are not included here since it's a mess to
# match those on a word boundary.
_ALT_TOKEN_REPLACEMENT = {
'and': '&&',
'bitor': '|',
'or': '||',
'xor': '^',
'compl': '~',
'bitand': '&',
'and_eq': '&=',
'or_eq': '|=',
'xor_eq': '^=',
'not': '!',
'not_eq': '!='
}
# Compile regular expression that matches all the above keywords. The "[ =()]"
# bit is meant to avoid matching these keywords outside of boolean expressions.
#
# False positives include C-style multi-line comments and multi-line strings
# but those have always been troublesome for cpplint.
_ALT_TOKEN_REPLACEMENT_PATTERN = re.compile(
r'[ =()](' + ('|'.join(_ALT_TOKEN_REPLACEMENT.keys())) + r')(?=[ (]|$)')
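# A hedged illustration (not part of the original source): on the line
# 'if (a and not b) {', the pattern above matches the alternative tokens
# 'and' and 'not', which _ALT_TOKEN_REPLACEMENT maps to '&&' and '!'.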
# These constants define types of headers for use with
# _IncludeState.CheckNextIncludeOrder().
_C_SYS_HEADER = 1
_CPP_SYS_HEADER = 2
_LIKELY_MY_HEADER = 3
_POSSIBLE_MY_HEADER = 4
_OTHER_HEADER = 5
# These constants define the current inline assembly state
_NO_ASM = 0 # Outside of inline assembly block
_INSIDE_ASM = 1 # Inside inline assembly block
_END_ASM = 2 # Last line of inline assembly block
_BLOCK_ASM = 3 # The whole block is an inline assembly block
# Match start of assembly blocks
_MATCH_ASM = re.compile(r'^\s*(?:asm|_asm|__asm|__asm__)'
r'(?:\s+(volatile|__volatile__))?'
r'\s*[{(]')
_regexp_compile_cache = {}
# Finds occurrences of NOLINT or NOLINT(...).
_RE_SUPPRESSION = re.compile(r'\bNOLINT\b(\([^)]*\))?')
# {str, set(int)}: a map from error categories to sets of linenumbers
# on which those errors are expected and should be suppressed.
_error_suppressions = {}
# The root directory used for deriving header guard CPP variable.
# This is set by --root flag.
_root = None
# The allowed line length of files.
# This is set by --linelength flag.
_line_length = 80
# The allowed extensions for file names
# This is set by --extensions flag.
_valid_extensions = set(['cc', 'h', 'cpp', 'cu', 'cuh'])
def ParseNolintSuppressions(filename, raw_line, linenum, error):
"""Updates the global list of error-suppressions.
Parses any NOLINT comments on the current line, updating the global
error_suppressions store. Reports an error if the NOLINT comment
was malformed.
Args:
filename: str, the name of the input file.
raw_line: str, the line of input text, with comments.
linenum: int, the number of the current line.
error: function, an error handler.
"""
# FIXME(adonovan): "NOLINT(" is misparsed as NOLINT(*).
matched = _RE_SUPPRESSION.search(raw_line)
if matched:
category = matched.group(1)
if category in (None, '(*)'): # => "suppress all"
_error_suppressions.setdefault(None, set()).add(linenum)
else:
if category.startswith('(') and category.endswith(')'):
category = category[1:-1]
if category in _ERROR_CATEGORIES:
_error_suppressions.setdefault(category, set()).add(linenum)
else:
error(filename, linenum, 'readability/nolint', 5,
'Unknown NOLINT error category: %s' % category)
def ResetNolintSuppressions():
"Resets the set of NOLINT suppressions to empty."
_error_suppressions.clear()
def IsErrorSuppressedByNolint(category, linenum):
"""Returns true if the specified error category is suppressed on this line.
Consults the global error_suppressions map populated by
ParseNolintSuppressions/ResetNolintSuppressions.
Args:
category: str, the category of the error.
linenum: int, the current line number.
Returns:
bool, True iff the error should be suppressed due to a NOLINT comment.
"""
return (linenum in _error_suppressions.get(category, set()) or
linenum in _error_suppressions.get(None, set()))
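# A minimal, hedged sketch (not part of the original cpplint) of how the three
# helpers above interact; the file name and line number are made up.
def _nolint_suppression_example():
    ResetNolintSuppressions()
    # Recording a category-specific NOLINT comment seen on line 42...
    ParseNolintSuppressions('demo.cc', 'long x = 0;  // NOLINT(runtime/int)',
                            42, lambda *args: None)
    # ...suppresses only that category on that line.
    return (IsErrorSuppressedByNolint('runtime/int', 42),      # True
            IsErrorSuppressedByNolint('whitespace/tab', 42))   # False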
def Match(pattern, s):
"""Matches the string with the pattern, caching the compiled regexp."""
# The regexp compilation caching is inlined in both Match and Search for
# performance reasons; factoring it out into a separate function turns out
# to be noticeably expensive.
if pattern not in _regexp_compile_cache:
_regexp_compile_cache[pattern] = sre_compile.compile(pattern)
return _regexp_compile_cache[pattern].match(s)
def ReplaceAll(pattern, rep, s):
"""Replaces instances of pattern in a string with a replacement.
The compiled regex is kept in a cache shared by Match and Search.
Args:
pattern: regex pattern
rep: replacement text
s: search string
Returns:
string with replacements made (or original string if no replacements)
"""
if pattern not in _regexp_compile_cache:
_regexp_compile_cache[pattern] = sre_compile.compile(pattern)
return _regexp_compile_cache[pattern].sub(rep, s)
def Search(pattern, s):
"""Searches the string for the pattern, caching the compiled regexp."""
if pattern not in _regexp_compile_cache:
_regexp_compile_cache[pattern] = sre_compile.compile(pattern)
return _regexp_compile_cache[pattern].search(s)
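# A hedged sketch (not part of the original cpplint) showing that Match,
# ReplaceAll and Search all share _regexp_compile_cache, so each distinct
# pattern string is compiled at most once.
def _regexp_cache_example():
    Match(r'^\s*#\s*include\b', '#include <map>')     # compiles and caches
    Search(r'^\s*#\s*include\b', ' #include <map>')   # reuses the cached regexp
    ReplaceAll(r'\s+', ' ', 'a    b')                  # caches a second pattern
    return len(_regexp_compile_cache)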
class _IncludeState(dict):
"""Tracks line numbers for includes, and the order in which includes appear.
As a dict, an _IncludeState object serves as a mapping between include
filename and line number on which that file was included.
Call CheckNextIncludeOrder() once for each header in the file, passing
in the type constants defined above. Calls in an illegal order will
raise an _IncludeError with an appropriate error message.
"""
# self._section will move monotonically through this set. If it ever
# needs to move backwards, CheckNextIncludeOrder will raise an error.
_INITIAL_SECTION = 0
_MY_H_SECTION = 1
_C_SECTION = 2
_CPP_SECTION = 3
_OTHER_H_SECTION = 4
_TYPE_NAMES = {
_C_SYS_HEADER: 'C system header',
_CPP_SYS_HEADER: 'C++ system header',
_LIKELY_MY_HEADER: 'header this file implements',
_POSSIBLE_MY_HEADER: 'header this file may implement',
_OTHER_HEADER: 'other header',
}
_SECTION_NAMES = {
_INITIAL_SECTION: "... nothing. (This can't be an error.)",
_MY_H_SECTION: 'a header this file implements',
_C_SECTION: 'C system header',
_CPP_SECTION: 'C++ system header',
_OTHER_H_SECTION: 'other header',
}
def __init__(self):
dict.__init__(self)
self.ResetSection()
def ResetSection(self):
# The name of the current section.
self._section = self._INITIAL_SECTION
# The path of last found header.
self._last_header = ''
def SetLastHeader(self, header_path):
self._last_header = header_path
def CanonicalizeAlphabeticalOrder(self, header_path):
"""Returns a path canonicalized for alphabetical comparison.
- replaces "-" with "_" so they both cmp the same.
- removes '-inl' since we don't require them to be after the main header.
- lowercase everything, just in case.
Args:
header_path: Path to be canonicalized.
Returns:
Canonicalized path.
"""
return header_path.replace('-inl.h', '.h').replace('-', '_').lower()
def IsInAlphabeticalOrder(self, clean_lines, linenum, header_path):
"""Check if a header is in alphabetical order with the previous header.
Args:
clean_lines: A CleansedLines instance containing the file.
linenum: The number of the line to check.
header_path: Canonicalized header to be checked.
Returns:
Returns true if the header is in alphabetical order.
"""
# If previous section is different from current section, _last_header will
# be reset to empty string, so it's always less than current header.
#
# If previous line was a blank line, assume that the headers are
# intentionally sorted the way they are.
if (self._last_header > header_path and
not Match(r'^\s*$', clean_lines.elided[linenum - 1])):
return False
return True
def CheckNextIncludeOrder(self, header_type):
"""Returns a non-empty error message if the next header is out of order.
This function also updates the internal state to be ready to check
the next include.
Args:
header_type: One of the _XXX_HEADER constants defined above.
Returns:
The empty string if the header is in the right order, or an
error message describing what's wrong.
"""
error_message = ('Found %s after %s' %
(self._TYPE_NAMES[header_type],
self._SECTION_NAMES[self._section]))
last_section = self._section
if header_type == _C_SYS_HEADER:
if self._section <= self._C_SECTION:
self._section = self._C_SECTION
else:
self._last_header = ''
return error_message
elif header_type == _CPP_SYS_HEADER:
if self._section <= self._CPP_SECTION:
self._section = self._CPP_SECTION
else:
self._last_header = ''
return error_message
elif header_type == _LIKELY_MY_HEADER:
if self._section <= self._MY_H_SECTION:
self._section = self._MY_H_SECTION
else:
self._section = self._OTHER_H_SECTION
elif header_type == _POSSIBLE_MY_HEADER:
if self._section <= self._MY_H_SECTION:
self._section = self._MY_H_SECTION
else:
# This will always be the fallback because we're not sure
# enough that the header is associated with this file.
self._section = self._OTHER_H_SECTION
else:
assert header_type == _OTHER_HEADER
self._section = self._OTHER_H_SECTION
if last_section != self._section:
self._last_header = ''
return ''
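# A hedged sketch (not part of the original cpplint) of the expected ordering:
# the file's own header, then C system headers, then C++ system headers, then
# other headers.
def _include_order_example():
    state = _IncludeState()
    first = state.CheckNextIncludeOrder(_C_SYS_HEADER)     # '' -> in order
    second = state.CheckNextIncludeOrder(_CPP_SYS_HEADER)  # '' -> in order
    third = state.CheckNextIncludeOrder(_C_SYS_HEADER)     # non-empty error message
    return (first, second, third)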
class _CppLintState(object):
"""Maintains module-wide state.."""
def __init__(self):
self.verbose_level = 1 # global setting.
self.error_count = 0 # global count of reported errors
# filters to apply when emitting error messages
self.filters = _DEFAULT_FILTERS[:]
self.counting = 'total' # In what way are we counting errors?
self.errors_by_category = {} # string to int dict storing error counts
# output format:
# "emacs" - format that emacs can parse (default)
# "vs7" - format that Microsoft Visual Studio 7 can parse
self.output_format = 'emacs'
def SetOutputFormat(self, output_format):
"""Sets the output format for errors."""
self.output_format = output_format
def SetVerboseLevel(self, level):
"""Sets the module's verbosity, and returns the previous setting."""
last_verbose_level = self.verbose_level
self.verbose_level = level
return last_verbose_level
def SetCountingStyle(self, counting_style):
"""Sets the module's counting options."""
self.counting = counting_style
def SetFilters(self, filters):
"""Sets the error-message filters.
These filters are applied when deciding whether to emit a given
error message.
Args:
filters: A string of comma-separated filters (eg "+whitespace/indent").
Each filter should start with + or -; else we die.
Raises:
ValueError: The comma-separated filters did not all start with '+' or '-'.
E.g. "-,+whitespace,-whitespace/indent,whitespace/badfilter"
"""
# Default filters always have less priority than the flag ones.
self.filters = _DEFAULT_FILTERS[:]
for filt in filters.split(','):
clean_filt = filt.strip()
if clean_filt:
self.filters.append(clean_filt)
for filt in self.filters:
if not (filt.startswith('+') or filt.startswith('-')):
raise ValueError('Every filter in --filters must start with + or -'
' (%s does not)' % filt)
def ResetErrorCounts(self):
"""Sets the module's error statistic back to zero."""
self.error_count = 0
self.errors_by_category = {}
def IncrementErrorCount(self, category):
"""Bumps the module's error statistic."""
self.error_count += 1
if self.counting in ('toplevel', 'detailed'):
if self.counting != 'detailed':
category = category.split('/')[0]
if category not in self.errors_by_category:
self.errors_by_category[category] = 0
self.errors_by_category[category] += 1
def PrintErrorCounts(self):
"""Print a summary of errors by category, and the total."""
for category, count in self.errors_by_category.iteritems():
sys.stderr.write('Category \'%s\' errors found: %d\n' %
(category, count))
sys.stderr.write('Total errors found: %d\n' % self.error_count)
_cpplint_state = _CppLintState()
def _OutputFormat():
"""Gets the module's output format."""
return _cpplint_state.output_format
def _SetOutputFormat(output_format):
"""Sets the module's output format."""
_cpplint_state.SetOutputFormat(output_format)
def _VerboseLevel():
"""Returns the module's verbosity setting."""
return _cpplint_state.verbose_level
def _SetVerboseLevel(level):
"""Sets the module's verbosity, and returns the previous setting."""
return _cpplint_state.SetVerboseLevel(level)
def _SetCountingStyle(level):
"""Sets the module's counting options."""
_cpplint_state.SetCountingStyle(level)
def _Filters():
"""Returns the module's list of output filters, as a list."""
return _cpplint_state.filters
def _SetFilters(filters):
"""Sets the module's error-message filters.
These filters are applied when deciding whether to emit a given
error message.
Args:
filters: A string of comma-separated filters (eg "whitespace/indent").
Each filter should start with + or -; else we die.
"""
_cpplint_state.SetFilters(filters)
class _FunctionState(object):
"""Tracks current function name and the number of lines in its body."""
_NORMAL_TRIGGER = 250 # for --v=0, 500 for --v=1, etc.
_TEST_TRIGGER = 400 # about 50% more than _NORMAL_TRIGGER.
def __init__(self):
self.in_a_function = False
self.lines_in_function = 0
self.current_function = ''
def Begin(self, function_name):
"""Start analyzing function body.
Args:
function_name: The name of the function being tracked.
"""
self.in_a_function = True
self.lines_in_function = 0
self.current_function = function_name
def Count(self):
"""Count line in current function body."""
if self.in_a_function:
self.lines_in_function += 1
def Check(self, error, filename, linenum):
"""Report if too many lines in function body.
Args:
error: The function to call with any errors found.
filename: The name of the current file.
linenum: The number of the line to check.
"""
if Match(r'T(EST|est)', self.current_function):
base_trigger = self._TEST_TRIGGER
else:
base_trigger = self._NORMAL_TRIGGER
trigger = base_trigger * 2**_VerboseLevel()
if self.lines_in_function > trigger:
error_level = int(math.log(self.lines_in_function / base_trigger, 2))
# lines == base_trigger => 0, 2x => 1, 4x => 2, ..., capped at 5 below.
if error_level > 5:
error_level = 5
error(filename, linenum, 'readability/fn_size', error_level,
'Small and focused functions are preferred:'
' %s has %d non-comment lines'
' (error triggered by exceeding %d lines).' % (
self.current_function, self.lines_in_function, trigger))
def End(self):
"""Stop analyzing function body."""
self.in_a_function = False
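# A hedged illustration (not part of the original cpplint) of the trigger math
# in Check() above, for a non-test function.
def _function_length_example():
    state = _FunctionState()
    state.Begin('DoSomething')          # not matched by r'T(EST|est)'
    state.lines_in_function = 1000
    # With _NORMAL_TRIGGER == 250, error_level = int(log2(1000 / 250)) == 2.
    return int(math.log(state.lines_in_function / state._NORMAL_TRIGGER, 2))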
class _IncludeError(Exception):
"""Indicates a problem with the include order in a file."""
pass
class FileInfo:
"""Provides utility functions for filenames.
FileInfo provides easy access to the components of a file's path
relative to the project root.
"""
def __init__(self, filename):
self._filename = filename
def FullName(self):
"""Make Windows paths like Unix."""
return os.path.abspath(self._filename).replace('\\', '/')
def RepositoryName(self):
"""FullName after removing the local path to the repository.
If we have a real absolute path name here we can try to do something smart:
detecting the root of the checkout and truncating /path/to/checkout from
the name so that we get header guards that don't include things like
"C:\Documents and Settings\..." or "/home/username/..." in them and thus
people on different computers who have checked the source out to different
locations won't see bogus errors.
"""
fullname = self.FullName()
if os.path.exists(fullname):
project_dir = os.path.dirname(fullname)
if os.path.exists(os.path.join(project_dir, ".svn")):
# If there's a .svn file in the current directory, we recursively look
# up the directory tree for the top of the SVN checkout
root_dir = project_dir
one_up_dir = os.path.dirname(root_dir)
while os.path.exists(os.path.join(one_up_dir, ".svn")):
root_dir = os.path.dirname(root_dir)
one_up_dir = os.path.dirname(one_up_dir)
prefix = os.path.commonprefix([root_dir, project_dir])
return fullname[len(prefix) + 1:]
# Not SVN <= 1.6? Try to find a git, hg, or svn top level directory by
# searching up from the current path.
root_dir = os.path.dirname(fullname)
while (root_dir != os.path.dirname(root_dir) and
not os.path.exists(os.path.join(root_dir, ".git")) and
not os.path.exists(os.path.join(root_dir, ".hg")) and
not os.path.exists(os.path.join(root_dir, ".svn"))):
root_dir = os.path.dirname(root_dir)
if (os.path.exists(os.path.join(root_dir, ".git")) or
os.path.exists(os.path.join(root_dir, ".hg")) or
os.path.exists(os.path.join(root_dir, ".svn"))):
prefix = os.path.commonprefix([root_dir, project_dir])
return fullname[len(prefix) + 1:]
# Don't know what to do; header guard warnings may be wrong...
return fullname
def Split(self):
"""Splits the file into the directory, basename, and extension.
For 'chrome/browser/browser.cc', Split() would
return ('chrome/browser', 'browser', '.cc')
Returns:
A tuple of (directory, basename, extension).
"""
googlename = self.RepositoryName()
project, rest = os.path.split(googlename)
return (project,) + os.path.splitext(rest)
def BaseName(self):
"""File base name - text after the final slash, before the final period."""
return self.Split()[1]
def Extension(self):
"""File extension - text following the final period."""
return self.Split()[2]
def NoExtension(self):
"""File has no source file extension."""
return '/'.join(self.Split()[0:2])
def IsSource(self):
"""File has a source file extension."""
return self.Extension()[1:] in ('c', 'cc', 'cpp', 'cxx')
def _ShouldPrintError(category, confidence, linenum):
"""If confidence >= verbose, category passes filter and is not suppressed."""
# There are three ways we might decide not to print an error message:
# a "NOLINT(category)" comment appears in the source,
# the verbosity level isn't high enough, or the filters filter it out.
if IsErrorSuppressedByNolint(category, linenum):
return False
if confidence < _cpplint_state.verbose_level:
return False
is_filtered = False
for one_filter in _Filters():
if one_filter.startswith('-'):
if category.startswith(one_filter[1:]):
is_filtered = True
elif one_filter.startswith('+'):
if category.startswith(one_filter[1:]):
is_filtered = False
else:
assert False # should have been checked for in SetFilter.
if is_filtered:
return False
return True
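# A hedged illustration (not part of the original cpplint), consistent with the
# --filter examples in _USAGE: with --filter=-whitespace,+whitespace/braces a
# 'whitespace/braces' error is still printed (the later '+' filter wins), while
# 'whitespace/indent' is filtered out, because filters are applied left to right.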
def Error(filename, linenum, category, confidence, message):
"""Logs the fact we've found a lint error.
We log where the error was found, and also our confidence in the error,
that is, how certain we are this is a legitimate style regression, and
not a misidentification or a use that's sometimes justified.
False positives can be suppressed by the use of
"cpplint(category)" comments on the offending line. These are
parsed into _error_suppressions.
Args:
filename: The name of the file containing the error.
linenum: The number of the line containing the error.
category: A string used to describe the "category" this bug
falls under: "whitespace", say, or "runtime". Categories
may have a hierarchy separated by slashes: "whitespace/indent".
confidence: A number from 1-5 representing a confidence score for
the error, with 5 meaning that we are certain of the problem,
and 1 meaning that it could be a legitimate construct.
message: The error message.
"""
if _ShouldPrintError(category, confidence, linenum):
_cpplint_state.IncrementErrorCount(category)
if _cpplint_state.output_format == 'vs7':
sys.stderr.write('%s(%s): %s [%s] [%d]\n' % (
filename, linenum, message, category, confidence))
elif _cpplint_state.output_format == 'eclipse':
sys.stderr.write('%s:%s: warning: %s [%s] [%d]\n' % (
filename, linenum, message, category, confidence))
else:
sys.stderr.write('%s:%s: %s [%s] [%d]\n' % (
filename, linenum, message, category, confidence))
# Matches standard C++ escape sequences per 2.13.2.3 of the C++ standard.
_RE_PATTERN_CLEANSE_LINE_ESCAPES = re.compile(
r'\\([abfnrtv?"\\\']|\d+|x[0-9a-fA-F]+)')
# Matches strings. Escape codes should already be removed by ESCAPES.
_RE_PATTERN_CLEANSE_LINE_DOUBLE_QUOTES = re.compile(r'"[^"]*"')
# Matches characters. Escape codes should already be removed by ESCAPES.
_RE_PATTERN_CLEANSE_LINE_SINGLE_QUOTES = re.compile(r"'.'")
# Matches multi-line C++ comments.
# This RE is a little bit more complicated than one might expect, because we
# have to take care of spaces removed by tools so we can handle comments inside
# statements better.
# The current rule is: We only clear spaces from both sides when we're at the
# end of the line. Otherwise, we try to remove spaces from the right side,
# if this doesn't work we try on left side but only if there's a non-character
# on the right.
_RE_PATTERN_CLEANSE_LINE_C_COMMENTS = re.compile(
r"""(\s*/\*.*\*/\s*$|
/\*.*\*/\s+|
\s+/\*.*\*/(?=\W)|
/\*.*\*/)""", re.VERBOSE)
def IsCppString(line):
"""Does line terminate so, that the next symbol is in string constant.
This function does not consider single-line nor multi-line comments.
Args:
line: a partial line of code, from position 0 up to some position n.
Returns:
True, if next character appended to 'line' is inside a
string constant.
"""
line = line.replace(r'\\', 'XX') # after this, \\" does not match to \"
return ((line.count('"') - line.count(r'\"') - line.count("'\"'")) & 1) == 1
def CleanseRawStrings(raw_lines):
"""Removes C++11 raw strings from lines.
Before:
static const char kData[] = R"(
multi-line string
)";
After:
static const char kData[] = ""
(replaced by blank line)
"";
Args:
raw_lines: list of raw lines.
Returns:
list of lines with C++11 raw strings replaced by empty strings.
"""
delimiter = None
lines_without_raw_strings = []
for line in raw_lines:
if delimiter:
# Inside a raw string, look for the end
end = line.find(delimiter)
if end >= 0:
# Found the end of the string, match leading space for this
# line and resume copying the original lines, and also insert
# a "" on the last line.
leading_space = Match(r'^(\s*)\S', line)
line = leading_space.group(1) + '""' + line[end + len(delimiter):]
delimiter = None
else:
# Haven't found the end yet, append a blank line.
line = ''
else:
# Look for beginning of a raw string.
# See 2.14.15 [lex.string] for syntax.
matched = Match(r'^(.*)\b(?:R|u8R|uR|UR|LR)"([^\s\\()]*)\((.*)$', line)
if matched:
delimiter = ')' + matched.group(2) + '"'
end = matched.group(3).find(delimiter)
if end >= 0:
# Raw string ended on same line
line = (matched.group(1) + '""' +
matched.group(3)[end + len(delimiter):])
delimiter = None
else:
# Start of a multi-line raw string
line = matched.group(1) + '""'
lines_without_raw_strings.append(line)
# TODO(unknown): if delimiter is not None here, we might want to
# emit a warning for unterminated string.
return lines_without_raw_strings
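# A hedged sketch (not part of the original cpplint) reproducing the docstring
# example above.
def _cleanse_raw_strings_example():
    before = ['static const char kData[] = R"(',
              'multi-line string',
              ')";']
    return CleanseRawStrings(before)
    # -> ['static const char kData[] = ""', '', '"";']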
def FindNextMultiLineCommentStart(lines, lineix):
"""Find the beginning marker for a multiline comment."""
while lineix < len(lines):
if lines[lineix].strip().startswith('/*'):
# Only return this marker if the comment goes beyond this line
if lines[lineix].strip().find('*/', 2) < 0:
return lineix
lineix += 1
return len(lines)
def FindNextMultiLineCommentEnd(lines, lineix):
"""We are inside a comment, find the end marker."""
while lineix < len(lines):
if lines[lineix].strip().endswith('*/'):
return lineix
lineix += 1
return len(lines)
def RemoveMultiLineCommentsFromRange(lines, begin, end):
"""Clears a range of lines for multi-line comments."""
# Having // dummy comments makes the lines non-empty, so we will not get
# unnecessary blank line warnings later in the code.
for i in range(begin, end):
lines[i] = '// dummy'
def RemoveMultiLineComments(filename, lines, error):
"""Removes multiline (c-style) comments from lines."""
lineix = 0
while lineix < len(lines):
lineix_begin = FindNextMultiLineCommentStart(lines, lineix)
if lineix_begin >= len(lines):
return
lineix_end = FindNextMultiLineCommentEnd(lines, lineix_begin)
if lineix_end >= len(lines):
error(filename, lineix_begin + 1, 'readability/multiline_comment', 5,
'Could not find end of multi-line comment')
return
RemoveMultiLineCommentsFromRange(lines, lineix_begin, lineix_end + 1)
lineix = lineix_end + 1
def CleanseComments(line):
"""Removes //-comments and single-line C-style /* */ comments.
Args:
line: A line of C++ source.
Returns:
The line with single-line comments removed.
"""
commentpos = line.find('//')
if commentpos != -1 and not IsCppString(line[:commentpos]):
line = line[:commentpos].rstrip()
# get rid of /* ... */
return _RE_PATTERN_CLEANSE_LINE_C_COMMENTS.sub('', line)
class CleansedLines(object):
"""Holds 3 copies of all lines with different preprocessing applied to them.
1) elided member contains lines without strings and comments,
2) lines member contains lines without comments, and
3) raw_lines member contains all the lines without processing.
All these three members are of <type 'list'>, and of the same length.
"""
def __init__(self, lines):
self.elided = []
self.lines = []
self.raw_lines = lines
self.num_lines = len(lines)
self.lines_without_raw_strings = CleanseRawStrings(lines)
for linenum in range(len(self.lines_without_raw_strings)):
self.lines.append(CleanseComments(
self.lines_without_raw_strings[linenum]))
elided = self._CollapseStrings(self.lines_without_raw_strings[linenum])
self.elided.append(CleanseComments(elided))
def NumLines(self):
"""Returns the number of lines represented."""
return self.num_lines
@staticmethod
def _CollapseStrings(elided):
"""Collapses strings and chars on a line to simple "" or '' blocks.
We nix strings first so we're not fooled by text like '"http://"'
Args:
elided: The line being processed.
Returns:
The line with collapsed strings.
"""
if not _RE_PATTERN_INCLUDE.match(elided):
# Remove escaped characters first to make quote/single quote collapsing
# basic. Things that look like escaped characters shouldn't occur
# outside of strings and chars.
elided = _RE_PATTERN_CLEANSE_LINE_ESCAPES.sub('', elided)
elided = _RE_PATTERN_CLEANSE_LINE_SINGLE_QUOTES.sub("''", elided)
elided = _RE_PATTERN_CLEANSE_LINE_DOUBLE_QUOTES.sub('""', elided)
return elided
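# A hedged illustration (not part of the original cpplint) of _CollapseStrings:
#   'printf("%d > %d", a, b);'  collapses to  'printf("", a, b);'
# so operators inside string literals do not confuse later checks.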
def FindEndOfExpressionInLine(line, startpos, depth, startchar, endchar):
"""Find the position just after the matching endchar.
Args:
line: a CleansedLines line.
startpos: start searching at this position.
depth: nesting level at startpos.
startchar: expression opening character.
endchar: expression closing character.
Returns:
On finding matching endchar: (index just after matching endchar, 0)
Otherwise: (-1, new depth at end of this line)
"""
for i in xrange(startpos, len(line)):
if line[i] == startchar:
depth += 1
elif line[i] == endchar:
depth -= 1
if depth == 0:
return (i + 1, 0)
return (-1, depth)
def CloseExpression(clean_lines, linenum, pos):
"""If input points to ( or { or [ or <, finds the position that closes it.
If lines[linenum][pos] points to a '(' or '{' or '[' or '<', finds the
linenum/pos that correspond to the closing of the expression.
Args:
clean_lines: A CleansedLines instance containing the file.
linenum: The number of the line to check.
pos: A position on the line.
Returns:
A tuple (line, linenum, pos) pointer *past* the closing brace, or
(line, len(lines), -1) if we never find a close. Note we ignore
strings and comments when matching; and the line we return is the
'cleansed' line at linenum.
"""
line = clean_lines.elided[linenum]
startchar = line[pos]
if startchar not in '({[<':
return (line, clean_lines.NumLines(), -1)
if startchar == '(': endchar = ')'
if startchar == '[': endchar = ']'
if startchar == '{': endchar = '}'
if startchar == '<': endchar = '>'
# Check first line
(end_pos, num_open) = FindEndOfExpressionInLine(
line, pos, 0, startchar, endchar)
if end_pos > -1:
return (line, linenum, end_pos)
# Continue scanning forward
while linenum < clean_lines.NumLines() - 1:
linenum += 1
line = clean_lines.elided[linenum]
(end_pos, num_open) = FindEndOfExpressionInLine(
line, 0, num_open, startchar, endchar)
if end_pos > -1:
return (line, linenum, end_pos)
# Did not find endchar before end of file, give up
return (line, clean_lines.NumLines(), -1)
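# A hedged sketch (not part of the original cpplint): finding the position just
# past a ')' whose matching '(' is on an earlier line.
def _close_expression_example():
    lines = CleansedLines(['Foo(bar,', '    baz);', ''])
    _, end_linenum, end_pos = CloseExpression(lines, 0, 3)  # '(' is at column 3
    return (end_linenum, end_pos)  # (1, 8): just past the ')' on the next line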
def FindStartOfExpressionInLine(line, endpos, depth, startchar, endchar):
"""Find position at the matching startchar.
This is almost the reverse of FindEndOfExpressionInLine, but note
that the input position and returned position differs by 1.
Args:
line: a CleansedLines line.
endpos: start searching at this position.
depth: nesting level at endpos.
startchar: expression opening character.
endchar: expression closing character.
Returns:
On finding matching startchar: (index at matching startchar, 0)
Otherwise: (-1, new depth at beginning of this line)
"""
for i in xrange(endpos, -1, -1):
if line[i] == endchar:
depth += 1
elif line[i] == startchar:
depth -= 1
if depth == 0:
return (i, 0)
return (-1, depth)
def ReverseCloseExpression(clean_lines, linenum, pos):
"""If input points to ) or } or ] or >, finds the position that opens it.
If lines[linenum][pos] points to a ')' or '}' or ']' or '>', finds the
linenum/pos that correspond to the opening of the expression.
Args:
clean_lines: A CleansedLines instance containing the file.
linenum: The number of the line to check.
pos: A position on the line.
Returns:
A tuple (line, linenum, pos) pointer *at* the opening brace, or
(line, 0, -1) if we never find the matching opening brace. Note
we ignore strings and comments when matching; and the line we
return is the 'cleansed' line at linenum.
"""
line = clean_lines.elided[linenum]
endchar = line[pos]
if endchar not in ')}]>':
return (line, 0, -1)
if endchar == ')': startchar = '('
if endchar == ']': startchar = '['
if endchar == '}': startchar = '{'
if endchar == '>': startchar = '<'
# Check last line
(start_pos, num_open) = FindStartOfExpressionInLine(
line, pos, 0, startchar, endchar)
if start_pos > -1:
return (line, linenum, start_pos)
# Continue scanning backward
while linenum > 0:
linenum -= 1
line = clean_lines.elided[linenum]
(start_pos, num_open) = FindStartOfExpressionInLine(
line, len(line) - 1, num_open, startchar, endchar)
if start_pos > -1:
return (line, linenum, start_pos)
# Did not find startchar before beginning of file, give up
return (line, 0, -1)
def CheckForCopyright(filename, lines, error):
"""Logs an error if no Copyright message appears at the top of the file."""
# We'll say it should occur by line 10. Don't forget there's a
# dummy line at the front.
for line in xrange(1, min(len(lines), 11)):
if re.search(r'Copyright', lines[line], re.I): break
else: # means no copyright line was found
error(filename, 0, 'legal/copyright', 5,
'No copyright message found. '
'You should have a line: "Copyright [year] <Copyright Owner>"')
def GetHeaderGuardCPPVariable(filename):
"""Returns the CPP variable that should be used as a header guard.
Args:
filename: The name of a C++ header file.
Returns:
The CPP variable that should be used as a header guard in the
named file.
"""
# Restores original filename in case that cpplint is invoked from Emacs's
# flymake.
filename = re.sub(r'_flymake\.h$', '.h', filename)
filename = re.sub(r'/\.flymake/([^/]*)$', r'/\1', filename)
fileinfo = FileInfo(filename)
file_path_from_root = fileinfo.RepositoryName()
if _root:
file_path_from_root = re.sub('^' + _root + os.sep, '', file_path_from_root)
return re.sub(r'[-./\s]', '_', file_path_from_root).upper() + '_'
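# A hedged illustration (not part of the original cpplint), matching the --root
# examples in _USAGE: for src/chrome/browser/ui/browser.h with src/ as the
# repository root,
#   GetHeaderGuardCPPVariable(...) -> 'CHROME_BROWSER_UI_BROWSER_H_'   (no --root)
#   with --root=chrome             -> 'BROWSER_UI_BROWSER_H_'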
def CheckForHeaderGuard(filename, lines, error):
"""Checks that the file contains a header guard.
Logs an error if no #ifndef header guard is present. For other
headers, checks that the full pathname is used.
Args:
filename: The name of the C++ header file.
lines: An array of strings, each representing a line of the file.
error: The function to call with any errors found.
"""
cppvar = GetHeaderGuardCPPVariable(filename)
ifndef = None
ifndef_linenum = 0
define = None
endif = None
endif_linenum = 0
for linenum, line in enumerate(lines):
linesplit = line.split()
if len(linesplit) >= 2:
# find the first occurrence of #ifndef and #define, save arg
if not ifndef and linesplit[0] == '#ifndef':
# set ifndef to the header guard presented on the #ifndef line.
ifndef = linesplit[1]
ifndef_linenum = linenum
if not define and linesplit[0] == '#define':
define = linesplit[1]
# find the last occurrence of #endif, save entire line
if line.startswith('#endif'):
endif = line
endif_linenum = linenum
if not ifndef:
error(filename, 0, 'build/header_guard', 5,
'No #ifndef header guard found, suggested CPP variable is: %s' %
cppvar)
return
if not define:
error(filename, 0, 'build/header_guard', 5,
'No #define header guard found, suggested CPP variable is: %s' %
cppvar)
return
# The guard should be PATH_FILE_H_, but we also allow PATH_FILE_H__
# for backward compatibility.
if ifndef != cppvar:
error_level = 0
if ifndef != cppvar + '_':
error_level = 5
ParseNolintSuppressions(filename, lines[ifndef_linenum], ifndef_linenum,
error)
error(filename, ifndef_linenum, 'build/header_guard', error_level,
'#ifndef header guard has wrong style, please use: %s' % cppvar)
if define != ifndef:
error(filename, 0, 'build/header_guard', 5,
'#ifndef and #define don\'t match, suggested CPP variable is: %s' %
cppvar)
return
if endif != ('#endif // %s' % cppvar):
error_level = 0
if endif != ('#endif // %s' % (cppvar + '_')):
error_level = 5
ParseNolintSuppressions(filename, lines[endif_linenum], endif_linenum,
error)
error(filename, endif_linenum, 'build/header_guard', error_level,
'#endif line should be "#endif // %s"' % cppvar)
def CheckForBadCharacters(filename, lines, error):
"""Logs an error for each line containing bad characters.
Two kinds of bad characters:
1. Unicode replacement characters: These indicate that either the file
contained invalid UTF-8 (likely) or Unicode replacement characters (which
it shouldn't). Note that it's possible for this to throw off line
numbering if the invalid UTF-8 occurred adjacent to a newline.
2. NUL bytes. These are problematic for some tools.
Args:
filename: The name of the current file.
lines: An array of strings, each representing a line of the file.
error: The function to call with any errors found.
"""
for linenum, line in enumerate(lines):
if u'\ufffd' in line:
error(filename, linenum, 'readability/utf8', 5,
'Line contains invalid UTF-8 (or Unicode replacement character).')
if '\0' in line:
error(filename, linenum, 'readability/nul', 5, 'Line contains NUL byte.')
def CheckForNewlineAtEOF(filename, lines, error):
"""Logs an error if there is no newline char at the end of the file.
Args:
filename: The name of the current file.
lines: An array of strings, each representing a line of the file.
error: The function to call with any errors found.
"""
# The array lines() was created by adding two newlines to the
# original file (go figure), then splitting on \n.
# To verify that the file ends in \n, we just have to make sure the
# last-but-two element of lines() exists and is empty.
if len(lines) < 3 or lines[-2]:
error(filename, len(lines) - 2, 'whitespace/ending_newline', 5,
'Could not find a newline character at the end of the file.')
def CheckForMultilineCommentsAndStrings(filename, clean_lines, linenum, error):
"""Logs an error if we see /* ... */ or "..." that extend past one line.
/* ... */ comments are legit inside macros, for one line.
Otherwise, we prefer // comments, so it's ok to warn about the
other. Likewise, it's ok for strings to extend across multiple
lines, as long as a line continuation character (backslash)
terminates each line. Although not currently prohibited by the C++
style guide, it's ugly and unnecessary. We don't do well with either
in this lint program, so we warn about both.
Args:
filename: The name of the current file.
clean_lines: A CleansedLines instance containing the file.
linenum: The number of the line to check.
error: The function to call with any errors found.
"""
line = clean_lines.elided[linenum]
# Remove all \\ (escaped backslashes) from the line. They are OK, and the
# second (escaped) slash may trigger later \" detection erroneously.
line = line.replace('\\\\', '')
if line.count('/*') > line.count('*/'):
error(filename, linenum, 'readability/multiline_comment', 5,
'Complex multi-line /*...*/-style comment found. '
'Lint may give bogus warnings. '
'Consider replacing these with //-style comments, '
'with #if 0...#endif, '
'or with more clearly structured multi-line comments.')
if (line.count('"') - line.count('\\"')) % 2:
error(filename, linenum, 'readability/multiline_string', 5,
'Multi-line string ("...") found. This lint script doesn\'t '
'do well with such strings, and may give bogus warnings. '
'Use C++11 raw strings or concatenation instead.')
threading_list = (
('asctime(', 'asctime_r('),
('ctime(', 'ctime_r('),
('getgrgid(', 'getgrgid_r('),
('getgrnam(', 'getgrnam_r('),
('getlogin(', 'getlogin_r('),
('getpwnam(', 'getpwnam_r('),
('getpwuid(', 'getpwuid_r('),
('gmtime(', 'gmtime_r('),
('localtime(', 'localtime_r('),
('rand(', 'rand_r('),
('strtok(', 'strtok_r('),
('ttyname(', 'ttyname_r('),
)
def CheckPosixThreading(filename, clean_lines, linenum, error):
"""Checks for calls to thread-unsafe functions.
Much code was originally written without consideration of
multi-threading. Also, engineers tend to rely on their old experience;
they learned POSIX before the threading extensions were added. These
tests guide the engineers to use thread-safe functions (when using
posix directly).
Args:
filename: The name of the current file.
clean_lines: A CleansedLines instance containing the file.
linenum: The number of the line to check.
error: The function to call with any errors found.
"""
line = clean_lines.elided[linenum]
for single_thread_function, multithread_safe_function in threading_list:
ix = line.find(single_thread_function)
# Comparisons made explicit for clarity -- pylint: disable=g-explicit-bool-comparison
if ix >= 0 and (ix == 0 or (not line[ix - 1].isalnum() and
line[ix - 1] not in ('_', '.', '>'))):
error(filename, linenum, 'runtime/threadsafe_fn', 2,
'Consider using ' + multithread_safe_function +
'...) instead of ' + single_thread_function +
'...) for improved thread safety.')
def CheckVlogArguments(filename, clean_lines, linenum, error):
"""Checks that VLOG() is only used for defining a logging level.
For example, VLOG(2) is correct. VLOG(INFO), VLOG(WARNING), VLOG(ERROR), and
VLOG(FATAL) are not.
Args:
filename: The name of the current file.
clean_lines: A CleansedLines instance containing the file.
linenum: The number of the line to check.
error: The function to call with any errors found.
"""
line = clean_lines.elided[linenum]
if Search(r'\bVLOG\((INFO|ERROR|WARNING|DFATAL|FATAL)\)', line):
error(filename, linenum, 'runtime/vlog', 5,
'VLOG() should be used with numeric verbosity level. '
'Use LOG() if you want symbolic severity levels.')
# Matches invalid increment: *count++, which moves pointer instead of
# incrementing a value.
_RE_PATTERN_INVALID_INCREMENT = re.compile(
r'^\s*\*\w+(\+\+|--);')
def CheckInvalidIncrement(filename, clean_lines, linenum, error):
"""Checks for invalid increment *count++.
For example following function:
void increment_counter(int* count) {
*count++;
}
is invalid, because it effectively does count++, moving pointer, and should
be replaced with ++*count, (*count)++ or *count += 1.
Args:
filename: The name of the current file.
clean_lines: A CleansedLines instance containing the file.
linenum: The number of the line to check.
error: The function to call with any errors found.
"""
line = clean_lines.elided[linenum]
if _RE_PATTERN_INVALID_INCREMENT.match(line):
error(filename, linenum, 'runtime/invalid_increment', 5,
'Changing pointer instead of value (or unused value of operator*).')
class _BlockInfo(object):
"""Stores information about a generic block of code."""
def __init__(self, seen_open_brace):
self.seen_open_brace = seen_open_brace
self.open_parentheses = 0
self.inline_asm = _NO_ASM
def CheckBegin(self, filename, clean_lines, linenum, error):
"""Run checks that applies to text up to the opening brace.
This is mostly for checking the text after the class identifier
and the "{", usually where the base class is specified. For other
blocks, there isn't much to check, so we always pass.
Args:
filename: The name of the current file.
clean_lines: A CleansedLines instance containing the file.
linenum: The number of the line to check.
error: The function to call with any errors found.
"""
pass
def CheckEnd(self, filename, clean_lines, linenum, error):
"""Run checks that applies to text after the closing brace.
This is mostly used for checking end of namespace comments.
Args:
filename: The name of the current file.
clean_lines: A CleansedLines instance containing the file.
linenum: The number of the line to check.
error: The function to call with any errors found.
"""
pass
class _ClassInfo(_BlockInfo):
"""Stores information about a class."""
def __init__(self, name, class_or_struct, clean_lines, linenum):
_BlockInfo.__init__(self, False)
self.name = name
self.starting_linenum = linenum
self.is_derived = False
if class_or_struct == 'struct':
self.access = 'public'
self.is_struct = True
else:
self.access = 'private'
self.is_struct = False
# Remember initial indentation level for this class. Using raw_lines here
# instead of elided to account for leading comments.
initial_indent = Match(r'^( *)\S', clean_lines.raw_lines[linenum])
if initial_indent:
self.class_indent = len(initial_indent.group(1))
else:
self.class_indent = 0
# Try to find the end of the class. This will be confused by things like:
# class A {
# } *x = { ...
#
# But it's still good enough for CheckSectionSpacing.
self.last_line = 0
depth = 0
for i in range(linenum, clean_lines.NumLines()):
line = clean_lines.elided[i]
depth += line.count('{') - line.count('}')
if not depth:
self.last_line = i
break
def CheckBegin(self, filename, clean_lines, linenum, error):
# Look for a bare ':'
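    # e.g. 'class Derived : public Base {' contains a bare ':' and marks the
    # class as derived, while the '::' in 'Foo::Bar' does not match because
    # the pattern requires a ':' that is not adjacent to another ':'.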
if Search('(^|[^:]):($|[^:])', clean_lines.elided[linenum]):
self.is_derived = True
def CheckEnd(self, filename, clean_lines, linenum, error):
# Check that closing brace is aligned with beginning of the class.
    # Only do this if the closing brace is preceded only by whitespace.
# This means we will not check single-line class definitions.
indent = Match(r'^( *)\}', clean_lines.elided[linenum])
if indent and len(indent.group(1)) != self.class_indent:
if self.is_struct:
parent = 'struct ' + self.name
else:
parent = 'class ' + self.name
error(filename, linenum, 'whitespace/indent', 3,
'Closing brace should be aligned with beginning of %s' % parent)
class _NamespaceInfo(_BlockInfo):
"""Stores information about a namespace."""
def __init__(self, name, linenum):
_BlockInfo.__init__(self, False)
self.name = name or ''
self.starting_linenum = linenum
def CheckEnd(self, filename, clean_lines, linenum, error):
"""Check end of namespace comments."""
line = clean_lines.raw_lines[linenum]
    # Check how many lines are enclosed in this namespace. Don't issue a
# warning for missing namespace comments if there aren't enough
# lines. However, do apply checks if there is already an end of
# namespace comment and it's incorrect.
#
# TODO(unknown): We always want to check end of namespace comments
# if a namespace is large, but sometimes we also want to apply the
# check if a short namespace contained nontrivial things (something
# other than forward declarations). There is currently no logic on
# deciding what these nontrivial things are, so this check is
# triggered by namespace size only, which works most of the time.
if (linenum - self.starting_linenum < 10
and not Match(r'};*\s*(//|/\*).*\bnamespace\b', line)):
return
# Look for matching comment at end of namespace.
#
# Note that we accept C style "/* */" comments for terminating
    # namespaces, so that code that terminates namespaces inside
# preprocessor macros can be cpplint clean.
#
# We also accept stuff like "// end of namespace <name>." with the
# period at the end.
#
# Besides these, we don't accept anything else, otherwise we might
# get false negatives when existing comment is a substring of the
# expected namespace.
if self.name:
# Named namespace
if not Match((r'};*\s*(//|/\*).*\bnamespace\s+' + re.escape(self.name) +
r'[\*/\.\\\s]*$'),
line):
error(filename, linenum, 'readability/namespace', 5,
'Namespace should be terminated with "// namespace %s"' %
self.name)
else:
# Anonymous namespace
if not Match(r'};*\s*(//|/\*).*\bnamespace[\*/\.\\\s]*$', line):
error(filename, linenum, 'readability/namespace', 5,
'Namespace should be terminated with "// namespace"')
class _PreprocessorInfo(object):
"""Stores checkpoints of nesting stacks when #if/#else is seen."""
def __init__(self, stack_before_if):
# The entire nesting stack before #if
self.stack_before_if = stack_before_if
# The entire nesting stack up to #else
self.stack_before_else = []
# Whether we have already seen #else or #elif
self.seen_else = False
class _NestingState(object):
"""Holds states related to parsing braces."""
def __init__(self):
# Stack for tracking all braces. An object is pushed whenever we
# see a "{", and popped when we see a "}". Only 3 types of
# objects are possible:
# - _ClassInfo: a class or struct.
# - _NamespaceInfo: a namespace.
# - _BlockInfo: some other type of block.
self.stack = []
# Stack of _PreprocessorInfo objects.
self.pp_stack = []
def SeenOpenBrace(self):
"""Check if we have seen the opening brace for the innermost block.
Returns:
True if we have seen the opening brace, False if the innermost
block is still expecting an opening brace.
"""
return (not self.stack) or self.stack[-1].seen_open_brace
def InNamespaceBody(self):
"""Check if we are currently one level inside a namespace body.
Returns:
True if top of the stack is a namespace block, False otherwise.
"""
return self.stack and isinstance(self.stack[-1], _NamespaceInfo)
def UpdatePreprocessor(self, line):
"""Update preprocessor stack.
We need to handle preprocessors due to classes like this:
#ifdef SWIG
struct ResultDetailsPageElementExtensionPoint {
#else
struct ResultDetailsPageElementExtensionPoint : public Extension {
#endif
We make the following assumptions (good enough for most files):
- Preprocessor condition evaluates to true from #if up to first
#else/#elif/#endif.
- Preprocessor condition evaluates to false from #else/#elif up
to #endif. We still perform lint checks on these lines, but
these do not affect nesting stack.
Args:
line: current line to check.
"""
if Match(r'^\s*#\s*(if|ifdef|ifndef)\b', line):
# Beginning of #if block, save the nesting stack here. The saved
# stack will allow us to restore the parsing state in the #else case.
self.pp_stack.append(_PreprocessorInfo(copy.deepcopy(self.stack)))
elif Match(r'^\s*#\s*(else|elif)\b', line):
# Beginning of #else block
if self.pp_stack:
if not self.pp_stack[-1].seen_else:
# This is the first #else or #elif block. Remember the
# whole nesting stack up to this point. This is what we
# keep after the #endif.
self.pp_stack[-1].seen_else = True
self.pp_stack[-1].stack_before_else = copy.deepcopy(self.stack)
# Restore the stack to how it was before the #if
self.stack = copy.deepcopy(self.pp_stack[-1].stack_before_if)
else:
# TODO(unknown): unexpected #else, issue warning?
pass
elif Match(r'^\s*#\s*endif\b', line):
# End of #if or #else blocks.
if self.pp_stack:
# If we saw an #else, we will need to restore the nesting
# stack to its former state before the #else, otherwise we
# will just continue from where we left off.
if self.pp_stack[-1].seen_else:
# Here we can just use a shallow copy since we are the last
# reference to it.
self.stack = self.pp_stack[-1].stack_before_else
# Drop the corresponding #if
self.pp_stack.pop()
else:
# TODO(unknown): unexpected #endif, issue warning?
pass
def Update(self, filename, clean_lines, linenum, error):
"""Update nesting state with current line.
Args:
filename: The name of the current file.
clean_lines: A CleansedLines instance containing the file.
linenum: The number of the line to check.
error: The function to call with any errors found.
"""
line = clean_lines.elided[linenum]
# Update pp_stack first
self.UpdatePreprocessor(line)
# Count parentheses. This is to avoid adding struct arguments to
# the nesting stack.
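    # For example, when a parameter list spans several lines:
    #   void Foo(int x,
    #            struct sockaddr* addr) { ... }
    # the 'struct' keyword is seen while a parenthesis is still open and must
    # not be pushed onto the stack as the start of a struct definition.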
if self.stack:
inner_block = self.stack[-1]
depth_change = line.count('(') - line.count(')')
inner_block.open_parentheses += depth_change
# Also check if we are starting or ending an inline assembly block.
if inner_block.inline_asm in (_NO_ASM, _END_ASM):
if (depth_change != 0 and
inner_block.open_parentheses == 1 and
_MATCH_ASM.match(line)):
# Enter assembly block
inner_block.inline_asm = _INSIDE_ASM
else:
# Not entering assembly block. If previous line was _END_ASM,
# we will now shift to _NO_ASM state.
inner_block.inline_asm = _NO_ASM
elif (inner_block.inline_asm == _INSIDE_ASM and
inner_block.open_parentheses == 0):
# Exit assembly block
inner_block.inline_asm = _END_ASM
# Consume namespace declaration at the beginning of the line. Do
# this in a loop so that we catch same line declarations like this:
# namespace proto2 { namespace bridge { class MessageSet; } }
while True:
# Match start of namespace. The "\b\s*" below catches namespace
      # declarations even if they aren't followed by whitespace; this
# is so that we don't confuse our namespace checker. The
# missing spaces will be flagged by CheckSpacing.
namespace_decl_match = Match(r'^\s*namespace\b\s*([:\w]+)?(.*)$', line)
if not namespace_decl_match:
break
new_namespace = _NamespaceInfo(namespace_decl_match.group(1), linenum)
self.stack.append(new_namespace)
line = namespace_decl_match.group(2)
if line.find('{') != -1:
new_namespace.seen_open_brace = True
line = line[line.find('{') + 1:]
# Look for a class declaration in whatever is left of the line
# after parsing namespaces. The regexp accounts for decorated classes
# such as in:
# class LOCKABLE API Object {
# };
#
# Templates with class arguments may confuse the parser, for example:
# template <class T
# class Comparator = less<T>,
# class Vector = vector<T> >
# class HeapQueue {
#
# Because this parser has no nesting state about templates, by the
    # time it sees "class Comparator", it may think that it's a new class.
# Nested templates have a similar problem:
# template <
# typename ExportedType,
# typename TupleType,
# template <typename, typename> class ImplTemplate>
#
# To avoid these cases, we ignore classes that are followed by '=' or '>'
class_decl_match = Match(
r'\s*(template\s*<[\w\s<>,:]*>\s*)?'
r'(class|struct)\s+([A-Z_]+\s+)*(\w+(?:::\w+)*)'
r'(([^=>]|<[^<>]*>|<[^<>]*<[^<>]*>\s*>)*)$', line)
if (class_decl_match and
(not self.stack or self.stack[-1].open_parentheses == 0)):
self.stack.append(_ClassInfo(
class_decl_match.group(4), class_decl_match.group(2),
clean_lines, linenum))
line = class_decl_match.group(5)
# If we have not yet seen the opening brace for the innermost block,
# run checks here.
if not self.SeenOpenBrace():
self.stack[-1].CheckBegin(filename, clean_lines, linenum, error)
# Update access control if we are inside a class/struct
if self.stack and isinstance(self.stack[-1], _ClassInfo):
classinfo = self.stack[-1]
access_match = Match(
r'^(.*)\b(public|private|protected|signals)(\s+(?:slots\s*)?)?'
r':(?:[^:]|$)',
line)
if access_match:
classinfo.access = access_match.group(2)
# Check that access keywords are indented +1 space. Skip this
# check if the keywords are not preceded by whitespaces.
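        # For example, for a class declared at column 0, the expected form is
        # ' public:' (one leading space); 'public:' and '  public:' are both
        # flagged.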
indent = access_match.group(1)
if (len(indent) != classinfo.class_indent + 1 and
Match(r'^\s*$', indent)):
if classinfo.is_struct:
parent = 'struct ' + classinfo.name
else:
parent = 'class ' + classinfo.name
slots = ''
if access_match.group(3):
slots = access_match.group(3)
error(filename, linenum, 'whitespace/indent', 3,
'%s%s: should be indented +1 space inside %s' % (
access_match.group(2), slots, parent))
# Consume braces or semicolons from what's left of the line
while True:
# Match first brace, semicolon, or closed parenthesis.
matched = Match(r'^[^{;)}]*([{;)}])(.*)$', line)
if not matched:
break
token = matched.group(1)
if token == '{':
        # If namespace or class hasn't seen an opening brace yet, mark
# namespace/class head as complete. Push a new block onto the
# stack otherwise.
if not self.SeenOpenBrace():
self.stack[-1].seen_open_brace = True
else:
self.stack.append(_BlockInfo(True))
if _MATCH_ASM.match(line):
self.stack[-1].inline_asm = _BLOCK_ASM
elif token == ';' or token == ')':
# If we haven't seen an opening brace yet, but we already saw
# a semicolon, this is probably a forward declaration. Pop
# the stack for these.
#
# Similarly, if we haven't seen an opening brace yet, but we
# already saw a closing parenthesis, then these are probably
# function arguments with extra "class" or "struct" keywords.
        # Also pop the stack for these.
if not self.SeenOpenBrace():
self.stack.pop()
else: # token == '}'
# Perform end of block checks and pop the stack.
if self.stack:
self.stack[-1].CheckEnd(filename, clean_lines, linenum, error)
self.stack.pop()
line = matched.group(2)
def InnermostClass(self):
"""Get class info on the top of the stack.
Returns:
A _ClassInfo object if we are inside a class, or None otherwise.
"""
for i in range(len(self.stack), 0, -1):
classinfo = self.stack[i - 1]
if isinstance(classinfo, _ClassInfo):
return classinfo
return None
def CheckCompletedBlocks(self, filename, error):
"""Checks that all classes and namespaces have been completely parsed.
Call this when all lines in a file have been processed.
Args:
filename: The name of the current file.
error: The function to call with any errors found.
"""
# Note: This test can result in false positives if #ifdef constructs
# get in the way of brace matching. See the testBuildClass test in
# cpplint_unittest.py for an example of this.
for obj in self.stack:
if isinstance(obj, _ClassInfo):
error(filename, obj.starting_linenum, 'build/class', 5,
'Failed to find complete declaration of class %s' %
obj.name)
elif isinstance(obj, _NamespaceInfo):
error(filename, obj.starting_linenum, 'build/namespaces', 5,
'Failed to find complete declaration of namespace %s' %
obj.name)
def CheckForNonStandardConstructs(filename, clean_lines, linenum,
nesting_state, error):
r"""Logs an error if we see certain non-ANSI constructs ignored by gcc-2.
Complain about several constructs which gcc-2 accepts, but which are
not standard C++. Warning about these in lint is one way to ease the
transition to new compilers.
- put storage class first (e.g. "static const" instead of "const static").
- "%lld" instead of %qd" in printf-type functions.
- "%1$d" is non-standard in printf-type functions.
- "\%" is an undefined character escape sequence.
- text after #endif is not allowed.
- invalid inner-style forward declaration.
- >? and <? operators, and their >?= and <?= cousins.
Additionally, check for constructor/destructor style violations and reference
members, as it is very convenient to do so while checking for
gcc-2 compliance.
Args:
filename: The name of the current file.
clean_lines: A CleansedLines instance containing the file.
linenum: The number of the line to check.
nesting_state: A _NestingState instance which maintains information about
the current stack of nested blocks being parsed.
error: A callable to which errors are reported, which takes 4 arguments:
filename, line number, error level, and message
"""
# Remove comments from the line, but leave in strings for now.
line = clean_lines.lines[linenum]
if Search(r'printf\s*\(.*".*%[-+ ]?\d*q', line):
error(filename, linenum, 'runtime/printf_format', 3,
'%q in format strings is deprecated. Use %ll instead.')
if Search(r'printf\s*\(.*".*%\d+\$', line):
error(filename, linenum, 'runtime/printf_format', 2,
'%N$ formats are unconventional. Try rewriting to avoid them.')
# Remove escaped backslashes before looking for undefined escapes.
line = line.replace('\\\\', '')
if Search(r'("|\').*\\(%|\[|\(|{)', line):
error(filename, linenum, 'build/printf_format', 3,
'%, [, (, and { are undefined character escapes. Unescape them.')
# For the rest, work with both comments and strings removed.
line = clean_lines.elided[linenum]
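  # e.g. 'const static int kFoo = 0;' is flagged below because the storage
  # class ('static') should come before the type.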
if Search(r'\b(const|volatile|void|char|short|int|long'
r'|float|double|signed|unsigned'
r'|schar|u?int8|u?int16|u?int32|u?int64)'
r'\s+(register|static|extern|typedef)\b',
line):
error(filename, linenum, 'build/storage_class', 5,
'Storage class (static, extern, typedef, etc) should be first.')
if Match(r'\s*#\s*endif\s*[^/\s]+', line):
error(filename, linenum, 'build/endif_comment', 5,
'Uncommented text after #endif is non-standard. Use a comment.')
if Match(r'\s*class\s+(\w+\s*::\s*)+\w+\s*;', line):
error(filename, linenum, 'build/forward_decl', 5,
'Inner-style forward declarations are invalid. Remove this line.')
if Search(r'(\w+|[+-]?\d+(\.\d*)?)\s*(<|>)\?=?\s*(\w+|[+-]?\d+)(\.\d*)?',
line):
error(filename, linenum, 'build/deprecated', 3,
'>? and <? (max and min) operators are non-standard and deprecated.')
if Search(r'^\s*const\s*string\s*&\s*\w+\s*;', line):
# TODO(unknown): Could it be expanded safely to arbitrary references,
# without triggering too many false positives? The first
# attempt triggered 5 warnings for mostly benign code in the regtest, hence
# the restriction.
# Here's the original regexp, for the reference:
# type_name = r'\w+((\s*::\s*\w+)|(\s*<\s*\w+?\s*>))?'
# r'\s*const\s*' + type_name + '\s*&\s*\w+\s*;'
error(filename, linenum, 'runtime/member_string_references', 2,
'const string& members are dangerous. It is much better to use '
'alternatives, such as pointers or simple constants.')
# Everything else in this function operates on class declarations.
# Return early if the top of the nesting stack is not a class, or if
# the class head is not completed yet.
classinfo = nesting_state.InnermostClass()
if not classinfo or not classinfo.seen_open_brace:
return
# The class may have been declared with namespace or classname qualifiers.
# The constructor and destructor will not have those qualifiers.
base_classname = classinfo.name.split('::')[-1]
# Look for single-argument constructors that aren't marked explicit.
# Technically a valid construct, but against style.
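  # e.g. '  Foo(int x);' inside class Foo is flagged, while the copy
  # constructor '  Foo(const Foo& other);' is not.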
args = Match(r'\s+(?:inline\s+)?%s\s*\(([^,()]+)\)'
% re.escape(base_classname),
line)
if (args and
args.group(1) != 'void' and
not Match(r'(const\s+)?%s(\s+const)?\s*(?:<\w+>\s*)?&'
% re.escape(base_classname), args.group(1).strip())):
error(filename, linenum, 'runtime/explicit', 5,
'Single-argument constructors should be marked explicit.')
def CheckSpacingForFunctionCall(filename, line, linenum, error):
"""Checks for the correctness of various spacing around function calls.
Args:
filename: The name of the current file.
line: The text of the line to check.
linenum: The number of the line to check.
error: The function to call with any errors found.
"""
# Since function calls often occur inside if/for/while/switch
# expressions - which have their own, more liberal conventions - we
# first see if we should be looking inside such an expression for a
# function call, to which we can apply more strict standards.
fncall = line # if there's no control flow construct, look at whole line
for pattern in (r'\bif\s*\((.*)\)\s*{',
r'\bfor\s*\((.*)\)\s*{',
r'\bwhile\s*\((.*)\)\s*[{;]',
r'\bswitch\s*\((.*)\)\s*{'):
match = Search(pattern, line)
if match:
fncall = match.group(1) # look inside the parens for function calls
break
# Except in if/for/while/switch, there should never be space
# immediately inside parens (eg "f( 3, 4 )"). We make an exception
# for nested parens ( (a+b) + c ). Likewise, there should never be
# a space before a ( when it's a function argument. I assume it's a
# function argument when the char before the whitespace is legal in
# a function name (alnum + _) and we're not starting a macro. Also ignore
  # pointers and references to arrays and functions because they're too tricky:
# we use a very simple way to recognize these:
# " (something)(maybe-something)" or
# " (something)(maybe-something," or
# " (something)[something]"
# Note that we assume the contents of [] to be short enough that
# they'll never need to wrap.
if ( # Ignore control structures.
not Search(r'\b(if|for|while|switch|return|new|delete|catch|sizeof)\b',
fncall) and
# Ignore pointers/references to functions.
not Search(r' \([^)]+\)\([^)]*(\)|,$)', fncall) and
# Ignore pointers/references to arrays.
not Search(r' \([^)]+\)\[[^\]]+\]', fncall)):
if Search(r'\w\s*\(\s(?!\s*\\$)', fncall): # a ( used for a fn call
error(filename, linenum, 'whitespace/parens', 4,
'Extra space after ( in function call')
elif Search(r'\(\s+(?!(\s*\\)|\()', fncall):
error(filename, linenum, 'whitespace/parens', 2,
'Extra space after (')
if (Search(r'\w\s+\(', fncall) and
not Search(r'#\s*define|typedef', fncall) and
not Search(r'\w\s+\((\w+::)*\*\w+\)\(', fncall)):
error(filename, linenum, 'whitespace/parens', 4,
'Extra space before ( in function call')
# If the ) is followed only by a newline or a { + newline, assume it's
# part of a control statement (if/while/etc), and don't complain
if Search(r'[^)]\s+\)\s*[^{\s]', fncall):
# If the closing parenthesis is preceded by only whitespaces,
# try to give a more descriptive error message.
if Search(r'^\s+\)', fncall):
error(filename, linenum, 'whitespace/parens', 2,
'Closing ) should be moved to the previous line')
else:
error(filename, linenum, 'whitespace/parens', 2,
'Extra space before )')
def IsBlankLine(line):
"""Returns true if the given line is blank.
We consider a line to be blank if the line is empty or consists of
only white spaces.
Args:
line: A line of a string.
Returns:
True, if the given line is blank.
"""
return not line or line.isspace()
def CheckForFunctionLengths(filename, clean_lines, linenum,
function_state, error):
"""Reports for long function bodies.
  For an overview of why this is done, see:
http://google-styleguide.googlecode.com/svn/trunk/cppguide.xml#Write_Short_Functions
Uses a simplistic algorithm assuming other style guidelines
(especially spacing) are followed.
Only checks unindented functions, so class members are unchecked.
Trivial bodies are unchecked, so constructors with huge initializer lists
may be missed.
Blank/comment lines are not counted so as to avoid encouraging the removal
of vertical space and comments just to get through a lint check.
NOLINT *on the last line of a function* disables this check.
Args:
filename: The name of the current file.
clean_lines: A CleansedLines instance containing the file.
linenum: The number of the line to check.
function_state: Current function name and lines in body so far.
error: The function to call with any errors found.
"""
lines = clean_lines.lines
line = lines[linenum]
raw = clean_lines.raw_lines
raw_line = raw[linenum]
joined_line = ''
starting_func = False
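  # The regexp below matches what looks like the start of an unindented
  # function declaration or definition, e.g. 'void MyClass::Method(' or
  # 'int* Foo ('.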
regexp = r'(\w(\w|::|\*|\&|\s)*)\(' # decls * & space::name( ...
match_result = Match(regexp, line)
if match_result:
# If the name is all caps and underscores, figure it's a macro and
# ignore it, unless it's TEST or TEST_F.
function_name = match_result.group(1).split()[-1]
if function_name == 'TEST' or function_name == 'TEST_F' or (
not Match(r'[A-Z_]+$', function_name)):
starting_func = True
if starting_func:
body_found = False
for start_linenum in xrange(linenum, clean_lines.NumLines()):
start_line = lines[start_linenum]
joined_line += ' ' + start_line.lstrip()
if Search(r'(;|})', start_line): # Declarations and trivial functions
body_found = True
break # ... ignore
elif Search(r'{', start_line):
body_found = True
function = Search(r'((\w|:)*)\(', line).group(1)
if Match(r'TEST', function): # Handle TEST... macros
parameter_regexp = Search(r'(\(.*\))', joined_line)
if parameter_regexp: # Ignore bad syntax
function += parameter_regexp.group(1)
else:
function += '()'
function_state.Begin(function)
break
if not body_found:
# No body for the function (or evidence of a non-function) was found.
error(filename, linenum, 'readability/fn_size', 5,
'Lint failed to find start of function body.')
elif Match(r'^\}\s*$', line): # function end
function_state.Check(error, filename, linenum)
function_state.End()
elif not Match(r'^\s*$', line):
function_state.Count() # Count non-blank/non-comment lines.
_RE_PATTERN_TODO = re.compile(r'^//(\s*)TODO(\(.+?\))?:?(\s|$)?')
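# The pattern above matches comments like '// TODO(username): fix this';
# group(1) is the whitespace between '//' and 'TODO', group(2) is the
# parenthesized username (if present), and group(3) is the single character
# following the optional colon, which is expected to be a space.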
def CheckComment(comment, filename, linenum, error):
"""Checks for common mistakes in TODO comments.
Args:
comment: The text of the comment from the line in question.
filename: The name of the current file.
linenum: The number of the line to check.
error: The function to call with any errors found.
"""
match = _RE_PATTERN_TODO.match(comment)
if match:
# One whitespace is correct; zero whitespace is handled elsewhere.
leading_whitespace = match.group(1)
if len(leading_whitespace) > 1:
error(filename, linenum, 'whitespace/todo', 2,
'Too many spaces before TODO')
username = match.group(2)
if not username:
error(filename, linenum, 'readability/todo', 2,
'Missing username in TODO; it should look like '
'"// TODO(my_username): Stuff."')
middle_whitespace = match.group(3)
# Comparisons made explicit for correctness -- pylint: disable=g-explicit-bool-comparison
if middle_whitespace != ' ' and middle_whitespace != '':
error(filename, linenum, 'whitespace/todo', 2,
'TODO(my_username) should be followed by a space')
def CheckAccess(filename, clean_lines, linenum, nesting_state, error):
"""Checks for improper use of DISALLOW* macros.
Args:
filename: The name of the current file.
clean_lines: A CleansedLines instance containing the file.
linenum: The number of the line to check.
nesting_state: A _NestingState instance which maintains information about
the current stack of nested blocks being parsed.
error: The function to call with any errors found.
"""
line = clean_lines.elided[linenum] # get rid of comments and strings
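  # e.g. 'DISALLOW_COPY_AND_ASSIGN(Foo);' placed in a public: or protected:
  # section is flagged; these macros belong in the private: section.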
matched = Match((r'\s*(DISALLOW_COPY_AND_ASSIGN|'
r'DISALLOW_EVIL_CONSTRUCTORS|'
r'DISALLOW_IMPLICIT_CONSTRUCTORS)'), line)
if not matched:
return
if nesting_state.stack and isinstance(nesting_state.stack[-1], _ClassInfo):
if nesting_state.stack[-1].access != 'private':
error(filename, linenum, 'readability/constructors', 3,
'%s must be in the private: section' % matched.group(1))
else:
# Found DISALLOW* macro outside a class declaration, or perhaps it
# was used inside a function when it should have been part of the
# class declaration. We could issue a warning here, but it
# probably resulted in a compiler error already.
pass
def FindNextMatchingAngleBracket(clean_lines, linenum, init_suffix):
"""Find the corresponding > to close a template.
Args:
clean_lines: A CleansedLines instance containing the file.
linenum: Current line number.
init_suffix: Remainder of the current line after the initial <.
Returns:
True if a matching bracket exists.
"""
line = init_suffix
nesting_stack = ['<']
while True:
# Find the next operator that can tell us whether < is used as an
# opening bracket or as a less-than operator. We only want to
# warn on the latter case.
#
# We could also check all other operators and terminate the search
# early, e.g. if we got something like this "a<b+c", the "<" is
# most likely a less-than operator, but then we will get false
# positives for default arguments and other template expressions.
match = Search(r'^[^<>(),;\[\]]*([<>(),;\[\]])(.*)$', line)
if match:
# Found an operator, update nesting stack
operator = match.group(1)
line = match.group(2)
if nesting_stack[-1] == '<':
# Expecting closing angle bracket
if operator in ('<', '(', '['):
nesting_stack.append(operator)
elif operator == '>':
nesting_stack.pop()
if not nesting_stack:
# Found matching angle bracket
return True
elif operator == ',':
# Got a comma after a bracket, this is most likely a template
# argument. We have not seen a closing angle bracket yet, but
# it's probably a few lines later if we look for it, so just
# return early here.
return True
else:
# Got some other operator.
return False
else:
# Expecting closing parenthesis or closing bracket
if operator in ('<', '(', '['):
nesting_stack.append(operator)
elif operator in (')', ']'):
# We don't bother checking for matching () or []. If we got
# something like (] or [), it would have been a syntax error.
nesting_stack.pop()
else:
# Scan the next line
linenum += 1
if linenum >= len(clean_lines.elided):
break
line = clean_lines.elided[linenum]
# Exhausted all remaining lines and still no matching angle bracket.
# Most likely the input was incomplete, otherwise we should have
# seen a semicolon and returned early.
return True
def FindPreviousMatchingAngleBracket(clean_lines, linenum, init_prefix):
"""Find the corresponding < that started a template.
Args:
clean_lines: A CleansedLines instance containing the file.
linenum: Current line number.
init_prefix: Part of the current line before the initial >.
Returns:
True if a matching bracket exists.
"""
line = init_prefix
nesting_stack = ['>']
while True:
# Find the previous operator
match = Search(r'^(.*)([<>(),;\[\]])[^<>(),;\[\]]*$', line)
if match:
# Found an operator, update nesting stack
operator = match.group(2)
line = match.group(1)
if nesting_stack[-1] == '>':
# Expecting opening angle bracket
if operator in ('>', ')', ']'):
nesting_stack.append(operator)
elif operator == '<':
nesting_stack.pop()
if not nesting_stack:
# Found matching angle bracket
return True
elif operator == ',':
# Got a comma before a bracket, this is most likely a
# template argument. The opening angle bracket is probably
# there if we look for it, so just return early here.
return True
else:
# Got some other operator.
return False
else:
# Expecting opening parenthesis or opening bracket
if operator in ('>', ')', ']'):
nesting_stack.append(operator)
elif operator in ('(', '['):
nesting_stack.pop()
else:
# Scan the previous line
linenum -= 1
if linenum < 0:
break
line = clean_lines.elided[linenum]
# Exhausted all earlier lines and still no matching angle bracket.
return False
def CheckSpacing(filename, clean_lines, linenum, nesting_state, error):
"""Checks for the correctness of various spacing issues in the code.
Things we check for: spaces around operators, spaces after
if/for/while/switch, no spaces around parens in function calls, two
spaces between code and comment, don't start a block with a blank
line, don't end a function with a blank line, don't add a blank line
after public/protected/private, don't have too many blank lines in a row.
Args:
filename: The name of the current file.
clean_lines: A CleansedLines instance containing the file.
linenum: The number of the line to check.
nesting_state: A _NestingState instance which maintains information about
the current stack of nested blocks being parsed.
error: The function to call with any errors found.
"""
# Don't use "elided" lines here, otherwise we can't check commented lines.
# Don't want to use "raw" either, because we don't want to check inside C++11
  # raw strings.
raw = clean_lines.lines_without_raw_strings
line = raw[linenum]
# Before nixing comments, check if the line is blank for no good
# reason. This includes the first line after a block is opened, and
  # blank lines at the end of a function (i.e., right before a line like '}').
#
# Skip all the blank line checks if we are immediately inside a
# namespace body. In other words, don't issue blank line warnings
# for this block:
# namespace {
#
# }
#
# A warning about missing end of namespace comments will be issued instead.
if IsBlankLine(line) and not nesting_state.InNamespaceBody():
elided = clean_lines.elided
prev_line = elided[linenum - 1]
prevbrace = prev_line.rfind('{')
# TODO(unknown): Don't complain if line before blank line, and line after,
# both start with alnums and are indented the same amount.
# This ignores whitespace at the start of a namespace block
# because those are not usually indented.
if prevbrace != -1 and prev_line[prevbrace:].find('}') == -1:
# OK, we have a blank line at the start of a code block. Before we
# complain, we check if it is an exception to the rule: The previous
# non-empty line has the parameters of a function header that are indented
      # 4 spaces (because they did not fit in an 80 column line when placed on
# the same line as the function name). We also check for the case where
# the previous line is indented 6 spaces, which may happen when the
      # initializers of a constructor do not fit into an 80 column line.
exception = False
if Match(r' {6}\w', prev_line): # Initializer list?
# We are looking for the opening column of initializer list, which
# should be indented 4 spaces to cause 6 space indentation afterwards.
search_position = linenum-2
while (search_position >= 0
and Match(r' {6}\w', elided[search_position])):
search_position -= 1
exception = (search_position >= 0
and elided[search_position][:5] == ' :')
else:
# Search for the function arguments or an initializer list. We use a
        # simple heuristic here: If the line is indented 4 spaces and we have a
# closing paren, without the opening paren, followed by an opening brace
# or colon (for initializer lists) we assume that it is the last line of
# a function header. If we have a colon indented 4 spaces, it is an
# initializer list.
exception = (Match(r' {4}\w[^\(]*\)\s*(const\s*)?(\{\s*$|:)',
prev_line)
or Match(r' {4}:', prev_line))
if not exception:
error(filename, linenum, 'whitespace/blank_line', 2,
'Redundant blank line at the start of a code block '
'should be deleted.')
# Ignore blank lines at the end of a block in a long if-else
# chain, like this:
# if (condition1) {
# // Something followed by a blank line
#
# } else if (condition2) {
# // Something else
# }
if linenum + 1 < clean_lines.NumLines():
next_line = raw[linenum + 1]
if (next_line
and Match(r'\s*}', next_line)
and next_line.find('} else ') == -1):
error(filename, linenum, 'whitespace/blank_line', 3,
'Redundant blank line at the end of a code block '
'should be deleted.')
matched = Match(r'\s*(public|protected|private):', prev_line)
if matched:
error(filename, linenum, 'whitespace/blank_line', 3,
'Do not leave a blank line after "%s:"' % matched.group(1))
# Next, we complain if there's a comment too near the text
commentpos = line.find('//')
if commentpos != -1:
# Check if the // may be in quotes. If so, ignore it
# Comparisons made explicit for clarity -- pylint: disable=g-explicit-bool-comparison
if (line.count('"', 0, commentpos) -
line.count('\\"', 0, commentpos)) % 2 == 0: # not in quotes
# Allow one space for new scopes, two spaces otherwise:
if (not Match(r'^\s*{ //', line) and
((commentpos >= 1 and
line[commentpos-1] not in string.whitespace) or
(commentpos >= 2 and
line[commentpos-2] not in string.whitespace))):
error(filename, linenum, 'whitespace/comments', 2,
'At least two spaces is best between code and comments')
# There should always be a space between the // and the comment
commentend = commentpos + 2
if commentend < len(line) and not line[commentend] == ' ':
# but some lines are exceptions -- e.g. if they're big
# comment delimiters like:
# //----------------------------------------------------------
# or are an empty C++ style Doxygen comment, like:
# ///
# or C++ style Doxygen comments placed after the variable:
# ///< Header comment
# //!< Header comment
# or they begin with multiple slashes followed by a space:
# //////// Header comment
match = (Search(r'[=/-]{4,}\s*$', line[commentend:]) or
Search(r'^/$', line[commentend:]) or
Search(r'^!< ', line[commentend:]) or
Search(r'^/< ', line[commentend:]) or
Search(r'^/+ ', line[commentend:]))
if not match:
error(filename, linenum, 'whitespace/comments', 4,
'Should have a space between // and comment')
CheckComment(line[commentpos:], filename, linenum, error)
line = clean_lines.elided[linenum] # get rid of comments and strings
# Don't try to do spacing checks for operator methods
line = re.sub(r'operator(==|!=|<|<<|<=|>=|>>|>)\(', 'operator\(', line)
# We allow no-spaces around = within an if: "if ( (a=Foo()) == 0 )".
# Otherwise not. Note we only check for non-spaces on *both* sides;
# sometimes people put non-spaces on one side when aligning ='s among
# many lines (not that this is behavior that I approve of...)
if Search(r'[\w.]=[\w.]', line) and not Search(r'\b(if|while) ', line):
error(filename, linenum, 'whitespace/operators', 4,
'Missing spaces around =')
# It's ok not to have spaces around binary operators like + - * /, but if
# there's too little whitespace, we get concerned. It's hard to tell,
# though, so we punt on this one for now. TODO.
# You should always have whitespace around binary operators.
#
# Check <= and >= first to avoid false positives with < and >, then
# check non-include lines for spacing around < and >.
match = Search(r'[^<>=!\s](==|!=|<=|>=)[^<>=!\s]', line)
if match:
error(filename, linenum, 'whitespace/operators', 3,
'Missing spaces around %s' % match.group(1))
# We allow no-spaces around << when used like this: 10<<20, but
# not otherwise (particularly, not when used as streams)
# Also ignore using ns::operator<<;
match = Search(r'(operator|\S)(?:L|UL|ULL|l|ul|ull)?<<(\S)', line)
if (match and
not (match.group(1).isdigit() and match.group(2).isdigit()) and
not (match.group(1) == 'operator' and match.group(2) == ';')):
error(filename, linenum, 'whitespace/operators', 3,
'Missing spaces around <<')
elif not Match(r'#.*include', line):
# Avoid false positives on ->
reduced_line = line.replace('->', '')
# Look for < that is not surrounded by spaces. This is only
# triggered if both sides are missing spaces, even though
    # technically we should flag if at least one side is missing a
# space. This is done to avoid some false positives with shifts.
match = Search(r'[^\s<]<([^\s=<].*)', reduced_line)
if (match and
not FindNextMatchingAngleBracket(clean_lines, linenum, match.group(1))):
error(filename, linenum, 'whitespace/operators', 3,
'Missing spaces around <')
# Look for > that is not surrounded by spaces. Similar to the
# above, we only trigger if both sides are missing spaces to avoid
# false positives with shifts.
match = Search(r'^(.*[^\s>])>[^\s=>]', reduced_line)
if (match and
not FindPreviousMatchingAngleBracket(clean_lines, linenum,
match.group(1))):
error(filename, linenum, 'whitespace/operators', 3,
'Missing spaces around >')
# We allow no-spaces around >> for almost anything. This is because
# C++11 allows ">>" to close nested templates, which accounts for
# most cases when ">>" is not followed by a space.
#
# We still warn on ">>" followed by alpha character, because that is
# likely due to ">>" being used for right shifts, e.g.:
# value >> alpha
#
# When ">>" is used to close templates, the alphanumeric letter that
# follows would be part of an identifier, and there should still be
# a space separating the template type and the identifier.
# type<type<type>> alpha
match = Search(r'>>[a-zA-Z_]', line)
if match:
error(filename, linenum, 'whitespace/operators', 3,
'Missing spaces around >>')
# There shouldn't be space around unary operators
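  # e.g. '! flag', '~ mask', 'i ++;' and ' -- j' are flagged; binary uses
  # such as 'a - b' are not matched by this pattern.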
match = Search(r'(!\s|~\s|[\s]--[\s;]|[\s]\+\+[\s;])', line)
if match:
error(filename, linenum, 'whitespace/operators', 4,
'Extra space for operator %s' % match.group(1))
# A pet peeve of mine: no spaces after an if, while, switch, or for
match = Search(r' (if\(|for\(|while\(|switch\()', line)
if match:
error(filename, linenum, 'whitespace/parens', 5,
'Missing space before ( in %s' % match.group(1))
# For if/for/while/switch, the left and right parens should be
# consistent about how many spaces are inside the parens, and
# there should either be zero or one spaces inside the parens.
# We don't want: "if ( foo)" or "if ( foo )".
# Exception: "for ( ; foo; bar)" and "for (foo; bar; )" are allowed.
match = Search(r'\b(if|for|while|switch)\s*'
r'\(([ ]*)(.).*[^ ]+([ ]*)\)\s*{\s*$',
line)
if match:
if len(match.group(2)) != len(match.group(4)):
if not (match.group(3) == ';' and
len(match.group(2)) == 1 + len(match.group(4)) or
not match.group(2) and Search(r'\bfor\s*\(.*; \)', line)):
error(filename, linenum, 'whitespace/parens', 5,
'Mismatching spaces inside () in %s' % match.group(1))
if len(match.group(2)) not in [0, 1]:
error(filename, linenum, 'whitespace/parens', 5,
'Should have zero or one spaces inside ( and ) in %s' %
match.group(1))
# You should always have a space after a comma (either as fn arg or operator)
#
# This does not apply when the non-space character following the
# comma is another comma, since the only time when that happens is
# for empty macro arguments.
#
# We run this check in two passes: first pass on elided lines to
# verify that lines contain missing whitespaces, second pass on raw
# lines to confirm that those missing whitespaces are not due to
# elided comments.
if Search(r',[^,\s]', line) and Search(r',[^,\s]', raw[linenum]):
error(filename, linenum, 'whitespace/comma', 3,
'Missing space after ,')
# You should always have a space after a semicolon
# except for few corner cases
  # TODO(unknown): clarify if 'if (1) { return 1;}' requires one more
# space after ;
if Search(r';[^\s};\\)/]', line):
error(filename, linenum, 'whitespace/semicolon', 3,
'Missing space after ;')
# Next we will look for issues with function calls.
CheckSpacingForFunctionCall(filename, line, linenum, error)
# Except after an opening paren, or after another opening brace (in case of
# an initializer list, for instance), you should have spaces before your
# braces. And since you should never have braces at the beginning of a line,
# this is an easy test.
match = Match(r'^(.*[^ ({]){', line)
if match:
# Try a bit harder to check for brace initialization. This
# happens in one of the following forms:
# Constructor() : initializer_list_{} { ... }
# Constructor{}.MemberFunction()
# Type variable{};
# FunctionCall(type{}, ...);
# LastArgument(..., type{});
# LOG(INFO) << type{} << " ...";
# map_of_type[{...}] = ...;
#
# We check for the character following the closing brace, and
# silence the warning if it's one of those listed above, i.e.
# "{.;,)<]".
#
# To account for nested initializer list, we allow any number of
# closing braces up to "{;,)<". We can't simply silence the
# warning on first sight of closing brace, because that would
# cause false negatives for things that are not initializer lists.
# Silence this: But not this:
# Outer{ if (...) {
# Inner{...} if (...){ // Missing space before {
# }; }
#
# There is a false negative with this approach if people inserted
# spurious semicolons, e.g. "if (cond){};", but we will catch the
# spurious semicolon with a separate check.
(endline, endlinenum, endpos) = CloseExpression(
clean_lines, linenum, len(match.group(1)))
trailing_text = ''
if endpos > -1:
trailing_text = endline[endpos:]
for offset in xrange(endlinenum + 1,
min(endlinenum + 3, clean_lines.NumLines() - 1)):
trailing_text += clean_lines.elided[offset]
if not Match(r'^[\s}]*[{.;,)<\]]', trailing_text):
error(filename, linenum, 'whitespace/braces', 5,
'Missing space before {')
# Make sure '} else {' has spaces.
if Search(r'}else', line):
error(filename, linenum, 'whitespace/braces', 5,
'Missing space before else')
# You shouldn't have spaces before your brackets, except maybe after
# 'delete []' or 'new char * []'.
if Search(r'\w\s+\[', line) and not Search(r'delete\s+\[', line):
error(filename, linenum, 'whitespace/braces', 5,
'Extra space before [')
# You shouldn't have a space before a semicolon at the end of the line.
# There's a special case for "for" since the style guide allows space before
# the semicolon there.
if Search(r':\s*;\s*$', line):
error(filename, linenum, 'whitespace/semicolon', 5,
'Semicolon defining empty statement. Use {} instead.')
elif Search(r'^\s*;\s*$', line):
error(filename, linenum, 'whitespace/semicolon', 5,
'Line contains only semicolon. If this should be an empty statement, '
'use {} instead.')
elif (Search(r'\s+;\s*$', line) and
not Search(r'\bfor\b', line)):
error(filename, linenum, 'whitespace/semicolon', 5,
'Extra space before last semicolon. If this should be an empty '
'statement, use {} instead.')
# In range-based for, we wanted spaces before and after the colon, but
# not around "::" tokens that might appear.
if (Search('for *\(.*[^:]:[^: ]', line) or
Search('for *\(.*[^: ]:[^:]', line)):
error(filename, linenum, 'whitespace/forcolon', 2,
'Missing space around colon in range-based for loop')
def CheckSectionSpacing(filename, clean_lines, class_info, linenum, error):
"""Checks for additional blank line issues related to sections.
Currently the only thing checked here is blank line before protected/private.
Args:
filename: The name of the current file.
clean_lines: A CleansedLines instance containing the file.
    class_info: A _ClassInfo object.
linenum: The number of the line to check.
error: The function to call with any errors found.
"""
# Skip checks if the class is small, where small means 25 lines or less.
# 25 lines seems like a good cutoff since that's the usual height of
# terminals, and any class that can't fit in one screen can't really
# be considered "small".
#
# Also skip checks if we are on the first line. This accounts for
# classes that look like
# class Foo { public: ... };
#
# If we didn't find the end of the class, last_line would be zero,
# and the check will be skipped by the first condition.
if (class_info.last_line - class_info.starting_linenum <= 24 or
linenum <= class_info.starting_linenum):
return
matched = Match(r'\s*(public|protected|private):', clean_lines.lines[linenum])
if matched:
# Issue warning if the line before public/protected/private was
# not a blank line, but don't do this if the previous line contains
# "class" or "struct". This can happen two ways:
# - We are at the beginning of the class.
# - We are forward-declaring an inner class that is semantically
# private, but needed to be public for implementation reasons.
# Also ignores cases where the previous line ends with a backslash as can be
# common when defining classes in C macros.
prev_line = clean_lines.lines[linenum - 1]
if (not IsBlankLine(prev_line) and
not Search(r'\b(class|struct)\b', prev_line) and
not Search(r'\\$', prev_line)):
# Try a bit harder to find the beginning of the class. This is to
# account for multi-line base-specifier lists, e.g.:
# class Derived
# : public Base {
end_class_head = class_info.starting_linenum
for i in range(class_info.starting_linenum, linenum):
if Search(r'\{\s*$', clean_lines.lines[i]):
end_class_head = i
break
if end_class_head < linenum - 1:
error(filename, linenum, 'whitespace/blank_line', 3,
'"%s:" should be preceded by a blank line' % matched.group(1))
def GetPreviousNonBlankLine(clean_lines, linenum):
"""Return the most recent non-blank line and its line number.
Args:
clean_lines: A CleansedLines instance containing the file contents.
linenum: The number of the line to check.
Returns:
A tuple with two elements. The first element is the contents of the last
non-blank line before the current line, or the empty string if this is the
first non-blank line. The second is the line number of that line, or -1
if this is the first non-blank line.
"""
prevlinenum = linenum - 1
while prevlinenum >= 0:
prevline = clean_lines.elided[prevlinenum]
if not IsBlankLine(prevline): # if not a blank line...
return (prevline, prevlinenum)
prevlinenum -= 1
return ('', -1)
def CheckBraces(filename, clean_lines, linenum, error):
"""Looks for misplaced braces (e.g. at the end of line).
Args:
filename: The name of the current file.
clean_lines: A CleansedLines instance containing the file.
linenum: The number of the line to check.
error: The function to call with any errors found.
"""
line = clean_lines.elided[linenum] # get rid of comments and strings
if Match(r'\s*{\s*$', line):
# We allow an open brace to start a line in the case where someone is using
# braces in a block to explicitly create a new scope, which is commonly used
# to control the lifetime of stack-allocated variables. Braces are also
# used for brace initializers inside function calls. We don't detect this
# perfectly: we just don't complain if the last non-whitespace character on
# the previous non-blank line is ',', ';', ':', '(', '{', or '}', or if the
# previous line starts a preprocessor block.
prevline = GetPreviousNonBlankLine(clean_lines, linenum)[0]
if (not Search(r'[,;:}{(]\s*$', prevline) and
not Match(r'\s*#', prevline)):
error(filename, linenum, 'whitespace/braces', 4,
'{ should almost always be at the end of the previous line')
# An else clause should be on the same line as the preceding closing brace.
if Match(r'\s*else\s*', line):
prevline = GetPreviousNonBlankLine(clean_lines, linenum)[0]
if Match(r'\s*}\s*$', prevline):
error(filename, linenum, 'whitespace/newline', 4,
'An else should appear on the same line as the preceding }')
# If braces come on one side of an else, they should be on both.
# However, we have to worry about "else if" that spans multiple lines!
if Search(r'}\s*else[^{]*$', line) or Match(r'[^}]*else\s*{', line):
if Search(r'}\s*else if([^{]*)$', line): # could be multi-line if
# find the ( after the if
pos = line.find('else if')
pos = line.find('(', pos)
if pos > 0:
(endline, _, endpos) = CloseExpression(clean_lines, linenum, pos)
if endline[endpos:].find('{') == -1: # must be brace after if
error(filename, linenum, 'readability/braces', 5,
'If an else has a brace on one side, it should have it on both')
else: # common case: else not followed by a multi-line if
error(filename, linenum, 'readability/braces', 5,
'If an else has a brace on one side, it should have it on both')
# Likewise, an else should never have the else clause on the same line
if Search(r'\belse [^\s{]', line) and not Search(r'\belse if\b', line):
error(filename, linenum, 'whitespace/newline', 4,
'Else clause should never be on same line as else (use 2 lines)')
# In the same way, a do/while should never be on one line
if Match(r'\s*do [^\s{]', line):
error(filename, linenum, 'whitespace/newline', 4,
'do/while clauses should not be on a single line')
# Block bodies should not be followed by a semicolon. Due to C++11
# brace initialization, there are more places where semicolons are
# required than not, so we use a whitelist approach to check these
# rather than a blacklist. These are the places where "};" should
# be replaced by just "}":
# 1. Some flavor of block following closing parenthesis:
# for (;;) {};
# while (...) {};
# switch (...) {};
# Function(...) {};
# if (...) {};
# if (...) else if (...) {};
#
# 2. else block:
# if (...) else {};
#
# 3. const member function:
# Function(...) const {};
#
# 4. Block following some statement:
# x = 42;
# {};
#
# 5. Block at the beginning of a function:
# Function(...) {
# {};
# }
#
# Note that naively checking for the preceding "{" will also match
# braces inside multi-dimensional arrays, but this is fine since
# that expression will not contain semicolons.
#
# 6. Block following another block:
# while (true) {}
# {};
#
# 7. End of namespaces:
# namespace {};
#
  # These semicolons seem far more common than other kinds of
# redundant semicolons, possibly due to people converting classes
# to namespaces. For now we do not warn for this case.
#
# Try matching case 1 first.
match = Match(r'^(.*\)\s*)\{', line)
if match:
# Matched closing parenthesis (case 1). Check the token before the
# matching opening parenthesis, and don't warn if it looks like a
# macro. This avoids these false positives:
# - macro that defines a base class
# - multi-line macro that defines a base class
# - macro that defines the whole class-head
#
# But we still issue warnings for macros that we know are safe to
# warn, specifically:
# - TEST, TEST_F, TEST_P, MATCHER, MATCHER_P
# - TYPED_TEST
# - INTERFACE_DEF
# - EXCLUSIVE_LOCKS_REQUIRED, SHARED_LOCKS_REQUIRED, LOCKS_EXCLUDED:
#
# We implement a whitelist of safe macros instead of a blacklist of
# unsafe macros, even though the latter appears less frequently in
# google code and would have been easier to implement. This is because
# the downside for getting the whitelist wrong means some extra
# semicolons, while the downside for getting the blacklist wrong
# would result in compile errors.
#
# In addition to macros, we also don't want to warn on compound
# literals.
closing_brace_pos = match.group(1).rfind(')')
opening_parenthesis = ReverseCloseExpression(
clean_lines, linenum, closing_brace_pos)
if opening_parenthesis[2] > -1:
line_prefix = opening_parenthesis[0][0:opening_parenthesis[2]]
macro = Search(r'\b([A-Z_]+)\s*$', line_prefix)
if ((macro and
macro.group(1) not in (
'TEST', 'TEST_F', 'MATCHER', 'MATCHER_P', 'TYPED_TEST',
'EXCLUSIVE_LOCKS_REQUIRED', 'SHARED_LOCKS_REQUIRED',
'LOCKS_EXCLUDED', 'INTERFACE_DEF')) or
Search(r'\s+=\s*$', line_prefix)):
match = None
else:
# Try matching cases 2-3.
match = Match(r'^(.*(?:else|\)\s*const)\s*)\{', line)
if not match:
# Try matching cases 4-6. These are always matched on separate lines.
#
# Note that we can't simply concatenate the previous line to the
# current line and do a single match, otherwise we may output
# duplicate warnings for the blank line case:
# if (cond) {
# // blank line
# }
prevline = GetPreviousNonBlankLine(clean_lines, linenum)[0]
if prevline and Search(r'[;{}]\s*$', prevline):
match = Match(r'^(\s*)\{', line)
# Check matching closing brace
if match:
(endline, endlinenum, endpos) = CloseExpression(
clean_lines, linenum, len(match.group(1)))
if endpos > -1 and Match(r'^\s*;', endline[endpos:]):
# Current {} pair is eligible for semicolon check, and we have found
# the redundant semicolon, output warning here.
#
# Note: because we are scanning forward for opening braces, and
# outputting warnings for the matching closing brace, if there are
# nested blocks with trailing semicolons, we will get the error
# messages in reversed order.
error(filename, endlinenum, 'readability/braces', 4,
"You don't need a ; after a }")
def CheckEmptyBlockBody(filename, clean_lines, linenum, error):
"""Look for empty loop/conditional body with only a single semicolon.
Args:
filename: The name of the current file.
clean_lines: A CleansedLines instance containing the file.
linenum: The number of the line to check.
error: The function to call with any errors found.
"""
# Search for loop keywords at the beginning of the line. Because only
# whitespaces are allowed before the keywords, this will also ignore most
# do-while-loops, since those lines should start with closing brace.
#
# We also check "if" blocks here, since an empty conditional block
# is likely an error.
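  # For example, 'while (condition);' and 'if (x > 0);' are reported below,
  # since the semicolon right after the closing parenthesis leaves the body
  # empty.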
line = clean_lines.elided[linenum]
matched = Match(r'\s*(for|while|if)\s*\(', line)
if matched:
# Find the end of the conditional expression
(end_line, end_linenum, end_pos) = CloseExpression(
clean_lines, linenum, line.find('('))
# Output warning if what follows the condition expression is a semicolon.
# No warning for all other cases, including whitespace or newline, since we
# have a separate check for semicolons preceded by whitespace.
if end_pos >= 0 and Match(r';', end_line[end_pos:]):
if matched.group(1) == 'if':
error(filename, end_linenum, 'whitespace/empty_conditional_body', 5,
'Empty conditional bodies should use {}')
else:
error(filename, end_linenum, 'whitespace/empty_loop_body', 5,
'Empty loop bodies should use {} or continue')
def CheckCheck(filename, clean_lines, linenum, error):
"""Checks the use of CHECK and EXPECT macros.
Args:
filename: The name of the current file.
clean_lines: A CleansedLines instance containing the file.
linenum: The number of the line to check.
error: The function to call with any errors found.
"""
# Decide the set of replacement macros that should be suggested
lines = clean_lines.elided
check_macro = None
start_pos = -1
for macro in _CHECK_MACROS:
i = lines[linenum].find(macro)
if i >= 0:
check_macro = macro
# Find opening parenthesis. Do a regular expression match here
# to make sure that we are matching the expected CHECK macro, as
# opposed to some other macro that happens to contain the CHECK
# substring.
matched = Match(r'^(.*\b' + check_macro + r'\s*)\(', lines[linenum])
if not matched:
continue
start_pos = len(matched.group(1))
break
if not check_macro or start_pos < 0:
# Don't waste time here if line doesn't contain 'CHECK' or 'EXPECT'
return
# Find end of the boolean expression by matching parentheses
(last_line, end_line, end_pos) = CloseExpression(
clean_lines, linenum, start_pos)
if end_pos < 0:
return
if linenum == end_line:
expression = lines[linenum][start_pos + 1:end_pos - 1]
else:
expression = lines[linenum][start_pos + 1:]
for i in xrange(linenum + 1, end_line):
expression += lines[i]
expression += last_line[0:end_pos - 1]
# Parse expression so that we can take parentheses into account.
# This avoids false positives for inputs like "CHECK((a < 4) == b)",
# which is not replaceable by CHECK_LE.
lhs = ''
rhs = ''
operator = None
while expression:
matched = Match(r'^\s*(<<|<<=|>>|>>=|->\*|->|&&|\|\||'
r'==|!=|>=|>|<=|<|\()(.*)$', expression)
if matched:
token = matched.group(1)
if token == '(':
# Parenthesized operand
expression = matched.group(2)
(end, _) = FindEndOfExpressionInLine(expression, 0, 1, '(', ')')
if end < 0:
return # Unmatched parenthesis
lhs += '(' + expression[0:end]
expression = expression[end:]
elif token in ('&&', '||'):
# Logical and/or operators. This means the expression
# contains more than one term, for example:
# CHECK(42 < a && a < b);
#
# These are not replaceable with CHECK_LE, so bail out early.
return
elif token in ('<<', '<<=', '>>', '>>=', '->*', '->'):
# Non-relational operator
lhs += token
expression = matched.group(2)
else:
# Relational operator
operator = token
rhs = matched.group(2)
break
else:
# Unparenthesized operand. Instead of appending to lhs one character
# at a time, we do another regular expression match to consume several
# characters at once if possible. Trivial benchmark shows that this
# is more efficient when the operands are longer than a single
# character, which is generally the case.
matched = Match(r'^([^-=!<>()&|]+)(.*)$', expression)
if not matched:
matched = Match(r'^(\s*\S)(.*)$', expression)
if not matched:
break
lhs += matched.group(1)
expression = matched.group(2)
# Only apply checks if we got all parts of the boolean expression
if not (lhs and operator and rhs):
return
  # Check that rhs does not contain logical operators. We already know
# that lhs is fine since the loop above parses out && and ||.
if rhs.find('&&') > -1 or rhs.find('||') > -1:
return
# At least one of the operands must be a constant literal. This is
# to avoid suggesting replacements for unprintable things like
# CHECK(variable != iterator)
#
# The following pattern matches decimal, hex integers, strings, and
# characters (in that order).
lhs = lhs.strip()
rhs = rhs.strip()
match_constant = r'^([-+]?(\d+|0[xX][0-9a-fA-F]+)[lLuU]{0,3}|".*"|\'.*\')$'
if Match(match_constant, lhs) or Match(match_constant, rhs):
# Note: since we know both lhs and rhs, we can provide a more
# descriptive error message like:
# Consider using CHECK_EQ(x, 42) instead of CHECK(x == 42)
# Instead of:
# Consider using CHECK_EQ instead of CHECK(a == b)
#
# We are still keeping the less descriptive message because if lhs
# or rhs gets long, the error message might become unreadable.
error(filename, linenum, 'readability/check', 2,
'Consider using %s instead of %s(a %s b)' % (
_CHECK_REPLACEMENT[check_macro][operator],
check_macro, operator))
def CheckAltTokens(filename, clean_lines, linenum, error):
"""Check alternative keywords being used in boolean expressions.
Args:
filename: The name of the current file.
clean_lines: A CleansedLines instance containing the file.
linenum: The number of the line to check.
error: The function to call with any errors found.
"""
line = clean_lines.elided[linenum]
# Avoid preprocessor lines
if Match(r'^\s*#', line):
return
# Last ditch effort to avoid multi-line comments. This will not help
# if the comment started before the current line or ended after the
# current line, but it catches most of the false positives. At least,
# it provides a way to workaround this warning for people who use
# multi-line comments in preprocessor macros.
#
# TODO(unknown): remove this once cpplint has better support for
# multi-line comments.
if line.find('/*') >= 0 or line.find('*/') >= 0:
return
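# For example (illustrative, relying on the _ALT_TOKEN_REPLACEMENT table
# defined elsewhere in this file): a line such as "if (a and not b)" would be
# reported twice, suggesting '&&' in place of 'and' and '!' in place of 'not'.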
for match in _ALT_TOKEN_REPLACEMENT_PATTERN.finditer(line):
error(filename, linenum, 'readability/alt_tokens', 2,
'Use operator %s instead of %s' % (
_ALT_TOKEN_REPLACEMENT[match.group(1)], match.group(1)))
def GetLineWidth(line):
"""Determines the width of the line in column positions.
Args:
line: A string, which may be a Unicode string.
Returns:
The width of the line in column positions, accounting for Unicode
combining characters and wide characters.
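For example (illustrative): GetLineWidth(u'\u4f60\u597d') returns 4, since
each of the two CJK characters is classified as wide and counts as two
columns, while GetLineWidth('abc') simply returns 3.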
"""
if isinstance(line, unicode):
width = 0
for uc in unicodedata.normalize('NFC', line):
if unicodedata.east_asian_width(uc) in ('W', 'F'):
width += 2
elif not unicodedata.combining(uc):
width += 1
return width
else:
return len(line)
def CheckStyle(filename, clean_lines, linenum, file_extension, nesting_state,
error):
"""Checks rules from the 'C++ style rules' section of cppguide.html.
Most of these rules are hard to test (naming, comment style), but we
do what we can. In particular we check for 2-space indents, line lengths,
tab usage, spaces inside code, etc.
Args:
filename: The name of the current file.
clean_lines: A CleansedLines instance containing the file.
linenum: The number of the line to check.
file_extension: The extension (without the dot) of the filename.
nesting_state: A _NestingState instance which maintains information about
the current stack of nested blocks being parsed.
error: The function to call with any errors found.
"""
# Don't use "elided" lines here, otherwise we can't check commented lines.
# Don't want to use "raw" either, because we don't want to check inside C++11
# raw strings.
raw_lines = clean_lines.lines_without_raw_strings
line = raw_lines[linenum]
if line.find('\t') != -1:
error(filename, linenum, 'whitespace/tab', 1,
'Tab found; better to use spaces')
# One or three blank spaces at the beginning of the line is weird; it's
# hard to reconcile that with 2-space indents.
# NOTE: here are the conditions rob pike used for his tests. Mine aren't
# as sophisticated, but it may be worth becoming so: RLENGTH==initial_spaces
# if(RLENGTH > 20) complain = 0;
# if(match($0, " +(error|private|public|protected):")) complain = 0;
# if(match(prev, "&& *$")) complain = 0;
# if(match(prev, "\\|\\| *$")) complain = 0;
# if(match(prev, "[\",=><] *$")) complain = 0;
# if(match($0, " <<")) complain = 0;
# if(match(prev, " +for \\(")) complain = 0;
# if(prevodd && match(prevprev, " +for \\(")) complain = 0;
initial_spaces = 0
cleansed_line = clean_lines.elided[linenum]
while initial_spaces < len(line) and line[initial_spaces] == ' ':
initial_spaces += 1
if line and line[-1].isspace():
error(filename, linenum, 'whitespace/end_of_line', 4,
'Line ends in whitespace. Consider deleting these extra spaces.')
# There are certain situations we allow one space, notably for section labels
elif ((initial_spaces == 1 or initial_spaces == 3) and
not Match(r'\s*\w+\s*:\s*$', cleansed_line)):
error(filename, linenum, 'whitespace/indent', 3,
'Weird number of spaces at line-start. '
'Are you using a 2-space indent?')
# Check if the line is a header guard.
is_header_guard = False
if file_extension == 'h':
cppvar = GetHeaderGuardCPPVariable(filename)
if (line.startswith('#ifndef %s' % cppvar) or
line.startswith('#define %s' % cppvar) or
line.startswith('#endif // %s' % cppvar)):
is_header_guard = True
# #include lines and header guards can be long, since there's no clean way to
# split them.
#
# URLs can be long too. It's possible to split these, but it makes them
# harder to cut&paste.
#
# The "$Id:...$" comment may also get very long without it being the
# developer's fault.
if (not line.startswith('#include') and not is_header_guard and
not Match(r'^\s*//.*http(s?)://\S*$', line) and
not Match(r'^// \$Id:.*#[0-9]+ \$$', line)):
line_width = GetLineWidth(line)
extended_length = int(_line_length * 1.25)
if line_width > extended_length:
error(filename, linenum, 'whitespace/line_length', 4,
'Lines should very rarely be longer than %i characters' %
extended_length)
elif line_width > _line_length:
error(filename, linenum, 'whitespace/line_length', 2,
'Lines should be <= %i characters long' % _line_length)
if (cleansed_line.count(';') > 1 and
# for loops are allowed two ;'s (and may run over two lines).
cleansed_line.find('for') == -1 and
(GetPreviousNonBlankLine(clean_lines, linenum)[0].find('for') == -1 or
GetPreviousNonBlankLine(clean_lines, linenum)[0].find(';') != -1) and
# It's ok to have many commands in a switch case that fits in 1 line
not ((cleansed_line.find('case ') != -1 or
cleansed_line.find('default:') != -1) and
cleansed_line.find('break;') != -1)):
error(filename, linenum, 'whitespace/newline', 0,
'More than one command on the same line')
# Some more style checks
CheckBraces(filename, clean_lines, linenum, error)
CheckEmptyBlockBody(filename, clean_lines, linenum, error)
CheckAccess(filename, clean_lines, linenum, nesting_state, error)
CheckSpacing(filename, clean_lines, linenum, nesting_state, error)
CheckCheck(filename, clean_lines, linenum, error)
CheckAltTokens(filename, clean_lines, linenum, error)
classinfo = nesting_state.InnermostClass()
if classinfo:
CheckSectionSpacing(filename, clean_lines, classinfo, linenum, error)
_RE_PATTERN_INCLUDE_NEW_STYLE = re.compile(r'#include +"[^/]+\.h"')
_RE_PATTERN_INCLUDE = re.compile(r'^\s*#\s*include\s*([<"])([^>"]*)[>"].*$')
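# For example (illustrative): _RE_PATTERN_INCLUDE_NEW_STYLE matches
# '#include "bar.h"' but not '#include "foo/bar.h"', which is what lets
# CheckIncludeLine below ask for the directory to be part of the name.
# _RE_PATTERN_INCLUDE captures the bracket style ('<' or '"') in group 1 and
# the included path in group 2 for any #include line.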
# Matches the first component of a filename delimited by -s and _s. That is:
# _RE_FIRST_COMPONENT.match('foo').group(0) == 'foo'
# _RE_FIRST_COMPONENT.match('foo.cc').group(0) == 'foo'
# _RE_FIRST_COMPONENT.match('foo-bar_baz.cc').group(0) == 'foo'
# _RE_FIRST_COMPONENT.match('foo_bar-baz.cc').group(0) == 'foo'
_RE_FIRST_COMPONENT = re.compile(r'^[^-_.]+')
def _DropCommonSuffixes(filename):
"""Drops common suffixes like _test.cc or -inl.h from filename.
For example:
>>> _DropCommonSuffixes('foo/foo-inl.h')
'foo/foo'
>>> _DropCommonSuffixes('foo/bar/foo.cc')
'foo/bar/foo'
>>> _DropCommonSuffixes('foo/foo_internal.h')
'foo/foo'
>>> _DropCommonSuffixes('foo/foo_unusualinternal.h')
'foo/foo_unusualinternal'
Args:
filename: The input filename.
Returns:
The filename with the common suffix removed.
"""
for suffix in ('test.cc', 'regtest.cc', 'unittest.cc',
'inl.h', 'impl.h', 'internal.h'):
if (filename.endswith(suffix) and len(filename) > len(suffix) and
filename[-len(suffix) - 1] in ('-', '_')):
return filename[:-len(suffix) - 1]
return os.path.splitext(filename)[0]
def _IsTestFilename(filename):
"""Determines if the given filename has a suffix that identifies it as a test.
Args:
filename: The input filename.
Returns:
True if 'filename' looks like a test, False otherwise.
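For example (illustrative, with hypothetical filenames):
>>> _IsTestFilename('foo_unittest.cc')
True
>>> _IsTestFilename('foo.cc')
False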
"""
if (filename.endswith('_test.cc') or
filename.endswith('_unittest.cc') or
filename.endswith('_regtest.cc')):
return True
else:
return False
def _ClassifyInclude(fileinfo, include, is_system):
"""Figures out what kind of header 'include' is.
Args:
fileinfo: The current file cpplint is running over. A FileInfo instance.
include: The path to a #included file.
is_system: True if the #include used <> rather than "".
Returns:
One of the _XXX_HEADER constants.
For example:
>>> _ClassifyInclude(FileInfo('foo/foo.cc'), 'stdio.h', True)
_C_SYS_HEADER
>>> _ClassifyInclude(FileInfo('foo/foo.cc'), 'string', True)
_CPP_SYS_HEADER
>>> _ClassifyInclude(FileInfo('foo/foo.cc'), 'foo/foo.h', False)
_LIKELY_MY_HEADER
>>> _ClassifyInclude(FileInfo('foo/foo_unknown_extension.cc'),
... 'bar/foo_other_ext.h', False)
_POSSIBLE_MY_HEADER
>>> _ClassifyInclude(FileInfo('foo/foo.cc'), 'foo/bar.h', False)
_OTHER_HEADER
"""
# This is a list of all standard c++ header files, except
# those already checked for above.
is_cpp_h = include in _CPP_HEADERS
if is_system:
if is_cpp_h:
return _CPP_SYS_HEADER
else:
return _C_SYS_HEADER
# If the target file and the include we're checking share a
# basename when we drop common extensions, and the include
# lives in . , then it's likely to be owned by the target file.
target_dir, target_base = (
os.path.split(_DropCommonSuffixes(fileinfo.RepositoryName())))
include_dir, include_base = os.path.split(_DropCommonSuffixes(include))
if target_base == include_base and (
include_dir == target_dir or
include_dir == os.path.normpath(target_dir + '/../public')):
return _LIKELY_MY_HEADER
# If the target and include share some initial basename
# component, it's possible the target is implementing the
# include, so it's allowed to be first, but we'll never
# complain if it's not there.
target_first_component = _RE_FIRST_COMPONENT.match(target_base)
include_first_component = _RE_FIRST_COMPONENT.match(include_base)
if (target_first_component and include_first_component and
target_first_component.group(0) ==
include_first_component.group(0)):
return _POSSIBLE_MY_HEADER
return _OTHER_HEADER
def CheckIncludeLine(filename, clean_lines, linenum, include_state, error):
"""Check rules that are applicable to #include lines.
Strings on #include lines are NOT removed from the elided line, to make
certain tasks easier. However, to prevent false positives, checks
applicable to #include lines in CheckLanguage must be put here.
Args:
filename: The name of the current file.
clean_lines: A CleansedLines instance containing the file.
linenum: The number of the line to check.
include_state: An _IncludeState instance in which the headers are inserted.
error: The function to call with any errors found.
"""
fileinfo = FileInfo(filename)
line = clean_lines.lines[linenum]
# "include" should use the new style "foo/bar.h" instead of just "bar.h"
if _RE_PATTERN_INCLUDE_NEW_STYLE.search(line):
error(filename, linenum, 'build/include', 4,
'Include the directory when naming .h files')
# we shouldn't include a file more than once. actually, there are a
# handful of instances where doing so is okay, but in general it's
# not.
match = _RE_PATTERN_INCLUDE.search(line)
if match:
include = match.group(2)
is_system = (match.group(1) == '<')
if include in include_state:
error(filename, linenum, 'build/include', 4,
'"%s" already included at %s:%s' %
(include, filename, include_state[include]))
else:
include_state[include] = linenum
# We want to ensure that headers appear in the right order:
# 1) for foo.cc, foo.h (preferred location)
# 2) c system files
# 3) cpp system files
# 4) for foo.cc, foo.h (deprecated location)
# 5) other google headers
#
# We classify each include statement as one of those 5 types
# using a number of techniques. The include_state object keeps
# track of the highest type seen, and complains if we see a
# lower type after that.
error_message = include_state.CheckNextIncludeOrder(
_ClassifyInclude(fileinfo, include, is_system))
if error_message:
error(filename, linenum, 'build/include_order', 4,
'%s. Should be: %s.h, c system, c++ system, other.' %
(error_message, fileinfo.BaseName()))
canonical_include = include_state.CanonicalizeAlphabeticalOrder(include)
if not include_state.IsInAlphabeticalOrder(
clean_lines, linenum, canonical_include):
error(filename, linenum, 'build/include_alpha', 4,
'Include "%s" not in alphabetical order' % include)
include_state.SetLastHeader(canonical_include)
# Look for any of the stream classes that are part of standard C++.
match = _RE_PATTERN_INCLUDE.match(line)
if match:
include = match.group(2)
if Match(r'(f|ind|io|i|o|parse|pf|stdio|str|)?stream$', include):
# Many unit tests use cout, so we exempt them.
if not _IsTestFilename(filename):
error(filename, linenum, 'readability/streams', 3,
'Streams are highly discouraged.')
def _GetTextInside(text, start_pattern):
r"""Retrieves all the text between matching open and close parentheses.
Given a string of lines and a regular expression string, retrieve all the text
following the expression and between opening punctuation symbols like
(, [, or {, and the matching close-punctuation symbol. This properly handles
nested occurrences of the punctuation, so for text like
printf(a(), b(c()));
a call to _GetTextInside(text, r'printf\(') will return 'a(), b(c())'.
start_pattern must match a string that ends with an opening punctuation symbol.
Args:
text: The lines from which to extract text. Its comments and strings must be
elided. It can be a single line or span multiple lines.
start_pattern: The regexp string indicating where to start extracting
the text.
Returns:
The extracted text.
None if either the opening string or ending punctuation could not be found.
"""
# TODO(sugawarayu): Audit cpplint.py to see what places could be profitably
# rewritten to use _GetTextInside (and currently use inferior regexp matching).
# Map each opening punctuation symbol to its matching closing symbol.
matching_punctuation = {'(': ')', '{': '}', '[': ']'}
closing_punctuation = set(matching_punctuation.itervalues())
# Find the position to start extracting text.
match = re.search(start_pattern, text, re.M)
if not match: # start_pattern not found in text.
return None
start_position = match.end(0)
assert start_position > 0, (
'start_pattern must end with an opening punctuation.')
assert text[start_position - 1] in matching_punctuation, (
'start_pattern must end with an opening punctuation.')
# Stack of closing punctuations we expect to have in text after position.
punctuation_stack = [matching_punctuation[text[start_position - 1]]]
position = start_position
while punctuation_stack and position < len(text):
if text[position] == punctuation_stack[-1]:
punctuation_stack.pop()
elif text[position] in closing_punctuation:
# A closing punctuation without matching opening punctuations.
return None
elif text[position] in matching_punctuation:
punctuation_stack.append(matching_punctuation[text[position]])
position += 1
if punctuation_stack:
# Opening punctuations left without matching close-punctuations.
return None
# punctuations match.
return text[start_position:position - 1]
# Patterns for matching call-by-reference parameters.
#
# Supports nested templates up to 2 levels deep using this messy pattern:
# < (?: < (?: < [^<>]*
# >
# | [^<>] )*
# >
# | [^<>] )*
# >
_RE_PATTERN_IDENT = r'[_a-zA-Z]\w*' # =~ [[:alpha:]][[:alnum:]]*
_RE_PATTERN_TYPE = (
r'(?:const\s+)?(?:typename\s+|class\s+|struct\s+|union\s+|enum\s+)?'
r'(?:\w|'
r'\s*<(?:<(?:<[^<>]*>|[^<>])*>|[^<>])*>|'
r'::)+')
# A call-by-reference parameter ends with '& identifier'.
_RE_PATTERN_REF_PARAM = re.compile(
r'(' + _RE_PATTERN_TYPE + r'(?:\s*(?:\bconst\b|[*]))*\s*'
r'&\s*' + _RE_PATTERN_IDENT + r')\s*(?:=[^,()]+)?[,)]')
# A call-by-const-reference parameter either ends with 'const& identifier'
# or looks like 'const type& identifier' when 'type' is atomic.
_RE_PATTERN_CONST_REF_PARAM = (
r'(?:.*\s*\bconst\s*&\s*' + _RE_PATTERN_IDENT +
r'|const\s+' + _RE_PATTERN_TYPE + r'\s*&\s*' + _RE_PATTERN_IDENT + r')')
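# For illustration (hypothetical declaration): in "void Resize(string& str,
# const Foo& foo)", _RE_PATTERN_REF_PARAM matches both parameters, but only
# "string& str" fails to also match _RE_PATTERN_CONST_REF_PARAM, so only the
# non-const reference would be reported by CheckForNonConstReference below.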
def CheckLanguage(filename, clean_lines, linenum, file_extension,
include_state, nesting_state, error):
"""Checks rules from the 'C++ language rules' section of cppguide.html.
Some of these rules are hard to test (function overloading, using
uint32 inappropriately), but we do the best we can.
Args:
filename: The name of the current file.
clean_lines: A CleansedLines instance containing the file.
linenum: The number of the line to check.
file_extension: The extension (without the dot) of the filename.
include_state: An _IncludeState instance in which the headers are inserted.
nesting_state: A _NestingState instance which maintains information about
the current stack of nested blocks being parsed.
error: The function to call with any errors found.
"""
# If the line is empty or consists of entirely a comment, no need to
# check it.
line = clean_lines.elided[linenum]
if not line:
return
match = _RE_PATTERN_INCLUDE.search(line)
if match:
CheckIncludeLine(filename, clean_lines, linenum, include_state, error)
return
# Reset include state across preprocessor directives. This is meant
# to silence warnings for conditional includes.
if Match(r'^\s*#\s*(?:ifdef|elif|else|endif)\b', line):
include_state.ResetSection()
# Make Windows paths like Unix.
fullname = os.path.abspath(filename).replace('\\', '/')
# TODO(unknown): figure out if they're using default arguments in fn proto.
# Check to see if they're using a conversion function cast.
# I just try to capture the most common basic types, though there are more.
# Parameterless conversion functions, such as bool(), are allowed as they are
# probably a member operator declaration or default constructor.
match = Search(
r'(\bnew\s+)?\b' # Grab 'new' operator, if it's there
r'(int|float|double|bool|char|int32|uint32|int64|uint64)'
r'(\([^)].*)', line)
if match:
matched_new = match.group(1)
matched_type = match.group(2)
matched_funcptr = match.group(3)
# gMock methods are defined using some variant of MOCK_METHODx(name, type)
# where type may be float(), int(string), etc. Without context they are
# virtually indistinguishable from int(x) casts. Likewise, gMock's
# MockCallback takes a template parameter of the form return_type(arg_type),
# which looks much like the cast we're trying to detect.
#
# std::function<> wrapper has a similar problem.
#
# Return types for function pointers also look like casts if they
# don't have an extra space.
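# For example (hypothetical code): "int x = int(2.5);" would be reported
# below with a suggestion to use static_cast<int>(...), while a line like
# "MOCK_METHOD1(GetCount, int(string))" is skipped by these filters.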
if (matched_new is None and # If new operator, then this isn't a cast
not (Match(r'^\s*MOCK_(CONST_)?METHOD\d+(_T)?\(', line) or
Search(r'\bMockCallback<.*>', line) or
Search(r'\bstd::function<.*>', line)) and
not (matched_funcptr and
Match(r'\((?:[^() ]+::\s*\*\s*)?[^() ]+\)\s*\(',
matched_funcptr))):
# Try a bit harder to catch gmock lines: the only place where
# something looks like an old-style cast is where we declare the
# return type of the mocked method, and the only time when we
# are missing context is if MOCK_METHOD was split across
# multiple lines. The missing MOCK_METHOD is usually one or two
# lines back, so scan back one or two lines.
#
# It's not possible for gmock macros to appear in the first 2
# lines, since the class head + section name takes up 2 lines.
if (linenum < 2 or
not (Match(r'^\s*MOCK_(?:CONST_)?METHOD\d+(?:_T)?\((?:\S+,)?\s*$',
clean_lines.elided[linenum - 1]) or
Match(r'^\s*MOCK_(?:CONST_)?METHOD\d+(?:_T)?\(\s*$',
clean_lines.elided[linenum - 2]))):
error(filename, linenum, 'readability/casting', 4,
'Using deprecated casting style. '
'Use static_cast<%s>(...) instead' %
matched_type)
CheckCStyleCast(filename, linenum, line, clean_lines.raw_lines[linenum],
'static_cast',
r'\((int|float|double|bool|char|u?int(16|32|64))\)', error)
# This doesn't catch all cases. Consider (const char * const)"hello".
#
# (char *) "foo" should always be a const_cast (reinterpret_cast won't
# compile).
if CheckCStyleCast(filename, linenum, line, clean_lines.raw_lines[linenum],
'const_cast', r'\((char\s?\*+\s?)\)\s*"', error):
pass
else:
# Check pointer casts for other than string constants
CheckCStyleCast(filename, linenum, line, clean_lines.raw_lines[linenum],
'reinterpret_cast', r'\((\w+\s?\*+\s?)\)', error)
# In addition, we look for people taking the address of a cast. This
# is dangerous -- casts can assign to temporaries, so the pointer doesn't
# point where you think.
match = Search(
r'(?:&\(([^)]+)\)[\w(])|'
r'(?:&(static|dynamic|down|reinterpret)_cast\b)', line)
if match and match.group(1) != '*':
error(filename, linenum, 'runtime/casting', 4,
('Are you taking an address of a cast? '
'This is dangerous: could be a temp var. '
'Take the address before doing the cast, rather than after'))
# Create an extended_line, which is the concatenation of the current and
# next lines, for more effective checking of code that may span more than one
# line.
if linenum + 1 < clean_lines.NumLines():
extended_line = line + clean_lines.elided[linenum + 1]
else:
extended_line = line
# Check for people declaring static/global STL strings at the top level.
# This is dangerous because the C++ language does not guarantee that
# globals with constructors are initialized before the first access.
match = Match(
r'((?:|static +)(?:|const +))string +([a-zA-Z0-9_:]+)\b(.*)',
line)
# Make sure it's not a function.
# Function template specialization looks like: "string foo<Type>(...".
# Class template definitions look like: "string Foo<Type>::Method(...".
#
# Also ignore things that look like operators. These are matched separately
# because operator names cross non-word boundaries. If we change the pattern
# above, we would decrease the accuracy of matching identifiers.
if (match and
not Search(r'\boperator\W', line) and
not Match(r'\s*(<.*>)?(::[a-zA-Z0-9_]+)?\s*\(([^"]|$)', match.group(3))):
error(filename, linenum, 'runtime/string', 4,
'For a static/global string constant, use a C style string instead: '
'"%schar %s[]".' %
(match.group(1), match.group(2)))
if Search(r'\b([A-Za-z0-9_]*_)\(\1\)', line):
error(filename, linenum, 'runtime/init', 4,
'You seem to be initializing a member variable with itself.')
if file_extension == 'h':
# TODO(unknown): check that 1-arg constructors are explicit.
# How to tell it's a constructor?
# (handled in CheckForNonStandardConstructs for now)
# TODO(unknown): check that classes have DISALLOW_EVIL_CONSTRUCTORS
# (level 1 error)
pass
# Check if people are using the verboten C basic types. The only exception
# we regularly allow is "unsigned short port" for port.
if Search(r'\bshort port\b', line):
if not Search(r'\bunsigned short port\b', line):
error(filename, linenum, 'runtime/int', 4,
'Use "unsigned short" for ports, not "short"')
else:
match = Search(r'\b(short|long(?! +double)|long long)\b', line)
if match:
error(filename, linenum, 'runtime/int', 4,
'Use int16/int64/etc, rather than the C type %s' % match.group(1))
# When snprintf is used, the second argument shouldn't be a literal.
match = Search(r'snprintf\s*\(([^,]*),\s*([0-9]*)\s*,', line)
if match and match.group(2) != '0':
# If 2nd arg is zero, snprintf is used to calculate size.
error(filename, linenum, 'runtime/printf', 3,
'If you can, use sizeof(%s) instead of %s as the 2nd arg '
'to snprintf.' % (match.group(1), match.group(2)))
# Check if some verboten C functions are being used.
if Search(r'\bsprintf\b', line):
error(filename, linenum, 'runtime/printf', 5,
'Never use sprintf. Use snprintf instead.')
match = Search(r'\b(strcpy|strcat)\b', line)
if match:
error(filename, linenum, 'runtime/printf', 4,
'Almost always, snprintf is better than %s' % match.group(1))
# Check if some verboten operator overloading is going on
# TODO(unknown): catch out-of-line unary operator&:
# class X {};
# int operator&(const X& x) { return 42; } // unary operator&
# The trick is it's hard to tell apart from binary operator&:
# class Y { int operator&(const Y& x) { return 23; } }; // binary operator&
if Search(r'\boperator\s*&\s*\(\s*\)', line):
error(filename, linenum, 'runtime/operator', 4,
'Unary operator& is dangerous. Do not use it.')
# Check for suspicious usage of "if" like
# } if (a == b) {
if Search(r'\}\s*if\s*\(', line):
error(filename, linenum, 'readability/braces', 4,
'Did you mean "else if"? If not, start a new line for "if".')
# Check for potential format string bugs like printf(foo).
# We constrain the pattern not to pick things like DocidForPrintf(foo).
# Not perfect but it can catch printf(foo.c_str()) and printf(foo->c_str())
# TODO(sugawarayu): Catch the following case. Need to change the calling
# convention of the whole function to process multiple line to handle it.
# printf(
# boy_this_is_a_really_long_variable_that_cannot_fit_on_the_prev_line);
printf_args = _GetTextInside(line, r'(?i)\b(string)?printf\s*\(')
if printf_args:
match = Match(r'([\w.\->()]+)$', printf_args)
if match and match.group(1) != '__VA_ARGS__':
function_name = re.search(r'\b((?:string)?printf)\s*\(',
line, re.I).group(1)
error(filename, linenum, 'runtime/printf', 4,
'Potential format string bug. Do %s("%%s", %s) instead.'
% (function_name, match.group(1)))
# Check for potential memset bugs like memset(buf, sizeof(buf), 0).
match = Search(r'memset\s*\(([^,]*),\s*([^,]*),\s*0\s*\)', line)
if match and not Match(r"^''|-?[0-9]+|0x[0-9A-Fa-f]$", match.group(2)):
error(filename, linenum, 'runtime/memset', 4,
'Did you mean "memset(%s, 0, %s)"?'
% (match.group(1), match.group(2)))
if Search(r'\busing namespace\b', line):
error(filename, linenum, 'build/namespaces', 5,
'Do not use namespace using-directives. '
'Use using-declarations instead.')
# Detect variable-length arrays.
match = Match(r'\s*(.+::)?(\w+) [a-z]\w*\[(.+)];', line)
if (match and match.group(2) != 'return' and match.group(2) != 'delete' and
match.group(3).find(']') == -1):
# Split the size using space and arithmetic operators as delimiters.
# If any of the resulting tokens are not compile time constants then
# report the error.
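# For example (hypothetical code): for "int items[count];" where 'count' is
# a runtime variable, the single token 'count' matches none of the constant
# patterns below and the declaration is reported, while "int items[kMaxItems];"
# passes because 'kMaxItems' matches the k-prefixed constant pattern.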
tokens = re.split(r'\s|\+|\-|\*|\/|<<|>>]', match.group(3))
is_const = True
skip_next = False
for tok in tokens:
if skip_next:
skip_next = False
continue
if Search(r'sizeof\(.+\)', tok): continue
if Search(r'arraysize\(\w+\)', tok): continue
tok = tok.lstrip('(')
tok = tok.rstrip(')')
if not tok: continue
if Match(r'\d+', tok): continue
if Match(r'0[xX][0-9a-fA-F]+', tok): continue
if Match(r'k[A-Z0-9]\w*', tok): continue
if Match(r'(.+::)?k[A-Z0-9]\w*', tok): continue
if Match(r'(.+::)?[A-Z][A-Z0-9_]*', tok): continue
# A catch-all for tricky sizeof cases, including 'sizeof expression',
# 'sizeof(*type)', 'sizeof(const type)', and 'sizeof(struct StructName)',
# which require skipping the next token because we split on ' ' and '*'.
if tok.startswith('sizeof'):
skip_next = True
continue
is_const = False
break
if not is_const:
error(filename, linenum, 'runtime/arrays', 1,
'Do not use variable-length arrays. Use an appropriately named '
"('k' followed by CamelCase) compile-time constant for the size.")
# If DISALLOW_EVIL_CONSTRUCTORS, DISALLOW_COPY_AND_ASSIGN, or
# DISALLOW_IMPLICIT_CONSTRUCTORS is present, then it should be the last thing
# in the class declaration.
match = Match(
(r'\s*'
r'(DISALLOW_(EVIL_CONSTRUCTORS|COPY_AND_ASSIGN|IMPLICIT_CONSTRUCTORS))'
r'\(.*\);$'),
line)
if match and linenum + 1 < clean_lines.NumLines():
next_line = clean_lines.elided[linenum + 1]
# We allow some, but not all, declarations of variables to be present
# in the statement that defines the class. The [\w\*,\s]* fragment of
# the regular expression below allows users to declare instances of
# the class or pointers to instances, but not less common types such
# as function pointers or arrays. It's a tradeoff between allowing
# reasonable code and avoiding trying to parse more C++ using regexps.
if not Search(r'^\s*}[\w\*,\s]*;', next_line):
error(filename, linenum, 'readability/constructors', 3,
match.group(1) + ' should be the last thing in the class')
# Check for use of unnamed namespaces in header files. Registration
# macros are typically OK, so we allow use of "namespace {" on lines
# that end with backslashes.
if (file_extension == 'h'
and Search(r'\bnamespace\s*{', line)
and line[-1] != '\\'):
error(filename, linenum, 'build/namespaces', 4,
'Do not use unnamed namespaces in header files. See '
'http://google-styleguide.googlecode.com/svn/trunk/cppguide.xml#Namespaces'
' for more information.')
def CheckForNonConstReference(filename, clean_lines, linenum,
nesting_state, error):
"""Check for non-const references.
Separate from CheckLanguage since it scans backwards from current
line, instead of scanning forward.
Args:
filename: The name of the current file.
clean_lines: A CleansedLines instance containing the file.
linenum: The number of the line to check.
nesting_state: A _NestingState instance which maintains information about
the current stack of nested blocks being parsed.
error: The function to call with any errors found.
"""
# Do nothing if there is no '&' on current line.
line = clean_lines.elided[linenum]
if '&' not in line:
return
# Long type names may be broken across multiple lines, usually in one
# of these forms:
# LongType
# ::LongTypeContinued &identifier
# LongType::
# LongTypeContinued &identifier
# LongType<
# ...>::LongTypeContinued &identifier
#
# If we detected a type split across two lines, join the previous
# line to current line so that we can match const references
# accordingly.
#
# Note that this only scans back one line, since scanning back
# arbitrary number of lines would be expensive. If you have a type
# that spans more than 2 lines, please use a typedef.
if linenum > 1:
previous = None
if Match(r'\s*::(?:[\w<>]|::)+\s*&\s*\S', line):
# previous_line\n + ::current_line
previous = Search(r'\b((?:const\s*)?(?:[\w<>]|::)+[\w<>])\s*$',
clean_lines.elided[linenum - 1])
elif Match(r'\s*[a-zA-Z_]([\w<>]|::)+\s*&\s*\S', line):
# previous_line::\n + current_line
previous = Search(r'\b((?:const\s*)?(?:[\w<>]|::)+::)\s*$',
clean_lines.elided[linenum - 1])
if previous:
line = previous.group(1) + line.lstrip()
else:
# Check for templated parameter that is split across multiple lines
endpos = line.rfind('>')
if endpos > -1:
(_, startline, startpos) = ReverseCloseExpression(
clean_lines, linenum, endpos)
if startpos > -1 and startline < linenum:
# Found the matching < on an earlier line, collect all
# pieces up to current line.
line = ''
for i in xrange(startline, linenum + 1):
line += clean_lines.elided[i].strip()
# Check for non-const references in function parameters. A single '&' may
# be found in the following places:
# inside expression: binary & for bitwise AND
# inside expression: unary & for taking the address of something
# inside declarators: reference parameter
# We will exclude the first two cases by checking that we are not inside a
# function body, including one that was just introduced by a trailing '{'.
# TODO(unknown): Doesn't account for preprocessor directives.
# TODO(unknown): Doesn't account for 'catch(Exception& e)' [rare].
check_params = False
if not nesting_state.stack:
check_params = True # top level
elif (isinstance(nesting_state.stack[-1], _ClassInfo) or
isinstance(nesting_state.stack[-1], _NamespaceInfo)):
check_params = True # within class or namespace
elif Match(r'.*{\s*$', line):
if (len(nesting_state.stack) == 1 or
isinstance(nesting_state.stack[-2], _ClassInfo) or
isinstance(nesting_state.stack[-2], _NamespaceInfo)):
check_params = True # just opened global/class/namespace block
# We allow non-const references in a few standard places, like functions
# called "swap()" or iostream operators like "<<" or ">>". Do not check
# those function parameters.
#
# We also accept & in static_assert, which looks like a function but
# it's actually a declaration expression.
whitelisted_functions = (r'(?:[sS]wap(?:<\w:+>)?|'
r'operator\s*[<>][<>]|'
r'static_assert|COMPILE_ASSERT'
r')\s*\(')
if Search(whitelisted_functions, line):
check_params = False
elif not Search(r'\S+\([^)]*$', line):
# Don't see a whitelisted function on this line. Actually we
# didn't see any function name on this line, so this is likely a
# multi-line parameter list. Try a bit harder to catch this case.
for i in xrange(2):
if (linenum > i and
Search(whitelisted_functions, clean_lines.elided[linenum - i - 1])):
check_params = False
break
if check_params:
decls = ReplaceAll(r'{[^}]*}', ' ', line) # exclude function body
for parameter in re.findall(_RE_PATTERN_REF_PARAM, decls):
if not Match(_RE_PATTERN_CONST_REF_PARAM, parameter):
error(filename, linenum, 'runtime/references', 2,
'Is this a non-const reference? '
'If so, make const or use a pointer: ' +
ReplaceAll(' *<', '<', parameter))
def CheckCStyleCast(filename, linenum, line, raw_line, cast_type, pattern,
error):
"""Checks for a C-style cast by looking for the pattern.
Args:
filename: The name of the current file.
linenum: The number of the line to check.
line: The line of code to check.
raw_line: The raw line of code to check, with comments.
cast_type: The string for the C++ cast to recommend. This is either
reinterpret_cast, static_cast, or const_cast, depending.
pattern: The regular expression used to find C-style casts.
error: The function to call with any errors found.
Returns:
True if an error was emitted.
False otherwise.
"""
match = Search(pattern, line)
if not match:
return False
# e.g., sizeof(int)
sizeof_match = Match(r'.*sizeof\s*$', line[0:match.start(1) - 1])
if sizeof_match:
error(filename, linenum, 'runtime/sizeof', 1,
'Using sizeof(type). Use sizeof(varname) instead if possible')
return True
# operator++(int) and operator--(int)
if (line[0:match.start(1) - 1].endswith(' operator++') or
line[0:match.start(1) - 1].endswith(' operator--')):
return False
# A single unnamed argument for a function tends to look like an old
# style cast. If we see those, don't issue warnings for deprecated
# casts, instead issue warnings for unnamed arguments where
# appropriate.
#
# These are things that we want warnings for, since the style guide
# explicitly requires all parameters to be named:
# Function(int);
# Function(int) {
# ConstMember(int) const;
# ConstMember(int) const {
# ExceptionMember(int) throw (...);
# ExceptionMember(int) throw (...) {
# PureVirtual(int) = 0;
#
# These are functions of some sort, where the compiler would be fine
# if they had named parameters, but people often omit those
# identifiers to reduce clutter:
# (FunctionPointer)(int);
# (FunctionPointer)(int) = value;
# Function((function_pointer_arg)(int))
# <TemplateArgument(int)>;
# <(FunctionPointerTemplateArgument)(int)>;
remainder = line[match.end(0):]
if Match(r'^\s*(?:;|const\b|throw\b|=|>|\{|\))', remainder):
# Looks like an unnamed parameter.
# Don't warn on any kind of template arguments.
if Match(r'^\s*>', remainder):
return False
# Don't warn on assignments to function pointers, but keep warnings for
# unnamed parameters to pure virtual functions. Note that this pattern
# will also pass on assignments of "0" to function pointers, but the
# preferred values for those would be "nullptr" or "NULL".
matched_zero = Match(r'^\s=\s*(\S+)\s*;', remainder)
if matched_zero and matched_zero.group(1) != '0':
return False
# Don't warn on function pointer declarations. For this we need
# to check what came before the "(type)" string.
if Match(r'.*\)\s*$', line[0:match.start(0)]):
return False
# Don't warn if the parameter is named with block comments, e.g.:
# Function(int /*unused_param*/);
if '/*' in raw_line:
return False
# Passed all filters, issue warning here.
error(filename, linenum, 'readability/function', 3,
'All parameters should be named in a function')
return True
# At this point, all that should be left is actual casts.
error(filename, linenum, 'readability/casting', 4,
'Using C-style cast. Use %s<%s>(...) instead' %
(cast_type, match.group(1)))
return True
_HEADERS_CONTAINING_TEMPLATES = (
('<deque>', ('deque',)),
('<functional>', ('unary_function', 'binary_function',
'plus', 'minus', 'multiplies', 'divides', 'modulus',
'negate',
'equal_to', 'not_equal_to', 'greater', 'less',
'greater_equal', 'less_equal',
'logical_and', 'logical_or', 'logical_not',
'unary_negate', 'not1', 'binary_negate', 'not2',
'bind1st', 'bind2nd',
'pointer_to_unary_function',
'pointer_to_binary_function',
'ptr_fun',
'mem_fun_t', 'mem_fun', 'mem_fun1_t', 'mem_fun1_ref_t',
'mem_fun_ref_t',
'const_mem_fun_t', 'const_mem_fun1_t',
'const_mem_fun_ref_t', 'const_mem_fun1_ref_t',
'mem_fun_ref',
)),
('<limits>', ('numeric_limits',)),
('<list>', ('list',)),
('<map>', ('map', 'multimap',)),
('<memory>', ('allocator',)),
('<queue>', ('queue', 'priority_queue',)),
('<set>', ('set', 'multiset',)),
('<stack>', ('stack',)),
('<string>', ('char_traits', 'basic_string',)),
('<utility>', ('pair',)),
('<vector>', ('vector',)),
# gcc extensions.
# Note: std::hash is their hash, ::hash is our hash
('<hash_map>', ('hash_map', 'hash_multimap',)),
('<hash_set>', ('hash_set', 'hash_multiset',)),
('<slist>', ('slist',)),
)
_RE_PATTERN_STRING = re.compile(r'\bstring\b')
_re_pattern_algorithm_header = []
for _template in ('copy', 'max', 'min', 'min_element', 'sort', 'swap',
'transform'):
# Match max<type>(..., ...), max(..., ...), but not foo->max, foo.max or
# type::max().
_re_pattern_algorithm_header.append(
(re.compile(r'[^>.]\b' + _template + r'(<.*?>)?\([^\)]'),
_template,
'<algorithm>'))
_re_pattern_templates = []
for _header, _templates in _HEADERS_CONTAINING_TEMPLATES:
for _template in _templates:
_re_pattern_templates.append(
(re.compile(r'(\<|\b)' + _template + r'\s*\<'),
_template + '<>',
_header))
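# As an illustrative sketch: the entry generated for 'map' above yields a
# pattern matching uses such as "map<int, string> m;", which
# CheckForIncludeWhatYouUse below records as a reason to suggest adding
# "#include <map>" when that header has not been included.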
def FilesBelongToSameModule(filename_cc, filename_h):
"""Check if these two filenames belong to the same module.
The concept of a 'module' here is as follows:
foo.h, foo-inl.h, foo.cc, foo_test.cc and foo_unittest.cc belong to the
same 'module' if they are in the same directory.
some/path/public/xyzzy and some/path/internal/xyzzy are also considered
to belong to the same module here.
If the filename_cc contains a longer path than the filename_h, for example,
'/absolute/path/to/base/sysinfo.cc', and this file would include
'base/sysinfo.h', this function also produces the prefix needed to open the
header. This is used by the caller of this function to more robustly open the
header file. We don't have access to the real include paths in this context,
so we need this guesswork here.
Known bugs: tools/base/bar.cc and base/bar.h belong to the same module
according to this implementation. Because of this, this function gives
some false positives. This should be sufficiently rare in practice.
Args:
filename_cc: is the path for the .cc file
filename_h: is the path for the header path
Returns:
Tuple with a bool and a string:
bool: True if filename_cc and filename_h belong to the same module.
string: the additional prefix needed to open the header file.
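For example (illustrative, with hypothetical paths):
>>> FilesBelongToSameModule('/path/to/base/sysinfo.cc', 'base/sysinfo.h')
(True, '/path/to/')
>>> FilesBelongToSameModule('a/b/foo.cc', 'x/y/bar.h')
(False, '')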
"""
if not filename_cc.endswith('.cc'):
return (False, '')
filename_cc = filename_cc[:-len('.cc')]
if filename_cc.endswith('_unittest'):
filename_cc = filename_cc[:-len('_unittest')]
elif filename_cc.endswith('_test'):
filename_cc = filename_cc[:-len('_test')]
filename_cc = filename_cc.replace('/public/', '/')
filename_cc = filename_cc.replace('/internal/', '/')
if not filename_h.endswith('.h'):
return (False, '')
filename_h = filename_h[:-len('.h')]
if filename_h.endswith('-inl'):
filename_h = filename_h[:-len('-inl')]
filename_h = filename_h.replace('/public/', '/')
filename_h = filename_h.replace('/internal/', '/')
files_belong_to_same_module = filename_cc.endswith(filename_h)
common_path = ''
if files_belong_to_same_module:
common_path = filename_cc[:-len(filename_h)]
return files_belong_to_same_module, common_path
def UpdateIncludeState(filename, include_state, io=codecs):
"""Fill up the include_state with new includes found from the file.
Args:
filename: the name of the header to read.
include_state: an _IncludeState instance in which the headers are inserted.
io: The io factory to use to read the file. Provided for testability.
Returns:
True if a header was successfully added. False otherwise.
"""
headerfile = None
try:
headerfile = io.open(filename, 'r', 'utf8', 'replace')
except IOError:
return False
linenum = 0
for line in headerfile:
linenum += 1
clean_line = CleanseComments(line)
match = _RE_PATTERN_INCLUDE.search(clean_line)
if match:
include = match.group(2)
# The value formatting is cute, but not really used right now.
# What matters here is that the key is in include_state.
include_state.setdefault(include, '%s:%d' % (filename, linenum))
return True
def CheckForIncludeWhatYouUse(filename, clean_lines, include_state, error,
io=codecs):
"""Reports for missing stl includes.
This function will output warnings to make sure you are including the headers
necessary for the STL containers and functions that you use. We only give one
reason to include a header. For example, if you use both equal_to<> and
less<> in a .h file, only one (the latter in the file) of these will be
reported as a reason to include <functional>.
Args:
filename: The name of the current file.
clean_lines: A CleansedLines instance containing the file.
include_state: An _IncludeState instance.
error: The function to call with any errors found.
io: The IO factory to use to read the header file. Provided for unittest
injection.
"""
required = {} # A map of header name to linenumber and the template entity.
# Example of required: { '<functional>': (1219, 'less<>') }
for linenum in xrange(clean_lines.NumLines()):
line = clean_lines.elided[linenum]
if not line or line[0] == '#':
continue
# String is special -- it is a non-templatized type in STL.
matched = _RE_PATTERN_STRING.search(line)
if matched:
# Don't warn about strings in non-STL namespaces:
# (We check only the first match per line; good enough.)
prefix = line[:matched.start()]
if prefix.endswith('std::') or not prefix.endswith('::'):
required['<string>'] = (linenum, 'string')
for pattern, template, header in _re_pattern_algorithm_header:
if pattern.search(line):
required[header] = (linenum, template)
# The following function is just a speed up, no semantics are changed.
if '<' not in line: # Reduces the cpu time usage by skipping lines.
continue
for pattern, template, header in _re_pattern_templates:
if pattern.search(line):
required[header] = (linenum, template)
# The policy is that if you #include something in foo.h you don't need to
# include it again in foo.cc. Here, we will look at possible includes.
# Let's copy the include_state so it is only messed up within this function.
include_state = include_state.copy()
# Did we find the header for this file (if any) and successfully load it?
header_found = False
# Use the absolute path so that matching works properly.
abs_filename = FileInfo(filename).FullName()
# For Emacs's flymake.
# If cpplint is invoked from Emacs's flymake, a temporary file is generated
# by flymake and that file name might end with '_flymake.cc'. In that case,
# restore original file name here so that the corresponding header file can be
# found.
# e.g. If the file name is 'foo_flymake.cc', we should search for 'foo.h'
# instead of 'foo_flymake.h'
abs_filename = re.sub(r'_flymake\.cc$', '.cc', abs_filename)
# include_state is modified during iteration, so we iterate over a copy of
# the keys.
header_keys = include_state.keys()
for header in header_keys:
(same_module, common_path) = FilesBelongToSameModule(abs_filename, header)
fullpath = common_path + header
if same_module and UpdateIncludeState(fullpath, include_state, io):
header_found = True
# If we can't find the header file for a .cc, assume it's because we don't
# know where to look. In that case we'll give up as we're not sure they
# didn't include it in the .h file.
# TODO(unknown): Do a better job of finding .h files so we are confident that
# not having the .h file means there isn't one.
if filename.endswith('.cc') and not header_found:
return
# All the lines have been processed, report the errors found.
for required_header_unstripped in required:
template = required[required_header_unstripped][1]
if required_header_unstripped.strip('<>"') not in include_state:
error(filename, required[required_header_unstripped][0],
'build/include_what_you_use', 4,
'Add #include ' + required_header_unstripped + ' for ' + template)
_RE_PATTERN_EXPLICIT_MAKEPAIR = re.compile(r'\bmake_pair\s*<')
def CheckMakePairUsesDeduction(filename, clean_lines, linenum, error):
"""Check that make_pair's template arguments are deduced.
G++ 4.6 in C++0x mode fails badly if make_pair's template arguments are
specified explicitly, and such use isn't intended in any case.
Args:
filename: The name of the current file.
clean_lines: A CleansedLines instance containing the file.
linenum: The number of the line to check.
error: The function to call with any errors found.
"""
line = clean_lines.elided[linenum]
match = _RE_PATTERN_EXPLICIT_MAKEPAIR.search(line)
if match:
error(filename, linenum, 'build/explicit_make_pair',
4, # 4 = high confidence
'For C++11-compatibility, omit template arguments from make_pair'
' OR use pair directly OR if appropriate, construct a pair directly')
def ProcessLine(filename, file_extension, clean_lines, line,
include_state, function_state, nesting_state, error,
extra_check_functions=[]):
"""Processes a single line in the file.
Args:
filename: Filename of the file that is being processed.
file_extension: The extension (dot not included) of the file.
clean_lines: An array of strings, each representing a line of the file,
with comments stripped.
line: Number of line being processed.
include_state: An _IncludeState instance in which the headers are inserted.
function_state: A _FunctionState instance which counts function lines, etc.
nesting_state: A _NestingState instance which maintains information about
the current stack of nested blocks being parsed.
error: A callable to which errors are reported, which takes 4 arguments:
filename, line number, error level, and message
extra_check_functions: An array of additional check functions that will be
run on each source line. Each function takes 4
arguments: filename, clean_lines, line, error
"""
raw_lines = clean_lines.raw_lines
ParseNolintSuppressions(filename, raw_lines[line], line, error)
nesting_state.Update(filename, clean_lines, line, error)
if nesting_state.stack and nesting_state.stack[-1].inline_asm != _NO_ASM:
return
CheckForFunctionLengths(filename, clean_lines, line, function_state, error)
CheckForMultilineCommentsAndStrings(filename, clean_lines, line, error)
CheckStyle(filename, clean_lines, line, file_extension, nesting_state, error)
CheckLanguage(filename, clean_lines, line, file_extension, include_state,
nesting_state, error)
CheckForNonConstReference(filename, clean_lines, line, nesting_state, error)
CheckForNonStandardConstructs(filename, clean_lines, line,
nesting_state, error)
CheckVlogArguments(filename, clean_lines, line, error)
CheckPosixThreading(filename, clean_lines, line, error)
CheckInvalidIncrement(filename, clean_lines, line, error)
CheckMakePairUsesDeduction(filename, clean_lines, line, error)
for check_fn in extra_check_functions:
check_fn(filename, clean_lines, line, error)
def ProcessFileData(filename, file_extension, lines, error,
extra_check_functions=[]):
"""Performs lint checks and reports any errors to the given error function.
Args:
filename: Filename of the file that is being processed.
file_extension: The extension (dot not included) of the file.
lines: An array of strings, each representing a line of the file, with the
last element being empty if the file is terminated with a newline.
error: A callable to which errors are reported, which takes 4 arguments:
filename, line number, error level, and message
extra_check_functions: An array of additional check functions that will be
run on each source line. Each function takes 4
arguments: filename, clean_lines, line, error
"""
lines = (['// marker so line numbers and indices both start at 1'] + lines +
['// marker so line numbers end in a known way'])
include_state = _IncludeState()
function_state = _FunctionState()
nesting_state = _NestingState()
ResetNolintSuppressions()
CheckForCopyright(filename, lines, error)
if file_extension == 'h':
CheckForHeaderGuard(filename, lines, error)
RemoveMultiLineComments(filename, lines, error)
clean_lines = CleansedLines(lines)
for line in xrange(clean_lines.NumLines()):
ProcessLine(filename, file_extension, clean_lines, line,
include_state, function_state, nesting_state, error,
extra_check_functions)
nesting_state.CheckCompletedBlocks(filename, error)
CheckForIncludeWhatYouUse(filename, clean_lines, include_state, error)
# We check here rather than inside ProcessLine so that we see raw
# lines rather than "cleaned" lines.
CheckForBadCharacters(filename, lines, error)
CheckForNewlineAtEOF(filename, lines, error)
def ProcessFile(filename, vlevel, extra_check_functions=[]):
"""Does google-lint on a single file.
Args:
filename: The name of the file to parse.
vlevel: The level of errors to report. Every error of confidence
>= verbose_level will be reported. 0 is a good default.
extra_check_functions: An array of additional check functions that will be
run on each source line. Each function takes 4
arguments: filename, clean_lines, line, error
"""
_SetVerboseLevel(vlevel)
try:
# Support the UNIX convention of using "-" for stdin. Note that
# we are not opening the file with universal newline support
# (which codecs doesn't support anyway), so the resulting lines do
# contain trailing '\r' characters if we are reading a file that
# has CRLF endings.
# If after the split a trailing '\r' is present, it is removed
# below. If it is not expected to be present (i.e. os.linesep is not
# '\r\n', unlike on Windows), a warning is issued below when this file
# is processed.
if filename == '-':
lines = codecs.StreamReaderWriter(sys.stdin,
codecs.getreader('utf8'),
codecs.getwriter('utf8'),
'replace').read().split('\n')
else:
lines = codecs.open(filename, 'r', 'utf8', 'replace').read().split('\n')
carriage_return_found = False
# Remove trailing '\r'.
for linenum in range(len(lines)):
if lines[linenum].endswith('\r'):
lines[linenum] = lines[linenum].rstrip('\r')
carriage_return_found = True
except IOError:
sys.stderr.write(
"Skipping input '%s': Can't open for reading\n" % filename)
return
# Note, if no dot is found, this will give the entire filename as the ext.
file_extension = filename[filename.rfind('.') + 1:]
# When reading from stdin, the extension is unknown, so no cpplint tests
# should rely on the extension.
if filename != '-' and file_extension not in _valid_extensions:
sys.stderr.write('Ignoring %s; not a valid file name '
'(%s)\n' % (filename, ', '.join(_valid_extensions)))
else:
ProcessFileData(filename, file_extension, lines, Error,
extra_check_functions)
if carriage_return_found and os.linesep != '\r\n':
# Use 0 for linenum since outputting only one error for potentially
# several lines.
Error(filename, 0, 'whitespace/newline', 1,
'One or more unexpected \\r (^M) found; '
'better to use only a \\n')
sys.stderr.write('Done processing %s\n' % filename)
def PrintUsage(message):
"""Prints a brief usage string and exits, optionally with an error message.
Args:
message: The optional error message.
"""
sys.stderr.write(_USAGE)
if message:
sys.exit('\nFATAL ERROR: ' + message)
else:
sys.exit(1)
def PrintCategories():
"""Prints a list of all the error-categories used by error messages.
These are the categories used to filter messages via --filter.
"""
sys.stderr.write(''.join(' %s\n' % cat for cat in _ERROR_CATEGORIES))
sys.exit(0)
def ParseArguments(args):
"""Parses the command line arguments.
This may set the output format and verbosity level as side-effects.
Args:
args: The command line arguments:
Returns:
The list of filenames to lint.
"""
try:
(opts, filenames) = getopt.getopt(args, '', ['help', 'output=', 'verbose=',
'counting=',
'filter=',
'root=',
'linelength=',
'extensions='])
except getopt.GetoptError:
PrintUsage('Invalid arguments.')
verbosity = _VerboseLevel()
output_format = _OutputFormat()
filters = ''
counting_style = ''
for (opt, val) in opts:
if opt == '--help':
PrintUsage(None)
elif opt == '--output':
if val not in ('emacs', 'vs7', 'eclipse'):
PrintUsage('The only allowed output formats are emacs, vs7 and eclipse.')
output_format = val
elif opt == '--verbose':
verbosity = int(val)
elif opt == '--filter':
filters = val
if not filters:
PrintCategories()
elif opt == '--counting':
if val not in ('total', 'toplevel', 'detailed'):
PrintUsage('Valid counting options are total, toplevel, and detailed')
counting_style = val
elif opt == '--root':
global _root
_root = val
elif opt == '--linelength':
global _line_length
try:
_line_length = int(val)
except ValueError:
PrintUsage('Line length must be digits.')
elif opt == '--extensions':
global _valid_extensions
try:
_valid_extensions = set(val.split(','))
except ValueError:
PrintUsage('Extensions must be a comma-separated list.')
if not filenames:
PrintUsage('No files were specified.')
_SetOutputFormat(output_format)
_SetVerboseLevel(verbosity)
_SetFilters(filters)
_SetCountingStyle(counting_style)
return filenames
def main():
filenames = ParseArguments(sys.argv[1:])
# Change stderr to write with replacement characters so we don't die
# if we try to print something containing non-ASCII characters.
sys.stderr = codecs.StreamReaderWriter(sys.stderr,
codecs.getreader('utf8'),
codecs.getwriter('utf8'),
'replace')
_cpplint_state.ResetErrorCounts()
for filename in filenames:
ProcessFile(filename, _cpplint_state.verbose_level)
_cpplint_state.PrintErrorCounts()
sys.exit(_cpplint_state.error_count > 0)
if __name__ == '__main__':
main()
| bsd-3-clause |
ganeshkoilada/libforensics | code/lf/dec/splitraw.py | 13 | 2207 | # Copyright 2010 Michael Murr
#
# This file is part of LibForensics.
#
# LibForensics is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# LibForensics is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with LibForensics. If not, see <http://www.gnu.org/licenses/>.
"""A stream for a raw/dd file that has been split into multiple pieces."""
# local imports
from lf.dec.consts import SEEK_SET
from lf.dec.base import SingleStreamContainer
from lf.dec.composite import CompositeIStream
from lf.dec.raw import RawIStream
__docformat__ = "restructuredtext en"
__all__ = [
"SplitRaw", "SplitRawIStream"
]
class SplitRaw(SingleStreamContainer):
"""A container for a raw/dd file that has been split into pieces."""
def __init__(self, names):
"""Initializes a SplitRaw object.
:type names: list of strings
:param names: A list of the names of the raw/dd files.
"""
super(SplitRaw, self).__init__()
self.stream = SplitRawIStream(names)
# end def __init__
# end class SplitRaw
class SplitRawIStream(CompositeIStream):
"""A stream for a raw/dd file that has been split into pieces.
.. attribute:: _names
A list of the names of the raw/dd files.
"""
def __init__(self, names):
"""Initializes a SplitRawIStream object.
:type names: list of strings
:param names: A list of the names of the raw/dd files.
"""
raw_streams = list()
for name in names:
raw_stream = RawIStream(name)
raw_streams.append((raw_stream, 0, raw_stream.size))
# end for
super(SplitRawIStream, self).__init__(raw_streams)
self._names = names
# end def __init__
# end class SplitRawIStream
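# Illustrative usage (hypothetical file names; assumes the read()/seek()
# stream interface provided by the rest of the lf.dec package):
#
# stream = SplitRawIStream(["disk.001", "disk.002"])
# stream.seek(0, SEEK_SET)
# mbr = stream.read(512)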
| gpl-3.0 |
peterlauri/django | django/conf/locale/lt/formats.py | 504 | 1830 | # -*- encoding: utf-8 -*-
# This file is distributed under the same license as the Django package.
#
from __future__ import unicode_literals
# The *_FORMAT strings use the Django date format syntax,
# see http://docs.djangoproject.com/en/dev/ref/templates/builtins/#date
DATE_FORMAT = r'Y \m. E j \d.'
TIME_FORMAT = 'H:i'
DATETIME_FORMAT = r'Y \m. E j \d., H:i'
YEAR_MONTH_FORMAT = r'Y \m. F'
MONTH_DAY_FORMAT = r'E j \d.'
SHORT_DATE_FORMAT = 'Y-m-d'
SHORT_DATETIME_FORMAT = 'Y-m-d H:i'
FIRST_DAY_OF_WEEK = 1 # Monday
# The *_INPUT_FORMATS strings use the Python strftime format syntax,
# see http://docs.python.org/library/datetime.html#strftime-strptime-behavior
DATE_INPUT_FORMATS = [
'%Y-%m-%d', '%d.%m.%Y', '%d.%m.%y', # '2006-10-25', '25.10.2006', '25.10.06'
]
TIME_INPUT_FORMATS = [
'%H:%M:%S', # '14:30:59'
'%H:%M:%S.%f', # '14:30:59.000200'
'%H:%M', # '14:30'
'%H.%M.%S', # '14.30.59'
'%H.%M.%S.%f', # '14.30.59.000200'
'%H.%M', # '14.30'
]
DATETIME_INPUT_FORMATS = [
'%Y-%m-%d %H:%M:%S', # '2006-10-25 14:30:59'
'%Y-%m-%d %H:%M:%S.%f', # '2006-10-25 14:30:59.000200'
'%Y-%m-%d %H:%M', # '2006-10-25 14:30'
'%d.%m.%Y %H:%M:%S', # '25.10.2006 14:30:59'
'%d.%m.%Y %H:%M:%S.%f', # '25.10.2006 14:30:59.000200'
'%d.%m.%Y %H:%M', # '25.10.2006 14:30'
'%d.%m.%Y', # '25.10.2006'
'%d.%m.%y %H:%M:%S', # '25.10.06 14:30:59'
'%d.%m.%y %H:%M:%S.%f', # '25.10.06 14:30:59.000200'
'%d.%m.%y %H:%M', # '25.10.06 14:30'
'%d.%m.%y %H.%M.%S', # '25.10.06 14.30.59'
'%d.%m.%y %H.%M.%S.%f', # '25.10.06 14.30.59.000200'
'%d.%m.%y %H.%M', # '25.10.06 14.30'
'%d.%m.%y', # '25.10.06'
]
DECIMAL_SEPARATOR = ','
THOUSAND_SEPARATOR = '.'
NUMBER_GROUPING = 3
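# Illustrative sketch (not part of the Django source): with the Lithuanian
# locale active and USE_L10N enabled, these constants drive localized
# rendering and parsing; the outputs shown are approximate.
#
#   from datetime import date
#   from django.utils import formats
#
#   formats.date_format(date(2006, 10, 25), 'SHORT_DATE_FORMAT')
#   # -> '2006-10-25'                     (SHORT_DATE_FORMAT = 'Y-m-d')
#   formats.date_format(date(2006, 10, 25), 'DATE_FORMAT')
#   # -> roughly '2006 m. spalio 25 d.'   (DATE_FORMAT above)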
| bsd-3-clause |
aetilley/scikit-learn | sklearn/semi_supervised/label_propagation.py | 128 | 15312 | # coding=utf8
"""
Label propagation in the context of this module refers to a set of
semi-supervised classification algorithms. At a high level, these algorithms
work by forming a fully-connected graph between all points given and solving
for the steady-state distribution of labels at each point.
These algorithms perform very well in practice. Running them, however, can be
expensive, at approximately O(N^3) where N is the number of (labeled and
unlabeled) points. The theory (why they perform so well) is motivated by
intuitions from random walk algorithms and geometric relationships in the data.
For more information see the references below.
Model Features
--------------
Label clamping:
The algorithm tries to learn distributions of labels over the dataset. In the
"Hard Clamp" mode, the true ground labels are never allowed to change. They
are clamped into position. In the "Soft Clamp" mode, they are allowed some
wiggle room, but some alpha of their original value will always be retained.
Hard clamp is the same as soft clamping with alpha set to 1.
Kernel:
A function which projects a vector into some higher dimensional space. This
implementation supports RBF and KNN kernels. Using the RBF kernel generates
a dense matrix of size O(N^2). KNN kernel will generate a sparse matrix of
size O(k*N) which will run much faster. See the documentation for SVMs for
more info on kernels.
Examples
--------
>>> from sklearn import datasets
>>> from sklearn.semi_supervised import LabelPropagation
>>> label_prop_model = LabelPropagation()
>>> iris = datasets.load_iris()
>>> random_unlabeled_points = np.where(np.random.random_integers(0, 1,
... size=len(iris.target)))
>>> labels = np.copy(iris.target)
>>> labels[random_unlabeled_points] = -1
>>> label_prop_model.fit(iris.data, labels)
... # doctest: +NORMALIZE_WHITESPACE +ELLIPSIS
LabelPropagation(...)
Notes
-----
References:
[1] Yoshua Bengio, Olivier Delalleau, Nicolas Le Roux. In Semi-Supervised
Learning (2006), pp. 193-216
[2] Olivier Delalleau, Yoshua Bengio, Nicolas Le Roux. Efficient
Non-Parametric Function Induction in Semi-Supervised Learning. AISTAT 2005
"""
# Authors: Clay Woolam <clay@woolam.org>
# Licence: BSD
from abc import ABCMeta, abstractmethod
from scipy import sparse
import numpy as np
from ..base import BaseEstimator, ClassifierMixin
from ..metrics.pairwise import rbf_kernel
from ..utils.graph import graph_laplacian
from ..utils.extmath import safe_sparse_dot
from ..utils.validation import check_X_y, check_is_fitted
from ..externals import six
from ..neighbors.unsupervised import NearestNeighbors
### Helper functions
def _not_converged(y_truth, y_prediction, tol=1e-3):
"""basic convergence check"""
return np.abs(y_truth - y_prediction).sum() > tol
class BaseLabelPropagation(six.with_metaclass(ABCMeta, BaseEstimator,
ClassifierMixin)):
"""Base class for label propagation module.
Parameters
----------
kernel : {'knn', 'rbf'}
String identifier for kernel function to use.
Only 'rbf' and 'knn' kernels are currently supported.
gamma : float
Parameter for rbf kernel
alpha : float
Clamping factor
max_iter : float
Change maximum number of iterations allowed
tol : float
Convergence tolerance: threshold to consider the system at steady
state
n_neighbors : integer > 0
Parameter for knn kernel
"""
def __init__(self, kernel='rbf', gamma=20, n_neighbors=7,
alpha=1, max_iter=30, tol=1e-3):
self.max_iter = max_iter
self.tol = tol
# kernel parameters
self.kernel = kernel
self.gamma = gamma
self.n_neighbors = n_neighbors
# clamping factor
self.alpha = alpha
def _get_kernel(self, X, y=None):
if self.kernel == "rbf":
if y is None:
return rbf_kernel(X, X, gamma=self.gamma)
else:
return rbf_kernel(X, y, gamma=self.gamma)
elif self.kernel == "knn":
if self.nn_fit is None:
self.nn_fit = NearestNeighbors(self.n_neighbors).fit(X)
if y is None:
return self.nn_fit.kneighbors_graph(self.nn_fit._fit_X,
self.n_neighbors,
mode='connectivity')
else:
return self.nn_fit.kneighbors(y, return_distance=False)
else:
raise ValueError("%s is not a valid kernel. Only rbf and knn"
" are supported at this time" % self.kernel)
@abstractmethod
def _build_graph(self):
raise NotImplementedError("Graph construction must be implemented"
" to fit a label propagation model.")
def predict(self, X):
"""Performs inductive inference across the model.
Parameters
----------
X : array_like, shape = [n_samples, n_features]
Returns
-------
y : array_like, shape = [n_samples]
Predictions for input data
"""
probas = self.predict_proba(X)
return self.classes_[np.argmax(probas, axis=1)].ravel()
def predict_proba(self, X):
"""Predict probability for each possible outcome.
Compute the probability estimates for each single sample in X
and each possible outcome seen during training (categorical
distribution).
Parameters
----------
X : array_like, shape = [n_samples, n_features]
Returns
-------
probabilities : array, shape = [n_samples, n_classes]
Normalized probability distributions across
class labels
"""
check_is_fitted(self, 'X_')
if sparse.isspmatrix(X):
X_2d = X
else:
X_2d = np.atleast_2d(X)
weight_matrices = self._get_kernel(self.X_, X_2d)
if self.kernel == 'knn':
probabilities = []
for weight_matrix in weight_matrices:
ine = np.sum(self.label_distributions_[weight_matrix], axis=0)
probabilities.append(ine)
probabilities = np.array(probabilities)
else:
weight_matrices = weight_matrices.T
probabilities = np.dot(weight_matrices, self.label_distributions_)
normalizer = np.atleast_2d(np.sum(probabilities, axis=1)).T
probabilities /= normalizer
return probabilities
def fit(self, X, y):
"""Fit a semi-supervised label propagation model based
All the input data is provided matrix X (labeled and unlabeled)
and corresponding label matrix y with a dedicated marker value for
unlabeled samples.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
A {n_samples by n_samples} size matrix will be created from this
y : array_like, shape = [n_samples]
n_labeled_samples (unlabeled points are marked as -1)
All unlabeled samples will be transductively assigned labels
Returns
-------
self : returns an instance of self.
"""
X, y = check_X_y(X, y)
self.X_ = X
# actual graph construction (implementations should override this)
graph_matrix = self._build_graph()
# label construction
# construct a categorical distribution for classification only
classes = np.unique(y)
classes = (classes[classes != -1])
self.classes_ = classes
n_samples, n_classes = len(y), len(classes)
y = np.asarray(y)
unlabeled = y == -1
clamp_weights = np.ones((n_samples, 1))
clamp_weights[unlabeled, 0] = self.alpha
# initialize distributions
self.label_distributions_ = np.zeros((n_samples, n_classes))
for label in classes:
self.label_distributions_[y == label, classes == label] = 1
y_static = np.copy(self.label_distributions_)
if self.alpha > 0.:
y_static *= 1 - self.alpha
y_static[unlabeled] = 0
l_previous = np.zeros((self.X_.shape[0], n_classes))
remaining_iter = self.max_iter
if sparse.isspmatrix(graph_matrix):
graph_matrix = graph_matrix.tocsr()
while (_not_converged(self.label_distributions_, l_previous, self.tol)
and remaining_iter > 1):
l_previous = self.label_distributions_
self.label_distributions_ = safe_sparse_dot(
graph_matrix, self.label_distributions_)
# clamp
self.label_distributions_ = np.multiply(
clamp_weights, self.label_distributions_) + y_static
remaining_iter -= 1
normalizer = np.sum(self.label_distributions_, axis=1)[:, np.newaxis]
self.label_distributions_ /= normalizer
# set the transduction item
transduction = self.classes_[np.argmax(self.label_distributions_,
axis=1)]
self.transduction_ = transduction.ravel()
self.n_iter_ = self.max_iter - remaining_iter
return self
class LabelPropagation(BaseLabelPropagation):
"""Label Propagation classifier
Read more in the :ref:`User Guide <label_propagation>`.
Parameters
----------
kernel : {'knn', 'rbf'}
String identifier for kernel function to use.
Only 'rbf' and 'knn' kernels are currently supported.
gamma : float
Parameter for rbf kernel
n_neighbors : integer > 0
Parameter for knn kernel
alpha : float
Clamping factor
max_iter : float
Change maximum number of iterations allowed
tol : float
Convergence tolerance: threshold to consider the system at steady
state
Attributes
----------
X_ : array, shape = [n_samples, n_features]
Input array.
classes_ : array, shape = [n_classes]
The distinct labels used in classifying instances.
label_distributions_ : array, shape = [n_samples, n_classes]
Categorical distribution for each item.
transduction_ : array, shape = [n_samples]
Label assigned to each item via the transduction.
n_iter_ : int
Number of iterations run.
Examples
--------
>>> from sklearn import datasets
>>> from sklearn.semi_supervised import LabelPropagation
>>> label_prop_model = LabelPropagation()
>>> iris = datasets.load_iris()
>>> random_unlabeled_points = np.where(np.random.random_integers(0, 1,
... size=len(iris.target)))
>>> labels = np.copy(iris.target)
>>> labels[random_unlabeled_points] = -1
>>> label_prop_model.fit(iris.data, labels)
... # doctest: +NORMALIZE_WHITESPACE +ELLIPSIS
LabelPropagation(...)
References
----------
Xiaojin Zhu and Zoubin Ghahramani. Learning from labeled and unlabeled data
with label propagation. Technical Report CMU-CALD-02-107, Carnegie Mellon
University, 2002 http://pages.cs.wisc.edu/~jerryzhu/pub/CMU-CALD-02-107.pdf
See Also
--------
LabelSpreading : Alternate label propagation strategy more robust to noise
"""
def _build_graph(self):
"""Matrix representing a fully connected graph between each sample
This basic implementation creates a non-stochastic affinity matrix, so
class distributions will exceed 1 (normalization may be desired).
"""
if self.kernel == 'knn':
self.nn_fit = None
affinity_matrix = self._get_kernel(self.X_)
normalizer = affinity_matrix.sum(axis=0)
if sparse.isspmatrix(affinity_matrix):
affinity_matrix.data /= np.diag(np.array(normalizer))
else:
affinity_matrix /= normalizer[:, np.newaxis]
return affinity_matrix
class LabelSpreading(BaseLabelPropagation):
"""LabelSpreading model for semi-supervised learning
This model is similar to the basic Label Propagation algorithm,
but uses affinity matrix based on the normalized graph Laplacian
and soft clamping across the labels.
Read more in the :ref:`User Guide <label_propagation>`.
Parameters
----------
kernel : {'knn', 'rbf'}
String identifier for kernel function to use.
Only 'rbf' and 'knn' kernels are currently supported.
gamma : float
parameter for rbf kernel
n_neighbors : integer > 0
parameter for knn kernel
alpha : float
clamping factor
max_iter : float
maximum number of iterations allowed
tol : float
Convergence tolerance: threshold to consider the system at steady
state
Attributes
----------
X_ : array, shape = [n_samples, n_features]
Input array.
classes_ : array, shape = [n_classes]
The distinct labels used in classifying instances.
label_distributions_ : array, shape = [n_samples, n_classes]
Categorical distribution for each item.
transduction_ : array, shape = [n_samples]
Label assigned to each item via the transduction.
n_iter_ : int
Number of iterations run.
Examples
--------
>>> from sklearn import datasets
>>> from sklearn.semi_supervised import LabelSpreading
>>> label_prop_model = LabelSpreading()
>>> iris = datasets.load_iris()
>>> random_unlabeled_points = np.where(np.random.random_integers(0, 1,
... size=len(iris.target)))
>>> labels = np.copy(iris.target)
>>> labels[random_unlabeled_points] = -1
>>> label_prop_model.fit(iris.data, labels)
... # doctest: +NORMALIZE_WHITESPACE +ELLIPSIS
LabelSpreading(...)
References
----------
Dengyong Zhou, Olivier Bousquet, Thomas Navin Lal, Jason Weston,
Bernhard Schoelkopf. Learning with local and global consistency (2004)
http://citeseer.ist.psu.edu/viewdoc/summary?doi=10.1.1.115.3219
See Also
--------
LabelPropagation : Unregularized graph based semi-supervised learning
"""
def __init__(self, kernel='rbf', gamma=20, n_neighbors=7, alpha=0.2,
max_iter=30, tol=1e-3):
# this one has different base parameters
super(LabelSpreading, self).__init__(kernel=kernel, gamma=gamma,
n_neighbors=n_neighbors,
alpha=alpha, max_iter=max_iter,
tol=tol)
def _build_graph(self):
"""Graph matrix for Label Spreading computes the graph laplacian"""
# compute affinity matrix (or gram matrix)
if self.kernel == 'knn':
self.nn_fit = None
n_samples = self.X_.shape[0]
affinity_matrix = self._get_kernel(self.X_)
laplacian = graph_laplacian(affinity_matrix, normed=True)
laplacian = -laplacian
if sparse.isspmatrix(laplacian):
diag_mask = (laplacian.row == laplacian.col)
laplacian.data[diag_mask] = 0.0
else:
laplacian.flat[::n_samples + 1] = 0.0 # set diag to 0.0
return laplacian
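# Illustrative sketch (not part of the scikit-learn source): as the module
# docstring notes, the RBF kernel builds a dense O(N^2) affinity matrix,
# while the 'knn' kernel keeps it sparse at O(k*N); X and y (with -1 for
# unlabeled samples) are assumed to exist and the parameters are examples.
#
#   from sklearn.semi_supervised import LabelSpreading
#
#   model = LabelSpreading(kernel='knn', n_neighbors=10, alpha=0.2,
#                          max_iter=30, tol=1e-3)
#   model.fit(X, y)
#   pseudo_labels = model.transduction_   # labels assigned by transduction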
| bsd-3-clause |
guorendong/iridium-browser-ubuntu | third_party/pyftpdlib/src/demo/anti_flood_ftpd.py | 5 | 3760 | #!/usr/bin/env python
# $Id$
# pyftpdlib is released under the MIT license, reproduced below:
# ======================================================================
# Copyright (C) 2007-2012 Giampaolo Rodola' <g.rodola@gmail.com>
#
# All Rights Reserved
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation
# files (the "Software"), to deal in the Software without
# restriction, including without limitation the rights to use,
# copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following
# conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
# OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
# HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
# OTHER DEALINGS IN THE SOFTWARE.
#
# ======================================================================
"""
An FTP server that bans clients in case of command flooding.
If a client sends more than 300 requests per second it will be
disconnected and won't be able to re-connect for 1 hour.
"""
from pyftpdlib.ftpserver import FTPHandler, FTPServer, DummyAuthorizer, CallEvery
class AntiFloodHandler(FTPHandler):
cmds_per_second = 300 # max number of cmds per second
ban_for = 60 * 60 # 1 hour
banned_ips = []
def __init__(self, *args, **kwargs):
super(AntiFloodHandler, self).__init__(*args, **kwargs)
self.processed_cmds = 0
self.pcmds_callback = CallEvery(1, self.check_processed_cmds)
def handle(self):
# called when client connects.
if self.remote_ip in self.banned_ips:
self.respond('550 you are banned')
self.close()
else:
super(AntiFloodHandler, self).handle()
def check_processed_cmds(self):
# called every second; checks for the number of commands
# sent in the last second.
if self.processed_cmds > self.cmds_per_second:
self.ban(self.remote_ip)
else:
self.processed_cmds = 0
def process_command(self, *args, **kwargs):
# increase counter for every received command
self.processed_cmds += 1
super(AntiFloodHandler, self).process_command(*args, **kwargs)
def ban(self, ip):
# ban ip and schedule next un-ban
if ip not in self.banned_ips:
self.log('banned IP %s for command flooding' % ip)
self.respond('550 you are banned for %s seconds' % self.ban_for)
self.close()
self.banned_ips.append(ip)
def unban(self, ip):
# unban ip
try:
self.banned_ips.remove(ip)
except ValueError:
pass
else:
self.log('unbanning IP %s' % ip)
def close(self):
super(AntiFloodHandler, self).close()
if not self.pcmds_callback.cancelled:
self.pcmds_callback.cancel()
def main():
authorizer = DummyAuthorizer()
authorizer.add_user('user', '12345', '.', perm='elradfmw')
authorizer.add_anonymous('.')
handler = AntiFloodHandler
handler.authorizer = authorizer
ftpd = FTPServer(('', 21), handler)
ftpd.serve_forever(timeout=1)
if __name__ == '__main__':
main()
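# Note (not part of the original demo): as captured here, unban() is defined
# but never scheduled, so a banned IP stays banned for the lifetime of the
# process rather than for ban_for seconds. A minimal sketch of scheduling the
# re-admission from ban(), assuming pyftpdlib's CallLater is available next
# to CallEvery in this version:
#
#   from pyftpdlib.ftpserver import CallLater
#   ...
#   self.banned_ips.append(ip)
#   CallLater(self.ban_for, self.unban, ip)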
| bsd-3-clause |
switowski/invenio | invenio/modules/upgrader/upgrades/invenio_2013_09_13_new_bibEDITCACHE.py | 15 | 1408 | # -*- coding: utf-8 -*-
#
# This file is part of Invenio.
# Copyright (C) 2013 CERN.
#
# Invenio is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 2 of the
# License, or (at your option) any later version.
#
# Invenio is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Invenio; if not, write to the Free Software Foundation, Inc.,
# 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
from invenio.legacy.dbquery import run_sql
depends_on = ['invenio_release_1_1_0']
def info():
return "New bibedit cache (bibEDITCACHE) table"
def do_upgrade():
""" Implement your upgrades here """
run_sql("""CREATE TABLE IF NOT EXISTS `bibEDITCACHE` (
`id_bibrec` mediumint(8) unsigned NOT NULL,
`uid` int(15) unsigned NOT NULL,
`data` LONGBLOB,
`post_date` datetime NOT NULL,
`is_active` tinyint(1) NOT NULL DEFAULT 1,
PRIMARY KEY (`id_bibrec`, `uid`),
INDEX `post_date` (`post_date`)
) ENGINE=MyISAM""")
def estimate():
""" Estimate running time of upgrade in seconds (optional). """
return 1
| gpl-2.0 |
Antiun/hr | hr_contract_default_trial_length/models/hr_contract_type.py | 13 | 1141 | # -*- coding: utf-8 -*-
###############################################################################
#
# Copyright (C) 2015 Salton Massally (<smassally@idtlabs.sl>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
###############################################################################
from openerp import fields, models
class HrContractType(models.Model):
_inherit = 'hr.contract.type'
trial_length = fields.Integer(
help="Default contract trial length in Days"
)
| agpl-3.0 |
sffjunkie/home-assistant | tests/components/test_rfxtrx.py | 5 | 4095 | """The tests for the Rfxtrx component."""
# pylint: disable=too-many-public-methods,protected-access
import unittest
import time
from homeassistant.bootstrap import _setup_component
from homeassistant.components import rfxtrx as rfxtrx
from tests.common import get_test_home_assistant
class TestRFXTRX(unittest.TestCase):
"""Test the Rfxtrx component."""
def setUp(self):
"""Setup things to be run when tests are started."""
self.hass = get_test_home_assistant(0)
def tearDown(self):
"""Stop everything that was started."""
rfxtrx.RECEIVED_EVT_SUBSCRIBERS = []
rfxtrx.RFX_DEVICES = {}
if rfxtrx.RFXOBJECT:
rfxtrx.RFXOBJECT.close_connection()
self.hass.stop()
def test_default_config(self):
"""Test configuration."""
self.assertTrue(_setup_component(self.hass, 'rfxtrx', {
'rfxtrx': {
'device': '/dev/serial/by-id/usb' +
'-RFXCOM_RFXtrx433_A1Y0NJGR-if00-port0',
'dummy': True}
}))
self.assertTrue(_setup_component(self.hass, 'sensor', {
'sensor': {'platform': 'rfxtrx',
'automatic_add': True,
'devices': {}}}))
while len(rfxtrx.RFX_DEVICES) < 2:
time.sleep(0.1)
self.assertEqual(len(rfxtrx.RFXOBJECT.sensors()), 2)
def test_valid_config(self):
"""Test configuration."""
self.assertTrue(_setup_component(self.hass, 'rfxtrx', {
'rfxtrx': {
'device': '/dev/serial/by-id/usb' +
'-RFXCOM_RFXtrx433_A1Y0NJGR-if00-port0',
'dummy': True}}))
self.assertTrue(_setup_component(self.hass, 'rfxtrx', {
'rfxtrx': {
'device': '/dev/serial/by-id/usb' +
'-RFXCOM_RFXtrx433_A1Y0NJGR-if00-port0',
'dummy': True,
'debug': True}}))
def test_invalid_config(self):
"""Test configuration."""
self.assertFalse(_setup_component(self.hass, 'rfxtrx', {
'rfxtrx': {}
}))
self.assertFalse(_setup_component(self.hass, 'rfxtrx', {
'rfxtrx': {
'device': '/dev/serial/by-id/usb' +
'-RFXCOM_RFXtrx433_A1Y0NJGR-if00-port0',
'invalid_key': True}}))
def test_fire_event(self):
"""Test fire event."""
self.assertTrue(_setup_component(self.hass, 'rfxtrx', {
'rfxtrx': {
'device': '/dev/serial/by-id/usb' +
'-RFXCOM_RFXtrx433_A1Y0NJGR-if00-port0',
'dummy': True}
}))
self.assertTrue(_setup_component(self.hass, 'switch', {
'switch': {'platform': 'rfxtrx',
'automatic_add': True,
'devices':
{'0b1100cd0213c7f210010f51': {
'name': 'Test',
rfxtrx.ATTR_FIREEVENT: True}
}}}))
calls = []
def record_event(event):
"""Add recorded event to set."""
calls.append(event)
self.hass.bus.listen(rfxtrx.EVENT_BUTTON_PRESSED, record_event)
entity = rfxtrx.RFX_DEVICES['213c7f216']
self.assertEqual('Test', entity.name)
self.assertEqual('off', entity.state)
self.assertTrue(entity.should_fire_event)
event = rfxtrx.get_rfx_object('0b1100cd0213c7f210010f51')
event.data = bytearray([0x0b, 0x11, 0x00, 0x10, 0x01, 0x18,
0xcd, 0xea, 0x01, 0x01, 0x0f, 0x70])
rfxtrx.RECEIVED_EVT_SUBSCRIBERS[0](event)
self.hass.pool.block_till_done()
self.assertEqual(event.values['Command'], "On")
self.assertEqual('on', entity.state)
self.assertEqual(1, len(rfxtrx.RFX_DEVICES))
self.assertEqual(1, len(calls))
self.assertEqual(calls[0].data,
{'entity_id': 'switch.test', 'state': 'on'})
| mit |
valtech-mooc/edx-platform | common/djangoapps/student/tests/test_bulk_email_settings.py | 14 | 5792 | """
Unit tests for email feature flag in student dashboard. Additionally tests
that bulk email is always disabled for non-Mongo backed courses, regardless
of email feature flag, and that the view is conditionally available when
Course Auth is turned on.
"""
import unittest
from django.conf import settings
from django.core.urlresolvers import reverse
from mock import patch
from opaque_keys.edx.locations import SlashSeparatedCourseKey
from student.tests.factories import UserFactory, CourseEnrollmentFactory
from xmodule.modulestore.tests.django_utils import ModuleStoreTestCase
from xmodule.modulestore.tests.django_utils import TEST_DATA_MIXED_TOY_MODULESTORE
from xmodule.modulestore.tests.factories import CourseFactory
# This import is for an lms djangoapp.
# Its testcases are only run under lms.
from bulk_email.models import CourseAuthorization # pylint: disable=import-error
@unittest.skipUnless(settings.ROOT_URLCONF == 'lms.urls', 'Test only valid in lms')
class TestStudentDashboardEmailView(ModuleStoreTestCase):
"""
Check for email view displayed with flag
"""
def setUp(self):
super(TestStudentDashboardEmailView, self).setUp()
self.course = CourseFactory.create()
# Create student account
student = UserFactory.create()
CourseEnrollmentFactory.create(user=student, course_id=self.course.id)
self.client.login(username=student.username, password="test")
self.url = reverse('dashboard')
# URL for email settings modal
self.email_modal_link = (
'<a href="#email-settings-modal" class="action action-email-settings" rel="leanModal" '
'data-course-id="{org}/{num}/{name}" data-course-number="{num}" '
'data-dashboard-index="0" data-optout="False">Email Settings</a>'
).format(
org=self.course.org,
num=self.course.number,
name=self.course.display_name.replace(' ', '_'),
)
def tearDown(self):
"""
Undo all patches.
"""
patch.stopall()
@patch.dict(settings.FEATURES, {'ENABLE_INSTRUCTOR_EMAIL': True, 'REQUIRE_COURSE_EMAIL_AUTH': False})
def test_email_flag_true(self):
# Assert that the URL for the email view is in the response
response = self.client.get(self.url)
self.assertTrue(self.email_modal_link in response.content)
@patch.dict(settings.FEATURES, {'ENABLE_INSTRUCTOR_EMAIL': False})
def test_email_flag_false(self):
# Assert that the URL for the email view is not in the response
response = self.client.get(self.url)
self.assertFalse(self.email_modal_link in response.content)
@patch.dict(settings.FEATURES, {'ENABLE_INSTRUCTOR_EMAIL': True, 'REQUIRE_COURSE_EMAIL_AUTH': True})
def test_email_unauthorized(self):
# Assert that instructor email is not enabled for this course
self.assertFalse(CourseAuthorization.instructor_email_enabled(self.course.id))
# Assert that the URL for the email view is not in the response
# if this course isn't authorized
response = self.client.get(self.url)
self.assertFalse(self.email_modal_link in response.content)
@patch.dict(settings.FEATURES, {'ENABLE_INSTRUCTOR_EMAIL': True, 'REQUIRE_COURSE_EMAIL_AUTH': True})
def test_email_authorized(self):
# Authorize the course to use email
cauth = CourseAuthorization(course_id=self.course.id, email_enabled=True)
cauth.save()
# Assert that instructor email is enabled for this course
self.assertTrue(CourseAuthorization.instructor_email_enabled(self.course.id))
# Assert that the URL for the email view is not in the response
# if this course isn't authorized
response = self.client.get(self.url)
self.assertTrue(self.email_modal_link in response.content)
@unittest.skipUnless(settings.ROOT_URLCONF == 'lms.urls', 'Test only valid in lms')
class TestStudentDashboardEmailViewXMLBacked(ModuleStoreTestCase):
"""
Check for email view on student dashboard, with XML backed course.
"""
MODULESTORE = TEST_DATA_MIXED_TOY_MODULESTORE
def setUp(self):
self.course_name = 'edX/toy/2012_Fall'
# Create student account
student = UserFactory.create()
CourseEnrollmentFactory.create(
user=student,
course_id=SlashSeparatedCourseKey.from_deprecated_string(self.course_name)
)
self.client.login(username=student.username, password="test")
self.url = reverse('dashboard')
# URL for email settings modal
self.email_modal_link = (
'<a href="#email-settings-modal" class="action action-email-settings" rel="leanModal" '
'data-course-id="{org}/{num}/{name}" data-course-number="{num}" '
'data-dashboard-index="0" data-optout="False">Email Settings</a>'
).format(
org='edX',
num='toy',
name='2012_Fall',
)
@patch.dict(settings.FEATURES, {'ENABLE_INSTRUCTOR_EMAIL': True, 'REQUIRE_COURSE_EMAIL_AUTH': False})
def test_email_flag_true_xml_store(self):
# The flag is enabled, and since REQUIRE_COURSE_EMAIL_AUTH is False, all courses should
# be authorized to use email. But the course is not Mongo-backed (should not work)
response = self.client.get(self.url)
self.assertFalse(self.email_modal_link in response.content)
@patch.dict(settings.FEATURES, {'ENABLE_INSTRUCTOR_EMAIL': False, 'REQUIRE_COURSE_EMAIL_AUTH': False})
def test_email_flag_false_xml_store(self):
# Email disabled, shouldn't see link.
response = self.client.get(self.url)
self.assertFalse(self.email_modal_link in response.content)
| agpl-3.0 |
broferek/ansible | test/units/modules/network/fortios/test_fortios_alertemail_setting.py | 21 | 16780 | # Copyright 2019 Fortinet, Inc.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <https://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import os
import json
import pytest
from mock import ANY
from ansible.module_utils.network.fortios.fortios import FortiOSHandler
try:
from ansible.modules.network.fortios import fortios_alertemail_setting
except ImportError:
pytest.skip("Could not load required modules for testing", allow_module_level=True)
@pytest.fixture(autouse=True)
def connection_mock(mocker):
connection_class_mock = mocker.patch('ansible.modules.network.fortios.fortios_alertemail_setting.Connection')
return connection_class_mock
fos_instance = FortiOSHandler(connection_mock)
def test_alertemail_setting_creation(mocker):
schema_method_mock = mocker.patch('ansible.module_utils.network.fortios.fortios.FortiOSHandler.schema')
set_method_result = {'status': 'success', 'http_method': 'POST', 'http_status': 200}
set_method_mock = mocker.patch('ansible.module_utils.network.fortios.fortios.FortiOSHandler.set', return_value=set_method_result)
input_data = {
'username': 'admin',
'state': 'present',
'alertemail_setting': {
'admin_login_logs': 'enable',
'alert_interval': '4',
'amc_interface_bypass_mode': 'enable',
'antivirus_logs': 'enable',
'configuration_changes_logs': 'enable',
'critical_interval': '8',
'debug_interval': '9',
'email_interval': '10',
'emergency_interval': '11',
'error_interval': '12',
'FDS_license_expiring_days': '13',
'FDS_license_expiring_warning': 'enable',
'FDS_update_logs': 'enable',
'filter_mode': 'category',
'FIPS_CC_errors': 'enable',
'firewall_authentication_failure_logs': 'enable',
'fortiguard_log_quota_warning': 'enable',
'FSSO_disconnect_logs': 'enable',
'HA_logs': 'enable',
'information_interval': '22',
'IPS_logs': 'enable',
'IPsec_errors_logs': 'enable',
'local_disk_usage': '25',
'log_disk_usage_warning': 'enable',
'mailto1': 'test_value_27',
'mailto2': 'test_value_28',
'mailto3': 'test_value_29',
'notification_interval': '30',
'PPP_errors_logs': 'enable',
'severity': 'emergency',
'ssh_logs': 'enable',
'sslvpn_authentication_errors_logs': 'enable',
'username': 'test_value_35',
'violation_traffic_logs': 'enable',
'warning_interval': '37',
'webfilter_logs': 'enable'
},
'vdom': 'root'}
is_error, changed, response = fortios_alertemail_setting.fortios_alertemail(input_data, fos_instance)
expected_data = {
'admin-login-logs': 'enable',
'alert-interval': '4',
'amc-interface-bypass-mode': 'enable',
'antivirus-logs': 'enable',
'configuration-changes-logs': 'enable',
'critical-interval': '8',
'debug-interval': '9',
'email-interval': '10',
'emergency-interval': '11',
'error-interval': '12',
'FDS-license-expiring-days': '13',
'FDS-license-expiring-warning': 'enable',
'FDS-update-logs': 'enable',
'filter-mode': 'category',
'FIPS-CC-errors': 'enable',
'firewall-authentication-failure-logs': 'enable',
'fortiguard-log-quota-warning': 'enable',
'FSSO-disconnect-logs': 'enable',
'HA-logs': 'enable',
'information-interval': '22',
'IPS-logs': 'enable',
'IPsec-errors-logs': 'enable',
'local-disk-usage': '25',
'log-disk-usage-warning': 'enable',
'mailto1': 'test_value_27',
'mailto2': 'test_value_28',
'mailto3': 'test_value_29',
'notification-interval': '30',
'PPP-errors-logs': 'enable',
'severity': 'emergency',
'ssh-logs': 'enable',
'sslvpn-authentication-errors-logs': 'enable',
'username': 'test_value_35',
'violation-traffic-logs': 'enable',
'warning-interval': '37',
'webfilter-logs': 'enable'
}
set_method_mock.assert_called_with('alertemail', 'setting', data=expected_data, vdom='root')
schema_method_mock.assert_not_called()
assert not is_error
assert changed
assert response['status'] == 'success'
assert response['http_status'] == 200
def test_alertemail_setting_creation_fails(mocker):
schema_method_mock = mocker.patch('ansible.module_utils.network.fortios.fortios.FortiOSHandler.schema')
set_method_result = {'status': 'error', 'http_method': 'POST', 'http_status': 500}
set_method_mock = mocker.patch('ansible.module_utils.network.fortios.fortios.FortiOSHandler.set', return_value=set_method_result)
input_data = {
'username': 'admin',
'state': 'present',
'alertemail_setting': {
'admin_login_logs': 'enable',
'alert_interval': '4',
'amc_interface_bypass_mode': 'enable',
'antivirus_logs': 'enable',
'configuration_changes_logs': 'enable',
'critical_interval': '8',
'debug_interval': '9',
'email_interval': '10',
'emergency_interval': '11',
'error_interval': '12',
'FDS_license_expiring_days': '13',
'FDS_license_expiring_warning': 'enable',
'FDS_update_logs': 'enable',
'filter_mode': 'category',
'FIPS_CC_errors': 'enable',
'firewall_authentication_failure_logs': 'enable',
'fortiguard_log_quota_warning': 'enable',
'FSSO_disconnect_logs': 'enable',
'HA_logs': 'enable',
'information_interval': '22',
'IPS_logs': 'enable',
'IPsec_errors_logs': 'enable',
'local_disk_usage': '25',
'log_disk_usage_warning': 'enable',
'mailto1': 'test_value_27',
'mailto2': 'test_value_28',
'mailto3': 'test_value_29',
'notification_interval': '30',
'PPP_errors_logs': 'enable',
'severity': 'emergency',
'ssh_logs': 'enable',
'sslvpn_authentication_errors_logs': 'enable',
'username': 'test_value_35',
'violation_traffic_logs': 'enable',
'warning_interval': '37',
'webfilter_logs': 'enable'
},
'vdom': 'root'}
is_error, changed, response = fortios_alertemail_setting.fortios_alertemail(input_data, fos_instance)
expected_data = {
'admin-login-logs': 'enable',
'alert-interval': '4',
'amc-interface-bypass-mode': 'enable',
'antivirus-logs': 'enable',
'configuration-changes-logs': 'enable',
'critical-interval': '8',
'debug-interval': '9',
'email-interval': '10',
'emergency-interval': '11',
'error-interval': '12',
'FDS-license-expiring-days': '13',
'FDS-license-expiring-warning': 'enable',
'FDS-update-logs': 'enable',
'filter-mode': 'category',
'FIPS-CC-errors': 'enable',
'firewall-authentication-failure-logs': 'enable',
'fortiguard-log-quota-warning': 'enable',
'FSSO-disconnect-logs': 'enable',
'HA-logs': 'enable',
'information-interval': '22',
'IPS-logs': 'enable',
'IPsec-errors-logs': 'enable',
'local-disk-usage': '25',
'log-disk-usage-warning': 'enable',
'mailto1': 'test_value_27',
'mailto2': 'test_value_28',
'mailto3': 'test_value_29',
'notification-interval': '30',
'PPP-errors-logs': 'enable',
'severity': 'emergency',
'ssh-logs': 'enable',
'sslvpn-authentication-errors-logs': 'enable',
'username': 'test_value_35',
'violation-traffic-logs': 'enable',
'warning-interval': '37',
'webfilter-logs': 'enable'
}
set_method_mock.assert_called_with('alertemail', 'setting', data=expected_data, vdom='root')
schema_method_mock.assert_not_called()
assert is_error
assert not changed
assert response['status'] == 'error'
assert response['http_status'] == 500
def test_alertemail_setting_idempotent(mocker):
schema_method_mock = mocker.patch('ansible.module_utils.network.fortios.fortios.FortiOSHandler.schema')
set_method_result = {'status': 'error', 'http_method': 'DELETE', 'http_status': 404}
set_method_mock = mocker.patch('ansible.module_utils.network.fortios.fortios.FortiOSHandler.set', return_value=set_method_result)
input_data = {
'username': 'admin',
'state': 'present',
'alertemail_setting': {
'admin_login_logs': 'enable',
'alert_interval': '4',
'amc_interface_bypass_mode': 'enable',
'antivirus_logs': 'enable',
'configuration_changes_logs': 'enable',
'critical_interval': '8',
'debug_interval': '9',
'email_interval': '10',
'emergency_interval': '11',
'error_interval': '12',
'FDS_license_expiring_days': '13',
'FDS_license_expiring_warning': 'enable',
'FDS_update_logs': 'enable',
'filter_mode': 'category',
'FIPS_CC_errors': 'enable',
'firewall_authentication_failure_logs': 'enable',
'fortiguard_log_quota_warning': 'enable',
'FSSO_disconnect_logs': 'enable',
'HA_logs': 'enable',
'information_interval': '22',
'IPS_logs': 'enable',
'IPsec_errors_logs': 'enable',
'local_disk_usage': '25',
'log_disk_usage_warning': 'enable',
'mailto1': 'test_value_27',
'mailto2': 'test_value_28',
'mailto3': 'test_value_29',
'notification_interval': '30',
'PPP_errors_logs': 'enable',
'severity': 'emergency',
'ssh_logs': 'enable',
'sslvpn_authentication_errors_logs': 'enable',
'username': 'test_value_35',
'violation_traffic_logs': 'enable',
'warning_interval': '37',
'webfilter_logs': 'enable'
},
'vdom': 'root'}
is_error, changed, response = fortios_alertemail_setting.fortios_alertemail(input_data, fos_instance)
expected_data = {
'admin-login-logs': 'enable',
'alert-interval': '4',
'amc-interface-bypass-mode': 'enable',
'antivirus-logs': 'enable',
'configuration-changes-logs': 'enable',
'critical-interval': '8',
'debug-interval': '9',
'email-interval': '10',
'emergency-interval': '11',
'error-interval': '12',
'FDS-license-expiring-days': '13',
'FDS-license-expiring-warning': 'enable',
'FDS-update-logs': 'enable',
'filter-mode': 'category',
'FIPS-CC-errors': 'enable',
'firewall-authentication-failure-logs': 'enable',
'fortiguard-log-quota-warning': 'enable',
'FSSO-disconnect-logs': 'enable',
'HA-logs': 'enable',
'information-interval': '22',
'IPS-logs': 'enable',
'IPsec-errors-logs': 'enable',
'local-disk-usage': '25',
'log-disk-usage-warning': 'enable',
'mailto1': 'test_value_27',
'mailto2': 'test_value_28',
'mailto3': 'test_value_29',
'notification-interval': '30',
'PPP-errors-logs': 'enable',
'severity': 'emergency',
'ssh-logs': 'enable',
'sslvpn-authentication-errors-logs': 'enable',
'username': 'test_value_35',
'violation-traffic-logs': 'enable',
'warning-interval': '37',
'webfilter-logs': 'enable'
}
set_method_mock.assert_called_with('alertemail', 'setting', data=expected_data, vdom='root')
schema_method_mock.assert_not_called()
assert not is_error
assert not changed
assert response['status'] == 'error'
assert response['http_status'] == 404
def test_alertemail_setting_filter_foreign_attributes(mocker):
schema_method_mock = mocker.patch('ansible.module_utils.network.fortios.fortios.FortiOSHandler.schema')
set_method_result = {'status': 'success', 'http_method': 'POST', 'http_status': 200}
set_method_mock = mocker.patch('ansible.module_utils.network.fortios.fortios.FortiOSHandler.set', return_value=set_method_result)
input_data = {
'username': 'admin',
'state': 'present',
'alertemail_setting': {
'random_attribute_not_valid': 'tag',
'admin_login_logs': 'enable',
'alert_interval': '4',
'amc_interface_bypass_mode': 'enable',
'antivirus_logs': 'enable',
'configuration_changes_logs': 'enable',
'critical_interval': '8',
'debug_interval': '9',
'email_interval': '10',
'emergency_interval': '11',
'error_interval': '12',
'FDS_license_expiring_days': '13',
'FDS_license_expiring_warning': 'enable',
'FDS_update_logs': 'enable',
'filter_mode': 'category',
'FIPS_CC_errors': 'enable',
'firewall_authentication_failure_logs': 'enable',
'fortiguard_log_quota_warning': 'enable',
'FSSO_disconnect_logs': 'enable',
'HA_logs': 'enable',
'information_interval': '22',
'IPS_logs': 'enable',
'IPsec_errors_logs': 'enable',
'local_disk_usage': '25',
'log_disk_usage_warning': 'enable',
'mailto1': 'test_value_27',
'mailto2': 'test_value_28',
'mailto3': 'test_value_29',
'notification_interval': '30',
'PPP_errors_logs': 'enable',
'severity': 'emergency',
'ssh_logs': 'enable',
'sslvpn_authentication_errors_logs': 'enable',
'username': 'test_value_35',
'violation_traffic_logs': 'enable',
'warning_interval': '37',
'webfilter_logs': 'enable'
},
'vdom': 'root'}
is_error, changed, response = fortios_alertemail_setting.fortios_alertemail(input_data, fos_instance)
expected_data = {
'admin-login-logs': 'enable',
'alert-interval': '4',
'amc-interface-bypass-mode': 'enable',
'antivirus-logs': 'enable',
'configuration-changes-logs': 'enable',
'critical-interval': '8',
'debug-interval': '9',
'email-interval': '10',
'emergency-interval': '11',
'error-interval': '12',
'FDS-license-expiring-days': '13',
'FDS-license-expiring-warning': 'enable',
'FDS-update-logs': 'enable',
'filter-mode': 'category',
'FIPS-CC-errors': 'enable',
'firewall-authentication-failure-logs': 'enable',
'fortiguard-log-quota-warning': 'enable',
'FSSO-disconnect-logs': 'enable',
'HA-logs': 'enable',
'information-interval': '22',
'IPS-logs': 'enable',
'IPsec-errors-logs': 'enable',
'local-disk-usage': '25',
'log-disk-usage-warning': 'enable',
'mailto1': 'test_value_27',
'mailto2': 'test_value_28',
'mailto3': 'test_value_29',
'notification-interval': '30',
'PPP-errors-logs': 'enable',
'severity': 'emergency',
'ssh-logs': 'enable',
'sslvpn-authentication-errors-logs': 'enable',
'username': 'test_value_35',
'violation-traffic-logs': 'enable',
'warning-interval': '37',
'webfilter-logs': 'enable'
}
set_method_mock.assert_called_with('alertemail', 'setting', data=expected_data, vdom='root')
schema_method_mock.assert_not_called()
assert not is_error
assert changed
assert response['status'] == 'success'
assert response['http_status'] == 200
| gpl-3.0 |
mchristopher/PokemonGo-DesktopMap | app/pywin/Lib/idlelib/idle_test/test_formatparagraph.py | 7 | 14343 | # Test the functions and main class method of FormatParagraph.py
import unittest
from idlelib import FormatParagraph as fp
from idlelib.EditorWindow import EditorWindow
from Tkinter import Tk, Text
from test.test_support import requires
class Is_Get_Test(unittest.TestCase):
"""Test the is_ and get_ functions"""
test_comment = '# This is a comment'
test_nocomment = 'This is not a comment'
trailingws_comment = '# This is a comment '
leadingws_comment = ' # This is a comment'
leadingws_nocomment = ' This is not a comment'
def test_is_all_white(self):
self.assertTrue(fp.is_all_white(''))
self.assertTrue(fp.is_all_white('\t\n\r\f\v'))
self.assertFalse(fp.is_all_white(self.test_comment))
def test_get_indent(self):
Equal = self.assertEqual
Equal(fp.get_indent(self.test_comment), '')
Equal(fp.get_indent(self.trailingws_comment), '')
Equal(fp.get_indent(self.leadingws_comment), ' ')
Equal(fp.get_indent(self.leadingws_nocomment), ' ')
def test_get_comment_header(self):
Equal = self.assertEqual
# Test comment strings
Equal(fp.get_comment_header(self.test_comment), '#')
Equal(fp.get_comment_header(self.trailingws_comment), '#')
Equal(fp.get_comment_header(self.leadingws_comment), ' #')
# Test non-comment strings
Equal(fp.get_comment_header(self.leadingws_nocomment), ' ')
Equal(fp.get_comment_header(self.test_nocomment), '')
class FindTest(unittest.TestCase):
"""Test the find_paragraph function in FormatParagraph.
Using the runcase() function, find_paragraph() is called with 'mark' set at
multiple indexes before and inside the test paragraph.
It appears that code with the same indentation as a quoted string is grouped
as part of the same paragraph, which is probably incorrect behavior.
"""
@classmethod
def setUpClass(cls):
from idlelib.idle_test.mock_tk import Text
cls.text = Text()
def runcase(self, inserttext, stopline, expected):
# Check that find_paragraph returns the expected paragraph when
# the mark index is set to beginning, middle, end of each line
# up to but not including the stop line
text = self.text
text.insert('1.0', inserttext)
for line in range(1, stopline):
linelength = int(text.index("%d.end" % line).split('.')[1])
for col in (0, linelength//2, linelength):
tempindex = "%d.%d" % (line, col)
self.assertEqual(fp.find_paragraph(text, tempindex), expected)
text.delete('1.0', 'end')
def test_find_comment(self):
comment = (
"# Comment block with no blank lines before\n"
"# Comment line\n"
"\n")
self.runcase(comment, 3, ('1.0', '3.0', '#', comment[0:58]))
comment = (
"\n"
"# Comment block with whitespace line before and after\n"
"# Comment line\n"
"\n")
self.runcase(comment, 4, ('2.0', '4.0', '#', comment[1:70]))
comment = (
"\n"
" # Indented comment block with whitespace before and after\n"
" # Comment line\n"
"\n")
self.runcase(comment, 4, ('2.0', '4.0', ' #', comment[1:82]))
comment = (
"\n"
"# Single line comment\n"
"\n")
self.runcase(comment, 3, ('2.0', '3.0', '#', comment[1:23]))
comment = (
"\n"
" # Single line comment with leading whitespace\n"
"\n")
self.runcase(comment, 3, ('2.0', '3.0', ' #', comment[1:51]))
comment = (
"\n"
"# Comment immediately followed by code\n"
"x = 42\n"
"\n")
self.runcase(comment, 3, ('2.0', '3.0', '#', comment[1:40]))
comment = (
"\n"
" # Indented comment immediately followed by code\n"
"x = 42\n"
"\n")
self.runcase(comment, 3, ('2.0', '3.0', ' #', comment[1:53]))
comment = (
"\n"
"# Comment immediately followed by indented code\n"
" x = 42\n"
"\n")
self.runcase(comment, 3, ('2.0', '3.0', '#', comment[1:49]))
def test_find_paragraph(self):
teststring = (
'"""String with no blank lines before\n'
'String line\n'
'"""\n'
'\n')
self.runcase(teststring, 4, ('1.0', '4.0', '', teststring[0:53]))
teststring = (
"\n"
'"""String with whitespace line before and after\n'
'String line.\n'
'"""\n'
'\n')
self.runcase(teststring, 5, ('2.0', '5.0', '', teststring[1:66]))
teststring = (
'\n'
' """Indented string with whitespace before and after\n'
' Comment string.\n'
' """\n'
'\n')
self.runcase(teststring, 5, ('2.0', '5.0', ' ', teststring[1:85]))
teststring = (
'\n'
'"""Single line string."""\n'
'\n')
self.runcase(teststring, 3, ('2.0', '3.0', '', teststring[1:27]))
teststring = (
'\n'
' """Single line string with leading whitespace."""\n'
'\n')
self.runcase(teststring, 3, ('2.0', '3.0', ' ', teststring[1:55]))
class ReformatFunctionTest(unittest.TestCase):
"""Test the reformat_paragraph function without the editor window."""
def test_reformat_paragrah(self):
Equal = self.assertEqual
reform = fp.reformat_paragraph
hw = "O hello world"
Equal(reform(' ', 1), ' ')
Equal(reform("Hello world", 20), "Hello world")
# Test without leading newline
Equal(reform(hw, 1), "O\nhello\nworld")
Equal(reform(hw, 6), "O\nhello\nworld")
Equal(reform(hw, 7), "O hello\nworld")
Equal(reform(hw, 12), "O hello\nworld")
Equal(reform(hw, 13), "O hello world")
# Test with leading newline
hw = "\nO hello world"
Equal(reform(hw, 1), "\nO\nhello\nworld")
Equal(reform(hw, 6), "\nO\nhello\nworld")
Equal(reform(hw, 7), "\nO hello\nworld")
Equal(reform(hw, 12), "\nO hello\nworld")
Equal(reform(hw, 13), "\nO hello world")
class ReformatCommentTest(unittest.TestCase):
"""Test the reformat_comment function without the editor window."""
def test_reformat_comment(self):
Equal = self.assertEqual
# reformat_comment formats to a minimum of 20 characters
test_string = (
" \"\"\"this is a test of a reformat for a triple quoted string"
" will it reformat to less than 70 characters for me?\"\"\"")
result = fp.reformat_comment(test_string, 70, " ")
expected = (
" \"\"\"this is a test of a reformat for a triple quoted string will it\n"
" reformat to less than 70 characters for me?\"\"\"")
Equal(result, expected)
test_comment = (
"# this is a test of a reformat for a triple quoted string will "
"it reformat to less than 70 characters for me?")
result = fp.reformat_comment(test_comment, 70, "#")
expected = (
"# this is a test of a reformat for a triple quoted string will it\n"
"# reformat to less than 70 characters for me?")
Equal(result, expected)
class FormatClassTest(unittest.TestCase):
def test_init_close(self):
instance = fp.FormatParagraph('editor')
self.assertEqual(instance.editwin, 'editor')
instance.close()
self.assertEqual(instance.editwin, None)
# For testing format_paragraph_event, Initialize FormatParagraph with
# a mock Editor with .text and .get_selection_indices. The text must
# be a Text wrapper that adds two methods
# A real EditorWindow creates unneeded, time-consuming baggage and
# sometimes emits shutdown warnings like this:
# "warning: callback failed in WindowList <class '_tkinter.TclError'>
# : invalid command name ".55131368.windows".
# Calling EditorWindow._close in tearDownClass prevents this but causes
# other problems (windows left open).
class TextWrapper:
def __init__(self, master):
self.text = Text(master=master)
def __getattr__(self, name):
return getattr(self.text, name)
def undo_block_start(self): pass
def undo_block_stop(self): pass
class Editor:
def __init__(self, root):
self.text = TextWrapper(root)
get_selection_indices = EditorWindow.get_selection_indices.im_func
class FormatEventTest(unittest.TestCase):
"""Test the formatting of text inside a Text widget.
This is done with FormatParagraph.format.paragraph_event,
which calls functions in the module as appropriate.
"""
test_string = (
" '''this is a test of a reformat for a triple "
"quoted string will it reformat to less than 70 "
"characters for me?'''\n")
multiline_test_string = (
" '''The first line is under the max width.\n"
" The second line's length is way over the max width. It goes "
"on and on until it is over 100 characters long.\n"
" Same thing with the third line. It is also way over the max "
"width, but FormatParagraph will fix it.\n"
" '''\n")
multiline_test_comment = (
"# The first line is under the max width.\n"
"# The second line's length is way over the max width. It goes on "
"and on until it is over 100 characters long.\n"
"# Same thing with the third line. It is also way over the max "
"width, but FormatParagraph will fix it.\n"
"# The fourth line is short like the first line.")
@classmethod
def setUpClass(cls):
requires('gui')
cls.root = Tk()
editor = Editor(root=cls.root)
cls.text = editor.text.text # Test code does not need the wrapper.
cls.formatter = fp.FormatParagraph(editor).format_paragraph_event
# Sets the insert mark just after the re-wrapped and inserted text.
@classmethod
def tearDownClass(cls):
del cls.text, cls.formatter
cls.root.destroy()
del cls.root
def test_short_line(self):
self.text.insert('1.0', "Short line\n")
self.formatter("Dummy")
self.assertEqual(self.text.get('1.0', 'insert'), "Short line\n" )
self.text.delete('1.0', 'end')
def test_long_line(self):
text = self.text
# Set cursor ('insert' mark) to '1.0', within text.
text.insert('1.0', self.test_string)
text.mark_set('insert', '1.0')
self.formatter('ParameterDoesNothing', limit=70)
result = text.get('1.0', 'insert')
# find function includes \n
expected = (
" '''this is a test of a reformat for a triple quoted string will it\n"
" reformat to less than 70 characters for me?'''\n") # yes
self.assertEqual(result, expected)
text.delete('1.0', 'end')
# Select from 1.11 to line end.
text.insert('1.0', self.test_string)
text.tag_add('sel', '1.11', '1.end')
self.formatter('ParameterDoesNothing', limit=70)
result = text.get('1.0', 'insert')
# selection excludes \n
expected = (
" '''this is a test of a reformat for a triple quoted string will it reformat\n"
" to less than 70 characters for me?'''") # no
self.assertEqual(result, expected)
text.delete('1.0', 'end')
def test_multiple_lines(self):
text = self.text
# Select 2 long lines.
text.insert('1.0', self.multiline_test_string)
text.tag_add('sel', '2.0', '4.0')
self.formatter('ParameterDoesNothing', limit=70)
result = text.get('2.0', 'insert')
expected = (
" The second line's length is way over the max width. It goes on and\n"
" on until it is over 100 characters long. Same thing with the third\n"
" line. It is also way over the max width, but FormatParagraph will\n"
" fix it.\n")
self.assertEqual(result, expected)
text.delete('1.0', 'end')
def test_comment_block(self):
text = self.text
# Set cursor ('insert') to '1.0', within block.
text.insert('1.0', self.multiline_test_comment)
self.formatter('ParameterDoesNothing', limit=70)
result = text.get('1.0', 'insert')
expected = (
"# The first line is under the max width. The second line's length is\n"
"# way over the max width. It goes on and on until it is over 100\n"
"# characters long. Same thing with the third line. It is also way over\n"
"# the max width, but FormatParagraph will fix it. The fourth line is\n"
"# short like the first line.\n")
self.assertEqual(result, expected)
text.delete('1.0', 'end')
# Select line 2, verify line 1 unaffected.
text.insert('1.0', self.multiline_test_comment)
text.tag_add('sel', '2.0', '3.0')
self.formatter('ParameterDoesNothing', limit=70)
result = text.get('1.0', 'insert')
expected = (
"# The first line is under the max width.\n"
"# The second line's length is way over the max width. It goes on and\n"
"# on until it is over 100 characters long.\n")
self.assertEqual(result, expected)
text.delete('1.0', 'end')
# The following block worked with EditorWindow but fails with the mock.
# Lines 2 and 3 get pasted together even though the previous block left
# the previous line alone. More investigation is needed.
## # Select lines 3 and 4
## text.insert('1.0', self.multiline_test_comment)
## text.tag_add('sel', '3.0', '5.0')
## self.formatter('ParameterDoesNothing')
## result = text.get('3.0', 'insert')
## expected = (
##"# Same thing with the third line. It is also way over the max width,\n"
##"# but FormatParagraph will fix it. The fourth line is short like the\n"
##"# first line.\n")
## self.assertEqual(result, expected)
## text.delete('1.0', 'end')
if __name__ == '__main__':
unittest.main(verbosity=2, exit=2)
| mit |
mfalaize/homelab | carnet_auto/urls.py | 2 | 1618 | from django.conf.urls import url
from carnet_auto import views
urlpatterns = [
url(r'^$', views.Home.as_view(), name='home'),
url(r'^ajout_voiture/$', views.AjoutVoiture.as_view(), name='ajout_voiture'),
url(r'^edite_voiture/(?P<pk>[0-9]+)/$', views.EditeVoiture.as_view(), name='edite_voiture'),
url(r'^supprime_voiture/(?P<pk>[0-9]+)/$', views.SupprimeVoiture.as_view(), name='supprime_voiture'),
url(r'^voiture/(?P<pk>[0-9]+)/$', views.ManageVoiture.as_view(), name='voiture'),
url(r'^voiture/(?P<pk>[0-9]+)/programme/$', views.ManageProgrammeMaintenance.as_view(), name='programme'),
url(r'^voiture/(?P<pk>[0-9]+)/ajout_programme/$', views.AjoutProgrammeMaintenance.as_view(),
name='ajout_programme'),
url(r'^voiture/(?P<pk>[0-9]+)/edite_programme/(?P<pk_programme>[0-9]+)$', views.EditeProgrammeMaintenance.as_view(),
name='edite_programme'),
url(r'^voiture/(?P<pk>[0-9]+)/supprime_programme/(?P<pk_programme>[0-9]+)$',
views.SupprimeProgrammeMaintenance.as_view(), name='supprime_programme'),
url(r'^voiture/(?P<pk>[0-9]+)/ajout_revision/$', views.AjoutRevision.as_view(),
name='ajout_revision'),
url(r'^voiture/(?P<pk>[0-9]+)/edite_revision/(?P<pk_revision>[0-9]+)$', views.EditeRevision.as_view(),
name='edite_revision'),
url(r'^voiture/(?P<pk>[0-9]+)/supprime_revision/(?P<pk_revision>[0-9]+)$',
views.SupprimeRevision.as_view(), name='supprime_revision'),
url(r'^voiture/(?P<pk>[0-9]+)/supprime_operation/(?P<pk_operation>[0-9]+)$',
views.SupprimeOperation.as_view(), name='supprime_operation'),
]
| gpl-3.0 |
twobob/buildroot-kindle | output/build/host-python-2.7.2/Demo/tix/samples/BtnBox.py | 9 | 1565 | # -*-mode: python; fill-column: 75; tab-width: 8; coding: iso-latin-1-unix -*-
#
# $Id$
#
# Tix Demonstration Program
#
# This sample program is structured in such a way that it can be
# executed from the Tix demo program "tixwidgets.py": it must have a
# procedure called "RunSample". It should also have the "if" statement
# at the end of this file so that it can be run as a standalone
# program.
# This file demonstrates the use of the tixButtonBox widget, which is a
# group of TK buttons. You can use it to manage the buttons in a dialog box,
# for example.
#
import Tix
def RunSample(w):
# Create the label on the top of the dialog box
#
top = Tix.Label(w, padx=20, pady=10, bd=1, relief=Tix.RAISED,
anchor=Tix.CENTER, text='This dialog box is\n a demonstration of the\n tixButtonBox widget')
# Create the button box and add a few buttons in it. Set the
# -width of all the buttons to the same value so that they
# appear in the same size.
#
# Note that the -text, -underline, -command and -width options are all
# standard options of the button widgets.
#
box = Tix.ButtonBox(w, orientation=Tix.HORIZONTAL)
box.add('ok', text='OK', underline=0, width=5,
command=lambda w=w: w.destroy())
box.add('close', text='Cancel', underline=0, width=5,
command=lambda w=w: w.destroy())
box.pack(side=Tix.BOTTOM, fill=Tix.X)
top.pack(side=Tix.TOP, fill=Tix.BOTH, expand=1)
if __name__ == '__main__':
root = Tix.Tk()
RunSample(root)
root.mainloop()
| gpl-2.0 |
jaidevd/scikit-learn | sklearn/preprocessing/data.py | 5 | 68210 | # Authors: Alexandre Gramfort <alexandre.gramfort@inria.fr>
# Mathieu Blondel <mathieu@mblondel.org>
# Olivier Grisel <olivier.grisel@ensta.org>
# Andreas Mueller <amueller@ais.uni-bonn.de>
# Eric Martin <eric@ericmart.in>
# Giorgio Patrini <giorgio.patrini@anu.edu.au>
# License: BSD 3 clause
from itertools import chain, combinations
import numbers
import warnings
from itertools import combinations_with_replacement as combinations_w_r
import numpy as np
from scipy import sparse
from ..base import BaseEstimator, TransformerMixin
from ..externals import six
from ..utils import check_array
from ..utils.extmath import row_norms
from ..utils.extmath import _incremental_mean_and_var
from ..utils.fixes import bincount
from ..utils.sparsefuncs_fast import (inplace_csr_row_normalize_l1,
inplace_csr_row_normalize_l2)
from ..utils.sparsefuncs import (inplace_column_scale,
mean_variance_axis, incr_mean_variance_axis,
min_max_axis)
from ..utils.validation import check_is_fitted, FLOAT_DTYPES
zip = six.moves.zip
map = six.moves.map
range = six.moves.range
__all__ = [
'Binarizer',
'KernelCenterer',
'MinMaxScaler',
'MaxAbsScaler',
'Normalizer',
'OneHotEncoder',
'RobustScaler',
'StandardScaler',
'add_dummy_feature',
'binarize',
'normalize',
'scale',
'robust_scale',
'maxabs_scale',
'minmax_scale',
]
def _handle_zeros_in_scale(scale, copy=True):
''' Makes sure that whenever scale is zero, we handle it correctly.
This happens in most scalers when we have constant features.'''
# if we are fitting on 1D arrays, scale might be a scalar
if np.isscalar(scale):
if scale == .0:
scale = 1.
return scale
elif isinstance(scale, np.ndarray):
if copy:
# New array to avoid side-effects
scale = scale.copy()
scale[scale == 0.0] = 1.0
return scale
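# A minimal, doctest-style sketch of what _handle_zeros_in_scale does: zero
# entries of a scale vector (or a zero scalar) are replaced by 1 so that the
# divisions performed later by the scalers are safe. Values are illustrative.
# >>> import numpy as np
# >>> _handle_zeros_in_scale(np.array([2., 0., 5.]))
# array([ 2.,  1.,  5.])
# >>> _handle_zeros_in_scale(0.0)
# 1.0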
def scale(X, axis=0, with_mean=True, with_std=True, copy=True):
"""Standardize a dataset along any axis
Center to the mean and component wise scale to unit variance.
Read more in the :ref:`User Guide <preprocessing_scaler>`.
Parameters
----------
X : {array-like, sparse matrix}
The data to center and scale.
axis : int (0 by default)
axis used to compute the means and standard deviations along. If 0,
independently standardize each feature, otherwise (if 1) standardize
each sample.
with_mean : boolean, True by default
If True, center the data before scaling.
with_std : boolean, True by default
If True, scale the data to unit variance (or equivalently,
unit standard deviation).
copy : boolean, optional, default True
set to False to perform inplace row normalization and avoid a
copy (if the input is already a numpy array or a scipy.sparse
CSC matrix and if axis is 1).
Notes
-----
This implementation will refuse to center scipy.sparse matrices
since it would make them non-sparse and would potentially crash the
program with memory exhaustion problems.
Instead the caller is expected to either set explicitly
`with_mean=False` (in that case, only variance scaling will be
performed on the features of the CSC matrix) or to call `X.toarray()`
if he/she expects the materialized dense array to fit in memory.
To avoid memory copy the caller should pass a CSC matrix.
See also
--------
StandardScaler: Performs scaling to unit variance using the ``Transformer`` API
(e.g. as part of a preprocessing :class:`sklearn.pipeline.Pipeline`).
""" # noqa
X = check_array(X, accept_sparse='csc', copy=copy, ensure_2d=False,
warn_on_dtype=True, estimator='the scale function',
dtype=FLOAT_DTYPES)
if sparse.issparse(X):
if with_mean:
raise ValueError(
"Cannot center sparse matrices: pass `with_mean=False` instead"
" See docstring for motivation and alternatives.")
if axis != 0:
raise ValueError("Can only scale sparse matrix on axis=0, "
" got axis=%d" % axis)
if with_std:
_, var = mean_variance_axis(X, axis=0)
var = _handle_zeros_in_scale(var, copy=False)
inplace_column_scale(X, 1 / np.sqrt(var))
else:
X = np.asarray(X)
if with_mean:
mean_ = np.mean(X, axis)
if with_std:
scale_ = np.std(X, axis)
# Xr is a view on the original array that enables easy use of
# broadcasting on the axis in which we are interested in
Xr = np.rollaxis(X, axis)
if with_mean:
Xr -= mean_
mean_1 = Xr.mean(axis=0)
# Verify that mean_1 is 'close to zero'. If X contains very
# large values, mean_1 can also be very large, due to a lack of
# precision of mean_. In this case, a pre-scaling of the
# concerned feature is efficient, for instance by its mean or
# maximum.
if not np.allclose(mean_1, 0):
warnings.warn("Numerical issues were encountered "
"when centering the data "
"and might not be solved. Dataset may "
"contain too large values. You may need "
"to prescale your features.")
Xr -= mean_1
if with_std:
scale_ = _handle_zeros_in_scale(scale_, copy=False)
Xr /= scale_
if with_mean:
mean_2 = Xr.mean(axis=0)
# If mean_2 is not 'close to zero', it comes from the fact that
# scale_ is very small so that mean_2 = mean_1/scale_ > 0, even
# if mean_1 was close to zero. The problem is thus essentially
# due to the lack of precision of mean_. A solution is then to
# subtract the mean again:
if not np.allclose(mean_2, 0):
warnings.warn("Numerical issues were encountered "
"when scaling the data "
"and might not be solved. The standard "
"deviation of the data is probably "
"very close to 0. ")
Xr -= mean_2
return X
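# A short, hedged usage sketch of ``scale`` on a small dense array; the exact
# floating point formatting depends on the NumPy version, so the output below
# is approximate.
# >>> import numpy as np
# >>> from sklearn.preprocessing import scale
# >>> X = np.array([[1., 0.], [3., 2.], [5., 4.]])
# >>> scale(X)        # each column now has zero mean and unit variance
# array([[-1.2247, -1.2247],
#        [ 0.    ,  0.    ],
#        [ 1.2247,  1.2247]])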
class MinMaxScaler(BaseEstimator, TransformerMixin):
"""Transforms features by scaling each feature to a given range.
This estimator scales and translates each feature individually such
that it is in the given range on the training set, i.e. between
zero and one.
The transformation is given by::
X_std = (X - X.min(axis=0)) / (X.max(axis=0) - X.min(axis=0))
X_scaled = X_std * (max - min) + min
where min, max = feature_range.
This transformation is often used as an alternative to zero mean,
unit variance scaling.
Read more in the :ref:`User Guide <preprocessing_scaler>`.
Parameters
----------
feature_range : tuple (min, max), default=(0, 1)
Desired range of transformed data.
copy : boolean, optional, default True
Set to False to perform inplace row normalization and avoid a
copy (if the input is already a numpy array).
Attributes
----------
min_ : ndarray, shape (n_features,)
Per feature adjustment for minimum.
scale_ : ndarray, shape (n_features,)
Per feature relative scaling of the data.
.. versionadded:: 0.17
*scale_* attribute.
data_min_ : ndarray, shape (n_features,)
Per feature minimum seen in the data
.. versionadded:: 0.17
*data_min_*
data_max_ : ndarray, shape (n_features,)
Per feature maximum seen in the data
.. versionadded:: 0.17
*data_max_*
data_range_ : ndarray, shape (n_features,)
Per feature range ``(data_max_ - data_min_)`` seen in the data
.. versionadded:: 0.17
*data_range_*
See also
--------
minmax_scale: Equivalent function without the object oriented API.
"""
def __init__(self, feature_range=(0, 1), copy=True):
self.feature_range = feature_range
self.copy = copy
def _reset(self):
"""Reset internal data-dependent state of the scaler, if necessary.
__init__ parameters are not touched.
"""
# Checking one attribute is enough, because they are all set together
# in partial_fit
if hasattr(self, 'scale_'):
del self.scale_
del self.min_
del self.n_samples_seen_
del self.data_min_
del self.data_max_
del self.data_range_
def fit(self, X, y=None):
"""Compute the minimum and maximum to be used for later scaling.
Parameters
----------
X : array-like, shape [n_samples, n_features]
The data used to compute the per-feature minimum and maximum
used for later scaling along the features axis.
"""
# Reset internal state before fitting
self._reset()
return self.partial_fit(X, y)
def partial_fit(self, X, y=None):
"""Online computation of min and max on X for later scaling.
All of X is processed as a single batch. This is intended for cases
when `fit` is not feasible due to a very large number of `n_samples`
or because X is read from a continuous stream.
Parameters
----------
X : array-like, shape [n_samples, n_features]
The data used to compute the mean and standard deviation
used for later scaling along the features axis.
y : Passthrough for ``Pipeline`` compatibility.
"""
feature_range = self.feature_range
if feature_range[0] >= feature_range[1]:
raise ValueError("Minimum of desired feature range must be smaller"
" than maximum. Got %s." % str(feature_range))
if sparse.issparse(X):
raise TypeError("MinMaxScaler does no support sparse input. "
"You may consider to use MaxAbsScaler instead.")
X = check_array(X, copy=self.copy, warn_on_dtype=True,
estimator=self, dtype=FLOAT_DTYPES)
data_min = np.min(X, axis=0)
data_max = np.max(X, axis=0)
# First pass
if not hasattr(self, 'n_samples_seen_'):
self.n_samples_seen_ = X.shape[0]
# Next steps
else:
data_min = np.minimum(self.data_min_, data_min)
data_max = np.maximum(self.data_max_, data_max)
self.n_samples_seen_ += X.shape[0]
data_range = data_max - data_min
self.scale_ = ((feature_range[1] - feature_range[0]) /
_handle_zeros_in_scale(data_range))
self.min_ = feature_range[0] - data_min * self.scale_
self.data_min_ = data_min
self.data_max_ = data_max
self.data_range_ = data_range
return self
def transform(self, X):
"""Scaling features of X according to feature_range.
Parameters
----------
X : array-like, shape [n_samples, n_features]
Input data that will be transformed.
"""
check_is_fitted(self, 'scale_')
X = check_array(X, copy=self.copy, dtype=FLOAT_DTYPES)
X *= self.scale_
X += self.min_
return X
def inverse_transform(self, X):
"""Undo the scaling of X according to feature_range.
Parameters
----------
X : array-like, shape [n_samples, n_features]
Input data that will be transformed. It cannot be sparse.
"""
check_is_fitted(self, 'scale_')
X = check_array(X, copy=self.copy, dtype=FLOAT_DTYPES)
X -= self.min_
X /= self.scale_
return X
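# An illustrative sketch of MinMaxScaler with the default (0, 1) range; the
# numbers follow directly from the formula in the class docstring.
# >>> import numpy as np
# >>> from sklearn.preprocessing import MinMaxScaler
# >>> X = np.array([[1., -1.], [2., 0.], [3., 1.]])
# >>> scaler = MinMaxScaler().fit(X)
# >>> scaler.data_min_, scaler.data_max_
# (array([ 1., -1.]), array([ 3.,  1.]))
# >>> scaler.transform(X)
# array([[ 0. ,  0. ],
#        [ 0.5,  0.5],
#        [ 1. ,  1. ]])
# >>> scaler.transform(np.array([[4., 2.]]))  # values outside the training range fall outside (0, 1)
# array([[ 1.5,  1.5]])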
def minmax_scale(X, feature_range=(0, 1), axis=0, copy=True):
"""Transforms features by scaling each feature to a given range.
This estimator scales and translates each feature individually such
that it is in the given range on the training set, i.e. between
zero and one.
The transformation is given by::
X_std = (X - X.min(axis=0)) / (X.max(axis=0) - X.min(axis=0))
X_scaled = X_std * (max - min) + min
where min, max = feature_range.
This transformation is often used as an alternative to zero mean,
unit variance scaling.
Read more in the :ref:`User Guide <preprocessing_scaler>`.
.. versionadded:: 0.17
*minmax_scale* function interface
to :class:`sklearn.preprocessing.MinMaxScaler`.
Parameters
----------
feature_range : tuple (min, max), default=(0, 1)
Desired range of transformed data.
axis : int (0 by default)
axis used to scale along. If 0, independently scale each feature,
otherwise (if 1) scale each sample.
copy : boolean, optional, default is True
Set to False to perform inplace scaling and avoid a copy (if the input
is already a numpy array).
See also
--------
MinMaxScaler: Performs scaling to a given range using the ``Transformer`` API
(e.g. as part of a preprocessing :class:`sklearn.pipeline.Pipeline`).
""" # noqa
# Unlike the scaler object, this function allows 1d input.
# If copy is required, it will be done inside the scaler object.
X = check_array(X, copy=False, ensure_2d=False, warn_on_dtype=True,
dtype=FLOAT_DTYPES)
original_ndim = X.ndim
if original_ndim == 1:
X = X.reshape(X.shape[0], 1)
s = MinMaxScaler(feature_range=feature_range, copy=copy)
if axis == 0:
X = s.fit_transform(X)
else:
X = s.fit_transform(X.T).T
if original_ndim == 1:
X = X.ravel()
return X
class StandardScaler(BaseEstimator, TransformerMixin):
"""Standardize features by removing the mean and scaling to unit variance
Centering and scaling happen independently on each feature by computing
the relevant statistics on the samples in the training set. Mean and
standard deviation are then stored to be used on later data using the
`transform` method.
Standardization of a dataset is a common requirement for many
machine learning estimators: they might behave badly if the
individual features do not more or less look like standard normally
distributed data (e.g. Gaussian with 0 mean and unit variance).
For instance many elements used in the objective function of
a learning algorithm (such as the RBF kernel of Support Vector
Machines or the L1 and L2 regularizers of linear models) assume that
all features are centered around 0 and have variance in the same
order. If a feature has a variance that is orders of magnitude larger
than others, it might dominate the objective function and make the
estimator unable to learn from other features correctly as expected.
This scaler can also be applied to sparse CSR or CSC matrices by passing
`with_mean=False` to avoid breaking the sparsity structure of the data.
Read more in the :ref:`User Guide <preprocessing_scaler>`.
Parameters
----------
with_mean : boolean, True by default
If True, center the data before scaling.
This does not work (and will raise an exception) when attempted on
sparse matrices, because centering them entails building a dense
matrix which in common use cases is likely to be too large to fit in
memory.
with_std : boolean, True by default
If True, scale the data to unit variance (or equivalently,
unit standard deviation).
copy : boolean, optional, default True
If False, try to avoid a copy and do inplace scaling instead.
This is not guaranteed to always work inplace; e.g. if the data is
not a NumPy array or scipy.sparse CSR matrix, a copy may still be
returned.
Attributes
----------
scale_ : ndarray, shape (n_features,)
Per feature relative scaling of the data.
.. versionadded:: 0.17
*scale_*
mean_ : array of floats with shape [n_features]
The mean value for each feature in the training set.
var_ : array of floats with shape [n_features]
The variance for each feature in the training set. Used to compute
`scale_`
n_samples_seen_ : int
The number of samples processed by the estimator. Will be reset on
new calls to fit, but increments across ``partial_fit`` calls.
See also
--------
scale: Equivalent function without the object oriented API.
:class:`sklearn.decomposition.PCA`
Further removes the linear correlation across features with 'whiten=True'.
""" # noqa
def __init__(self, copy=True, with_mean=True, with_std=True):
self.with_mean = with_mean
self.with_std = with_std
self.copy = copy
def _reset(self):
"""Reset internal data-dependent state of the scaler, if necessary.
__init__ parameters are not touched.
"""
# Checking one attribute is enough, because they are all set together
# in partial_fit
if hasattr(self, 'scale_'):
del self.scale_
del self.n_samples_seen_
del self.mean_
del self.var_
def fit(self, X, y=None):
"""Compute the mean and std to be used for later scaling.
Parameters
----------
X : {array-like, sparse matrix}, shape [n_samples, n_features]
The data used to compute the mean and standard deviation
used for later scaling along the features axis.
y : Passthrough for ``Pipeline`` compatibility.
"""
# Reset internal state before fitting
self._reset()
return self.partial_fit(X, y)
def partial_fit(self, X, y=None):
"""Online computation of mean and std on X for later scaling.
All of X is processed as a single batch. This is intended for cases
when `fit` is not feasible due to a very large number of `n_samples`
or because X is read from a continuous stream.
The algorithm for incremental mean and std is given in Equation 1.5a,b
in Chan, Tony F., Gene H. Golub, and Randall J. LeVeque. "Algorithms
for computing the sample variance: Analysis and recommendations."
The American Statistician 37.3 (1983): 242-247:
Parameters
----------
X : {array-like, sparse matrix}, shape [n_samples, n_features]
The data used to compute the mean and standard deviation
used for later scaling along the features axis.
y : Passthrough for ``Pipeline`` compatibility.
"""
X = check_array(X, accept_sparse=('csr', 'csc'), copy=self.copy,
warn_on_dtype=True, estimator=self, dtype=FLOAT_DTYPES)
# Even in the case of `with_mean=False`, we update the mean anyway
# This is needed for the incremental computation of the var
# See incr_mean_variance_axis and _incremental_mean_variance_axis
if sparse.issparse(X):
if self.with_mean:
raise ValueError(
"Cannot center sparse matrices: pass `with_mean=False` "
"instead. See docstring for motivation and alternatives.")
if self.with_std:
# First pass
if not hasattr(self, 'n_samples_seen_'):
self.mean_, self.var_ = mean_variance_axis(X, axis=0)
self.n_samples_seen_ = X.shape[0]
# Next passes
else:
self.mean_, self.var_, self.n_samples_seen_ = \
incr_mean_variance_axis(X, axis=0,
last_mean=self.mean_,
last_var=self.var_,
last_n=self.n_samples_seen_)
else:
self.mean_ = None
self.var_ = None
else:
# First pass
if not hasattr(self, 'n_samples_seen_'):
self.mean_ = .0
self.n_samples_seen_ = 0
if self.with_std:
self.var_ = .0
else:
self.var_ = None
self.mean_, self.var_, self.n_samples_seen_ = \
_incremental_mean_and_var(X, self.mean_, self.var_,
self.n_samples_seen_)
if self.with_std:
self.scale_ = _handle_zeros_in_scale(np.sqrt(self.var_))
else:
self.scale_ = None
return self
def transform(self, X, y=None, copy=None):
"""Perform standardization by centering and scaling
Parameters
----------
X : array-like, shape [n_samples, n_features]
The data used to scale along the features axis.
"""
check_is_fitted(self, 'scale_')
copy = copy if copy is not None else self.copy
X = check_array(X, accept_sparse='csr', copy=copy, warn_on_dtype=True,
estimator=self, dtype=FLOAT_DTYPES)
if sparse.issparse(X):
if self.with_mean:
raise ValueError(
"Cannot center sparse matrices: pass `with_mean=False` "
"instead. See docstring for motivation and alternatives.")
if self.scale_ is not None:
inplace_column_scale(X, 1 / self.scale_)
else:
if self.with_mean:
X -= self.mean_
if self.with_std:
X /= self.scale_
return X
def inverse_transform(self, X, copy=None):
"""Scale back the data to the original representation
Parameters
----------
X : array-like, shape [n_samples, n_features]
The data used to scale along the features axis.
"""
check_is_fitted(self, 'scale_')
copy = copy if copy is not None else self.copy
if sparse.issparse(X):
if self.with_mean:
raise ValueError(
"Cannot uncenter sparse matrices: pass `with_mean=False` "
"instead See docstring for motivation and alternatives.")
if not sparse.isspmatrix_csr(X):
X = X.tocsr()
copy = False
if copy:
X = X.copy()
if self.scale_ is not None:
inplace_column_scale(X, self.scale_)
else:
X = np.asarray(X)
if copy:
X = X.copy()
if self.with_std:
X *= self.scale_
if self.with_mean:
X += self.mean_
return X
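# A brief usage sketch of StandardScaler (output shown approximately): the
# statistics learned in ``fit`` are reused by ``transform`` on new data.
# >>> import numpy as np
# >>> from sklearn.preprocessing import StandardScaler
# >>> X = np.array([[0., 0.], [1., 1.], [2., 2.]])
# >>> ss = StandardScaler().fit(X)
# >>> ss.mean_
# array([ 1.,  1.])
# >>> ss.transform(np.array([[3., 3.]]))  # (3 - 1) / std, with std = sqrt(2/3) ~ 0.816
# array([[ 2.4495,  2.4495]])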
class MaxAbsScaler(BaseEstimator, TransformerMixin):
"""Scale each feature by its maximum absolute value.
This estimator scales each feature individually such
that the maximal absolute value of each feature in the
training set will be 1.0. It does not shift/center the data, and
thus does not destroy any sparsity.
This scaler can also be applied to sparse CSR or CSC matrices.
.. versionadded:: 0.17
Parameters
----------
copy : boolean, optional, default is True
Set to False to perform inplace scaling and avoid a copy (if the input
is already a numpy array).
Attributes
----------
scale_ : ndarray, shape (n_features,)
Per feature relative scaling of the data.
.. versionadded:: 0.17
*scale_* attribute.
max_abs_ : ndarray, shape (n_features,)
Per feature maximum absolute value.
n_samples_seen_ : int
The number of samples processed by the estimator. Will be reset on
new calls to fit, but increments across ``partial_fit`` calls.
See also
--------
maxabs_scale: Equivalent function without the object oriented API.
"""
def __init__(self, copy=True):
self.copy = copy
def _reset(self):
"""Reset internal data-dependent state of the scaler, if necessary.
__init__ parameters are not touched.
"""
# Checking one attribute is enough, because they are all set together
# in partial_fit
if hasattr(self, 'scale_'):
del self.scale_
del self.n_samples_seen_
del self.max_abs_
def fit(self, X, y=None):
"""Compute the maximum absolute value to be used for later scaling.
Parameters
----------
X : {array-like, sparse matrix}, shape [n_samples, n_features]
The data used to compute the per-feature minimum and maximum
used for later scaling along the features axis.
"""
# Reset internal state before fitting
self._reset()
return self.partial_fit(X, y)
def partial_fit(self, X, y=None):
"""Online computation of max absolute value of X for later scaling.
All of X is processed as a single batch. This is intended for cases
when `fit` is not feasible due to a very large number of `n_samples`
or because X is read from a continuous stream.
Parameters
----------
X : {array-like, sparse matrix}, shape [n_samples, n_features]
The data used to compute the mean and standard deviation
used for later scaling along the features axis.
y : Passthrough for ``Pipeline`` compatibility.
"""
X = check_array(X, accept_sparse=('csr', 'csc'), copy=self.copy,
estimator=self, dtype=FLOAT_DTYPES)
if sparse.issparse(X):
mins, maxs = min_max_axis(X, axis=0)
max_abs = np.maximum(np.abs(mins), np.abs(maxs))
else:
max_abs = np.abs(X).max(axis=0)
# First pass
if not hasattr(self, 'n_samples_seen_'):
self.n_samples_seen_ = X.shape[0]
# Next passes
else:
max_abs = np.maximum(self.max_abs_, max_abs)
self.n_samples_seen_ += X.shape[0]
self.max_abs_ = max_abs
self.scale_ = _handle_zeros_in_scale(max_abs)
return self
def transform(self, X, y=None):
"""Scale the data
Parameters
----------
X : {array-like, sparse matrix}
The data that should be scaled.
"""
check_is_fitted(self, 'scale_')
X = check_array(X, accept_sparse=('csr', 'csc'), copy=self.copy,
estimator=self, dtype=FLOAT_DTYPES)
if sparse.issparse(X):
inplace_column_scale(X, 1.0 / self.scale_)
else:
X /= self.scale_
return X
def inverse_transform(self, X):
"""Scale back the data to the original representation
Parameters
----------
X : {array-like, sparse matrix}
The data that should be transformed back.
"""
check_is_fitted(self, 'scale_')
X = check_array(X, accept_sparse=('csr', 'csc'), copy=self.copy,
estimator=self, dtype=FLOAT_DTYPES)
if sparse.issparse(X):
inplace_column_scale(X, self.scale_)
else:
X *= self.scale_
return X
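# A small sketch of MaxAbsScaler: each column is divided by its maximum
# absolute value, so zeros (and hence sparsity) are preserved.
# >>> import numpy as np
# >>> from sklearn.preprocessing import MaxAbsScaler
# >>> X = np.array([[1., -2.], [2., 0.], [-4., 1.]])
# >>> MaxAbsScaler().fit_transform(X)  # per-column max abs values are 4 and 2
# array([[ 0.25, -1.  ],
#        [ 0.5 ,  0.  ],
#        [-1.  ,  0.5 ]])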
def maxabs_scale(X, axis=0, copy=True):
"""Scale each feature to the [-1, 1] range without breaking the sparsity.
This estimator scales each feature individually such
that the maximal absolute value of each feature in the
training set will be 1.0.
This scaler can also be applied to sparse CSR or CSC matrices.
Parameters
----------
axis : int (0 by default)
axis used to scale along. If 0, independently scale each feature,
otherwise (if 1) scale each sample.
copy : boolean, optional, default is True
Set to False to perform inplace scaling and avoid a copy (if the input
is already a numpy array).
See also
--------
MaxAbsScaler: Performs scaling to the [-1, 1] range using the ``Transformer`` API
(e.g. as part of a preprocessing :class:`sklearn.pipeline.Pipeline`).
""" # noqa
# Unlike the scaler object, this function allows 1d input.
# If copy is required, it will be done inside the scaler object.
X = check_array(X, accept_sparse=('csr', 'csc'), copy=False,
ensure_2d=False, dtype=FLOAT_DTYPES)
original_ndim = X.ndim
if original_ndim == 1:
X = X.reshape(X.shape[0], 1)
s = MaxAbsScaler(copy=copy)
if axis == 0:
X = s.fit_transform(X)
else:
X = s.fit_transform(X.T).T
if original_ndim == 1:
X = X.ravel()
return X
class RobustScaler(BaseEstimator, TransformerMixin):
"""Scale features using statistics that are robust to outliers.
This Scaler removes the median and scales the data according to
the quantile range (defaults to IQR: Interquartile Range).
The IQR is the range between the 1st quartile (25th quantile)
and the 3rd quartile (75th quantile).
Centering and scaling happen independently on each feature (or each
sample, depending on the `axis` argument) by computing the relevant
statistics on the samples in the training set. Median and interquartile
range are then stored to be used on later data using the `transform`
method.
Standardization of a dataset is a common requirement for many
machine learning estimators. Typically this is done by removing the mean
and scaling to unit variance. However, outliers can often influence the
sample mean / variance in a negative way. In such cases, the median and
the interquartile range often give better results.
.. versionadded:: 0.17
Read more in the :ref:`User Guide <preprocessing_scaler>`.
Parameters
----------
with_centering : boolean, True by default
If True, center the data before scaling.
This does not work (and will raise an exception) when attempted on
sparse matrices, because centering them entails building a dense
matrix which in common use cases is likely to be too large to fit in
memory.
with_scaling : boolean, True by default
If True, scale the data to interquartile range.
quantile_range : tuple (q_min, q_max), 0.0 < q_min < q_max < 100.0
Default: (25.0, 75.0) = (1st quantile, 3rd quantile) = IQR
Quantile range used to calculate ``scale_``.
.. versionadded:: 0.18
copy : boolean, optional, default is True
If False, try to avoid a copy and do inplace scaling instead.
This is not guaranteed to always work inplace; e.g. if the data is
not a NumPy array or scipy.sparse CSR matrix, a copy may still be
returned.
Attributes
----------
center_ : array of floats
The median value for each feature in the training set.
scale_ : array of floats
The (scaled) interquartile range for each feature in the training set.
.. versionadded:: 0.17
*scale_* attribute.
See also
--------
robust_scale: Equivalent function without the object oriented API.
:class:`sklearn.decomposition.PCA`
Further removes the linear correlation across features with
'whiten=True'.
Notes
-----
See examples/preprocessing/plot_robust_scaling.py for an example.
https://en.wikipedia.org/wiki/Median_(statistics)
https://en.wikipedia.org/wiki/Interquartile_range
"""
def __init__(self, with_centering=True, with_scaling=True,
quantile_range=(25.0, 75.0), copy=True):
self.with_centering = with_centering
self.with_scaling = with_scaling
self.quantile_range = quantile_range
self.copy = copy
def _check_array(self, X, copy):
"""Makes sure centering is not enabled for sparse matrices."""
X = check_array(X, accept_sparse=('csr', 'csc'), copy=self.copy,
estimator=self, dtype=FLOAT_DTYPES)
if sparse.issparse(X):
if self.with_centering:
raise ValueError(
"Cannot center sparse matrices: use `with_centering=False`"
" instead. See docstring for motivation and alternatives.")
return X
def fit(self, X, y=None):
"""Compute the median and quantiles to be used for scaling.
Parameters
----------
X : array-like, shape [n_samples, n_features]
The data used to compute the median and quantiles
used for later scaling along the features axis.
"""
if sparse.issparse(X):
raise TypeError("RobustScaler cannot be fitted on sparse inputs")
X = self._check_array(X, self.copy)
if self.with_centering:
self.center_ = np.median(X, axis=0)
if self.with_scaling:
q_min, q_max = self.quantile_range
if not 0 <= q_min <= q_max <= 100:
raise ValueError("Invalid quantile range: %s" %
str(self.quantile_range))
q = np.percentile(X, self.quantile_range, axis=0)
self.scale_ = (q[1] - q[0])
self.scale_ = _handle_zeros_in_scale(self.scale_, copy=False)
return self
def transform(self, X, y=None):
"""Center and scale the data
Parameters
----------
X : array-like
The data used to scale along the specified axis.
"""
if self.with_centering:
check_is_fitted(self, 'center_')
if self.with_scaling:
check_is_fitted(self, 'scale_')
X = self._check_array(X, self.copy)
if sparse.issparse(X):
if self.with_scaling:
inplace_column_scale(X, 1.0 / self.scale_)
else:
if self.with_centering:
X -= self.center_
if self.with_scaling:
X /= self.scale_
return X
def inverse_transform(self, X):
"""Scale back the data to the original representation
Parameters
----------
X : array-like
The data used to scale along the specified axis.
"""
if self.with_centering:
check_is_fitted(self, 'center_')
if self.with_scaling:
check_is_fitted(self, 'scale_')
X = self._check_array(X, self.copy)
if sparse.issparse(X):
if self.with_scaling:
inplace_column_scale(X, self.scale_)
else:
if self.with_scaling:
X *= self.scale_
if self.with_centering:
X += self.center_
return X
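# An illustrative sketch of RobustScaler on data with an outlier; the outlier
# barely affects the median/IQR statistics used for centering and scaling.
# >>> import numpy as np
# >>> from sklearn.preprocessing import RobustScaler
# >>> X = np.array([[1.], [2.], [3.], [4.], [100.]])
# >>> rs = RobustScaler().fit(X)  # median = 3, IQR = 4 - 2 = 2
# >>> rs.transform(np.array([[5.], [100.]]))
# array([[  1. ],
#        [ 48.5]])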
def robust_scale(X, axis=0, with_centering=True, with_scaling=True,
quantile_range=(25.0, 75.0), copy=True):
"""Standardize a dataset along any axis
Center to the median and component wise scale
according to the interquartile range.
Read more in the :ref:`User Guide <preprocessing_scaler>`.
Parameters
----------
X : array-like
The data to center and scale.
axis : int (0 by default)
axis used to compute the medians and IQR along. If 0,
independently scale each feature, otherwise (if 1) scale
each sample.
with_centering : boolean, True by default
If True, center the data before scaling.
with_scaling : boolean, True by default
If True, scale the data to unit variance (or equivalently,
unit standard deviation).
quantile_range : tuple (q_min, q_max), 0.0 < q_min < q_max < 100.0
Default: (25.0, 75.0) = (1st quantile, 3rd quantile) = IQR
Quantile range used to calculate ``scale_``.
.. versionadded:: 0.18
copy : boolean, optional, default is True
set to False to perform inplace row normalization and avoid a
copy (if the input is already a numpy array or a scipy.sparse
CSR matrix and if axis is 1).
Notes
-----
This implementation will refuse to center scipy.sparse matrices
since it would make them non-sparse and would potentially crash the
program with memory exhaustion problems.
Instead the caller is expected to either set explicitly
`with_centering=False` (in that case, only variance scaling will be
performed on the features of the CSR matrix) or to call `X.toarray()`
if he/she expects the materialized dense array to fit in memory.
To avoid memory copy the caller should pass a CSR matrix.
See also
--------
RobustScaler: Performs centering and scaling using the ``Transformer`` API
(e.g. as part of a preprocessing :class:`sklearn.pipeline.Pipeline`).
"""
s = RobustScaler(with_centering=with_centering, with_scaling=with_scaling,
quantile_range=quantile_range, copy=copy)
if axis == 0:
return s.fit_transform(X)
else:
return s.fit_transform(X.T).T
class PolynomialFeatures(BaseEstimator, TransformerMixin):
"""Generate polynomial and interaction features.
Generate a new feature matrix consisting of all polynomial combinations
of the features with degree less than or equal to the specified degree.
For example, if an input sample is two dimensional and of the form
[a, b], the degree-2 polynomial features are [1, a, b, a^2, ab, b^2].
Parameters
----------
degree : integer
The degree of the polynomial features. Default = 2.
interaction_only : boolean, default = False
If true, only interaction features are produced: features that are
products of at most ``degree`` *distinct* input features (so not
``x[1] ** 2``, ``x[0] * x[2] ** 3``, etc.).
include_bias : boolean
If True (default), then include a bias column, the feature in which
all polynomial powers are zero (i.e. a column of ones - acts as an
intercept term in a linear model).
Examples
--------
>>> X = np.arange(6).reshape(3, 2)
>>> X
array([[0, 1],
[2, 3],
[4, 5]])
>>> poly = PolynomialFeatures(2)
>>> poly.fit_transform(X)
array([[ 1., 0., 1., 0., 0., 1.],
[ 1., 2., 3., 4., 6., 9.],
[ 1., 4., 5., 16., 20., 25.]])
>>> poly = PolynomialFeatures(interaction_only=True)
>>> poly.fit_transform(X)
array([[ 1., 0., 1., 0.],
[ 1., 2., 3., 6.],
[ 1., 4., 5., 20.]])
Attributes
----------
powers_ : array, shape (n_output_features, n_input_features)
powers_[i, j] is the exponent of the jth input in the ith output.
n_input_features_ : int
The total number of input features.
n_output_features_ : int
The total number of polynomial output features. The number of output
features is computed by iterating over all suitably sized combinations
of input features.
Notes
-----
Be aware that the number of features in the output array scales
polynomially in the number of features of the input array, and
exponentially in the degree. High degrees can cause overfitting.
See :ref:`examples/linear_model/plot_polynomial_interpolation.py
<sphx_glr_auto_examples_linear_model_plot_polynomial_interpolation.py>`
"""
def __init__(self, degree=2, interaction_only=False, include_bias=True):
self.degree = degree
self.interaction_only = interaction_only
self.include_bias = include_bias
@staticmethod
def _combinations(n_features, degree, interaction_only, include_bias):
comb = (combinations if interaction_only else combinations_w_r)
start = int(not include_bias)
return chain.from_iterable(comb(range(n_features), i)
for i in range(start, degree + 1))
@property
def powers_(self):
check_is_fitted(self, 'n_input_features_')
combinations = self._combinations(self.n_input_features_, self.degree,
self.interaction_only,
self.include_bias)
return np.vstack(bincount(c, minlength=self.n_input_features_)
for c in combinations)
def get_feature_names(self, input_features=None):
"""
Return feature names for output features
Parameters
----------
input_features : list of string, length n_features, optional
String names for input features if available. By default,
"x0", "x1", ... "xn_features" is used.
Returns
-------
output_feature_names : list of string, length n_output_features
"""
powers = self.powers_
if input_features is None:
input_features = ['x%d' % i for i in range(powers.shape[1])]
feature_names = []
for row in powers:
inds = np.where(row)[0]
if len(inds):
name = " ".join("%s^%d" % (input_features[ind], exp)
if exp != 1 else input_features[ind]
for ind, exp in zip(inds, row[inds]))
else:
name = "1"
feature_names.append(name)
return feature_names
def fit(self, X, y=None):
"""
Compute number of output features.
"""
n_samples, n_features = check_array(X).shape
combinations = self._combinations(n_features, self.degree,
self.interaction_only,
self.include_bias)
self.n_input_features_ = n_features
self.n_output_features_ = sum(1 for _ in combinations)
return self
def transform(self, X, y=None):
"""Transform data to polynomial features
Parameters
----------
X : array-like, shape [n_samples, n_features]
The data to transform, row by row.
Returns
-------
XP : np.ndarray shape [n_samples, NP]
The matrix of features, where NP is the number of polynomial
features generated from the combination of inputs.
"""
check_is_fitted(self, ['n_input_features_', 'n_output_features_'])
X = check_array(X, dtype=FLOAT_DTYPES)
n_samples, n_features = X.shape
if n_features != self.n_input_features_:
raise ValueError("X shape does not match training shape")
# allocate output data
XP = np.empty((n_samples, self.n_output_features_), dtype=X.dtype)
combinations = self._combinations(n_features, self.degree,
self.interaction_only,
self.include_bias)
for i, c in enumerate(combinations):
XP[:, i] = X[:, c].prod(1)
return XP
def normalize(X, norm='l2', axis=1, copy=True, return_norm=False):
"""Scale input vectors individually to unit norm (vector length).
Read more in the :ref:`User Guide <preprocessing_normalization>`.
Parameters
----------
X : {array-like, sparse matrix}, shape [n_samples, n_features]
The data to normalize, element by element.
scipy.sparse matrices should be in CSR format to avoid an
un-necessary copy.
norm : 'l1', 'l2', or 'max', optional ('l2' by default)
The norm to use to normalize each non zero sample (or each non-zero
feature if axis is 0).
axis : 0 or 1, optional (1 by default)
axis used to normalize the data along. If 1, independently normalize
each sample, otherwise (if 0) normalize each feature.
copy : boolean, optional, default True
set to False to perform inplace row normalization and avoid a
copy (if the input is already a numpy array or a scipy.sparse
CSR matrix and if axis is 1).
return_norm : boolean, default False
whether to return the computed norms
Returns
-------
X : {array-like, sparse matrix}, shape [n_samples, n_features]
Normalized input X.
norms : array, shape [n_samples] if axis=1 else [n_features]
An array of norms along given axis for X.
When X is sparse, a NotImplementedError will be raised
for norm 'l1' or 'l2'.
See also
--------
Normalizer: Performs normalization using the ``Transformer`` API
(e.g. as part of a preprocessing :class:`sklearn.pipeline.Pipeline`).
"""
if norm not in ('l1', 'l2', 'max'):
raise ValueError("'%s' is not a supported norm" % norm)
if axis == 0:
sparse_format = 'csc'
elif axis == 1:
sparse_format = 'csr'
else:
raise ValueError("'%d' is not a supported axis" % axis)
X = check_array(X, sparse_format, copy=copy, warn_on_dtype=True,
estimator='the normalize function', dtype=FLOAT_DTYPES)
if axis == 0:
X = X.T
if sparse.issparse(X):
if return_norm and norm in ('l1', 'l2'):
raise NotImplementedError("return_norm=True is not implemented "
"for sparse matrices with norm 'l1' "
"or norm 'l2'")
if norm == 'l1':
inplace_csr_row_normalize_l1(X)
elif norm == 'l2':
inplace_csr_row_normalize_l2(X)
elif norm == 'max':
_, norms = min_max_axis(X, 1)
norms_elementwise = norms.repeat(np.diff(X.indptr))
mask = norms_elementwise != 0
X.data[mask] /= norms_elementwise[mask]
else:
if norm == 'l1':
norms = np.abs(X).sum(axis=1)
elif norm == 'l2':
norms = row_norms(X)
elif norm == 'max':
norms = np.max(X, axis=1)
norms = _handle_zeros_in_scale(norms, copy=False)
X /= norms[:, np.newaxis]
if axis == 0:
X = X.T
if return_norm:
return X, norms
else:
return X
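# A minimal sketch of ``normalize`` with the default l2 norm: each row is
# divided by its Euclidean length.
# >>> import numpy as np
# >>> from sklearn.preprocessing import normalize
# >>> X = np.array([[3., 4.], [1., 0.]])
# >>> normalize(X)  # row norms are 5 and 1
# array([[ 0.6,  0.8],
#        [ 1. ,  0. ]])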
class Normalizer(BaseEstimator, TransformerMixin):
"""Normalize samples individually to unit norm.
Each sample (i.e. each row of the data matrix) with at least one
non zero component is rescaled independently of other samples so
that its norm (l1 or l2) equals one.
This transformer is able to work both with dense numpy arrays and
scipy.sparse matrix (use CSR format if you want to avoid the burden of
a copy / conversion).
Scaling inputs to unit norms is a common operation for text
classification or clustering for instance. For instance the dot
product of two l2-normalized TF-IDF vectors is the cosine similarity
of the vectors and is the base similarity metric for the Vector
Space Model commonly used by the Information Retrieval community.
Read more in the :ref:`User Guide <preprocessing_normalization>`.
Parameters
----------
norm : 'l1', 'l2', or 'max', optional ('l2' by default)
The norm to use to normalize each non zero sample.
copy : boolean, optional, default True
set to False to perform inplace row normalization and avoid a
copy (if the input is already a numpy array or a scipy.sparse
CSR matrix).
Notes
-----
This estimator is stateless (besides constructor parameters), the
fit method does nothing but is useful when used in a pipeline.
See also
--------
normalize: Equivalent function without the object oriented API.
"""
def __init__(self, norm='l2', copy=True):
self.norm = norm
self.copy = copy
def fit(self, X, y=None):
"""Do nothing and return the estimator unchanged
This method is just there to implement the usual API and hence
work in pipelines.
"""
X = check_array(X, accept_sparse='csr')
return self
def transform(self, X, y=None, copy=None):
"""Scale each non zero row of X to unit norm
Parameters
----------
X : {array-like, sparse matrix}, shape [n_samples, n_features]
The data to normalize, row by row. scipy.sparse matrices should be
in CSR format to avoid an un-necessary copy.
"""
copy = copy if copy is not None else self.copy
X = check_array(X, accept_sparse='csr')
return normalize(X, norm=self.norm, axis=1, copy=copy)
def binarize(X, threshold=0.0, copy=True):
"""Boolean thresholding of array-like or scipy.sparse matrix
Read more in the :ref:`User Guide <preprocessing_binarization>`.
Parameters
----------
X : {array-like, sparse matrix}, shape [n_samples, n_features]
The data to binarize, element by element.
scipy.sparse matrices should be in CSR or CSC format to avoid an
un-necessary copy.
threshold : float, optional (0.0 by default)
Feature values below or equal to this are replaced by 0, above it by 1.
Threshold may not be less than 0 for operations on sparse matrices.
copy : boolean, optional, default True
set to False to perform inplace binarization and avoid a copy
(if the input is already a numpy array or a scipy.sparse CSR / CSC
matrix and if axis is 1).
See also
--------
Binarizer: Performs binarization using the ``Transformer`` API
(e.g. as part of a preprocessing :class:`sklearn.pipeline.Pipeline`).
"""
X = check_array(X, accept_sparse=['csr', 'csc'], copy=copy)
if sparse.issparse(X):
if threshold < 0:
raise ValueError('Cannot binarize a sparse matrix with threshold '
'< 0')
cond = X.data > threshold
not_cond = np.logical_not(cond)
X.data[cond] = 1
X.data[not_cond] = 0
X.eliminate_zeros()
else:
cond = X > threshold
not_cond = np.logical_not(cond)
X[cond] = 1
X[not_cond] = 0
return X
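# A short sketch of ``binarize``: entries strictly above the threshold become
# 1, everything else becomes 0.
# >>> import numpy as np
# >>> from sklearn.preprocessing import binarize
# >>> X = np.array([[0.5, -1.], [2., 0.]])
# >>> binarize(X, threshold=0.5)
# array([[ 0.,  0.],
#        [ 1.,  0.]])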
class Binarizer(BaseEstimator, TransformerMixin):
"""Binarize data (set feature values to 0 or 1) according to a threshold
Values greater than the threshold map to 1, while values less than
or equal to the threshold map to 0. With the default threshold of 0,
only positive values map to 1.
Binarization is a common operation on text count data where the
analyst can decide to only consider the presence or absence of a
feature rather than a quantified number of occurrences for instance.
It can also be used as a pre-processing step for estimators that
consider boolean random variables (e.g. modelled using the Bernoulli
distribution in a Bayesian setting).
Read more in the :ref:`User Guide <preprocessing_binarization>`.
Parameters
----------
threshold : float, optional (0.0 by default)
Feature values below or equal to this are replaced by 0, above it by 1.
Threshold may not be less than 0 for operations on sparse matrices.
copy : boolean, optional, default True
set to False to perform inplace binarization and avoid a copy (if
the input is already a numpy array or a scipy.sparse CSR matrix).
Notes
-----
If the input is a sparse matrix, only the non-zero values are subject
to update by the Binarizer class.
This estimator is stateless (besides constructor parameters), the
fit method does nothing but is useful when used in a pipeline.
See also
--------
binarize: Equivalent function without the object oriented API.
"""
def __init__(self, threshold=0.0, copy=True):
self.threshold = threshold
self.copy = copy
def fit(self, X, y=None):
"""Do nothing and return the estimator unchanged
This method is just there to implement the usual API and hence
work in pipelines.
"""
check_array(X, accept_sparse='csr')
return self
def transform(self, X, y=None, copy=None):
"""Binarize each element of X
Parameters
----------
X : {array-like, sparse matrix}, shape [n_samples, n_features]
The data to binarize, element by element.
scipy.sparse matrices should be in CSR format to avoid an
un-necessary copy.
"""
copy = copy if copy is not None else self.copy
return binarize(X, threshold=self.threshold, copy=copy)
class KernelCenterer(BaseEstimator, TransformerMixin):
"""Center a kernel matrix
Let K(x, z) be a kernel defined by phi(x)^T phi(z), where phi is a
function mapping x to a Hilbert space. KernelCenterer centers (i.e.,
normalizes to have zero mean) the data without explicitly computing phi(x).
It is equivalent to centering phi(x) with
sklearn.preprocessing.StandardScaler(with_std=False).
Read more in the :ref:`User Guide <kernel_centering>`.
"""
def fit(self, K, y=None):
"""Fit KernelCenterer
Parameters
----------
K : numpy array of shape [n_samples, n_samples]
Kernel matrix.
Returns
-------
self : returns an instance of self.
"""
K = check_array(K, dtype=FLOAT_DTYPES)
n_samples = K.shape[0]
self.K_fit_rows_ = np.sum(K, axis=0) / n_samples
self.K_fit_all_ = self.K_fit_rows_.sum() / n_samples
return self
def transform(self, K, y=None, copy=True):
"""Center kernel matrix.
Parameters
----------
K : numpy array of shape [n_samples1, n_samples2]
Kernel matrix.
copy : boolean, optional, default True
Set to False to perform inplace computation.
Returns
-------
K_new : numpy array of shape [n_samples1, n_samples2]
"""
check_is_fitted(self, 'K_fit_all_')
K = check_array(K, copy=copy, dtype=FLOAT_DTYPES)
K_pred_cols = (np.sum(K, axis=1) /
self.K_fit_rows_.shape[0])[:, np.newaxis]
K -= self.K_fit_rows_
K -= K_pred_cols
K += self.K_fit_all_
return K
@property
def _pairwise(self):
return True
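# A hedged sketch of KernelCenterer on a linear kernel: centering the Gram
# matrix K = X X^T is equivalent to computing the Gram matrix of the
# mean-centered X (here the column means of X are [3, 4]).
# >>> import numpy as np
# >>> from sklearn.preprocessing import KernelCenterer
# >>> X = np.array([[1., 2.], [3., 4.], [5., 6.]])
# >>> K = np.dot(X, X.T)
# >>> KernelCenterer().fit_transform(K)
# array([[ 8.,  0., -8.],
#        [ 0.,  0.,  0.],
#        [-8.,  0.,  8.]])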
def add_dummy_feature(X, value=1.0):
"""Augment dataset with an additional dummy feature.
This is useful for fitting an intercept term with implementations which
cannot otherwise fit it directly.
Parameters
----------
X : {array-like, sparse matrix}, shape [n_samples, n_features]
Data.
value : float
Value to use for the dummy feature.
Returns
-------
X : {array, sparse matrix}, shape [n_samples, n_features + 1]
Same data with dummy feature added as first column.
Examples
--------
>>> from sklearn.preprocessing import add_dummy_feature
>>> add_dummy_feature([[0, 1], [1, 0]])
array([[ 1., 0., 1.],
[ 1., 1., 0.]])
"""
X = check_array(X, accept_sparse=['csc', 'csr', 'coo'], dtype=FLOAT_DTYPES)
n_samples, n_features = X.shape
shape = (n_samples, n_features + 1)
if sparse.issparse(X):
if sparse.isspmatrix_coo(X):
# Shift columns to the right.
col = X.col + 1
# Column indices of dummy feature are 0 everywhere.
col = np.concatenate((np.zeros(n_samples), col))
# Row indices of dummy feature are 0, ..., n_samples-1.
row = np.concatenate((np.arange(n_samples), X.row))
# Prepend the dummy feature n_samples times.
data = np.concatenate((np.ones(n_samples) * value, X.data))
return sparse.coo_matrix((data, (row, col)), shape)
elif sparse.isspmatrix_csc(X):
# Shift index pointers since we need to add n_samples elements.
indptr = X.indptr + n_samples
# indptr[0] must be 0.
indptr = np.concatenate((np.array([0]), indptr))
# Row indices of dummy feature are 0, ..., n_samples-1.
indices = np.concatenate((np.arange(n_samples), X.indices))
# Prepend the dummy feature n_samples times.
data = np.concatenate((np.ones(n_samples) * value, X.data))
return sparse.csc_matrix((data, indices, indptr), shape)
else:
klass = X.__class__
return klass(add_dummy_feature(X.tocoo(), value))
else:
return np.hstack((np.ones((n_samples, 1)) * value, X))
def _transform_selected(X, transform, selected="all", copy=True):
"""Apply a transform function to portion of selected features
Parameters
----------
X : {array-like, sparse matrix}, shape [n_samples, n_features]
Dense array or sparse matrix.
transform : callable
A callable transform(X) -> X_transformed
copy : boolean, optional
Copy X even if it could be avoided.
selected: "all" or array of indices or mask
Specify which features to apply the transform to.
Returns
-------
X : array or sparse matrix, shape=(n_samples, n_features_new)
"""
X = check_array(X, accept_sparse='csc', copy=copy, dtype=FLOAT_DTYPES)
if isinstance(selected, six.string_types) and selected == "all":
return transform(X)
if len(selected) == 0:
return X
n_features = X.shape[1]
ind = np.arange(n_features)
sel = np.zeros(n_features, dtype=bool)
sel[np.asarray(selected)] = True
not_sel = np.logical_not(sel)
n_selected = np.sum(sel)
if n_selected == 0:
# No features selected.
return X
elif n_selected == n_features:
# All features selected.
return transform(X)
else:
X_sel = transform(X[:, ind[sel]])
X_not_sel = X[:, ind[not_sel]]
if sparse.issparse(X_sel) or sparse.issparse(X_not_sel):
return sparse.hstack((X_sel, X_not_sel))
else:
return np.hstack((X_sel, X_not_sel))
class OneHotEncoder(BaseEstimator, TransformerMixin):
"""Encode categorical integer features using a one-hot aka one-of-K scheme.
The input to this transformer should be a matrix of integers, denoting
the values taken on by categorical (discrete) features. The output will be
a sparse matrix where each column corresponds to one possible value of one
feature. It is assumed that input features take on values in the range
[0, n_values).
This encoding is needed for feeding categorical data to many scikit-learn
estimators, notably linear models and SVMs with the standard kernels.
Note: a one-hot encoding of y labels should use a LabelBinarizer
instead.
Read more in the :ref:`User Guide <preprocessing_categorical_features>`.
Parameters
----------
n_values : 'auto', int or array of ints
Number of values per feature.
- 'auto' : determine value range from training data.
- int : number of categorical values per feature.
Each feature value should be in ``range(n_values)``
- array : ``n_values[i]`` is the number of categorical values in
``X[:, i]``. Each feature value should be
in ``range(n_values[i])``
categorical_features : "all" or array of indices or mask
Specify what features are treated as categorical.
- 'all' (default): All features are treated as categorical.
- array of indices: Array of categorical feature indices.
- mask: Array of length n_features and with dtype=bool.
Non-categorical features are always stacked to the right of the matrix.
dtype : number type, default=np.float
Desired dtype of output.
sparse : boolean, default=True
Will return sparse matrix if set True else will return an array.
handle_unknown : str, 'error' or 'ignore'
Whether to raise an error or ignore if an unknown categorical feature is
present during transform.
Attributes
----------
active_features_ : array
Indices for active features, meaning values that actually occur
in the training set. Only available when n_values is ``'auto'``.
feature_indices_ : array of shape (n_features,)
Indices to feature ranges.
Feature ``i`` in the original data is mapped to features
from ``feature_indices_[i]`` to ``feature_indices_[i+1]``
(and then potentially masked by `active_features_` afterwards)
n_values_ : array of shape (n_features,)
Maximum number of values per feature.
Examples
--------
Given a dataset with three features and two samples, we let the encoder
find the maximum value per feature and transform the data to a binary
one-hot encoding.
>>> from sklearn.preprocessing import OneHotEncoder
>>> enc = OneHotEncoder()
>>> enc.fit([[0, 0, 3], [1, 1, 0], [0, 2, 1], \
[1, 0, 2]]) # doctest: +ELLIPSIS
OneHotEncoder(categorical_features='all', dtype=<... 'numpy.float64'>,
handle_unknown='error', n_values='auto', sparse=True)
>>> enc.n_values_
array([2, 3, 4])
>>> enc.feature_indices_
array([0, 2, 5, 9])
>>> enc.transform([[0, 1, 1]]).toarray()
array([[ 1., 0., 0., 1., 0., 0., 1., 0., 0.]])
See also
--------
sklearn.feature_extraction.DictVectorizer : performs a one-hot encoding of
dictionary items (also handles string-valued features).
sklearn.feature_extraction.FeatureHasher : performs an approximate one-hot
encoding of dictionary items or strings.
sklearn.preprocessing.LabelBinarizer : binarizes labels in a one-vs-all
fashion.
sklearn.preprocessing.MultiLabelBinarizer : transforms between iterable of
iterables and a multilabel format, e.g. a (samples x classes) binary
matrix indicating the presence of a class label.
sklearn.preprocessing.LabelEncoder : encodes labels with values between 0
and n_classes-1.
"""
def __init__(self, n_values="auto", categorical_features="all",
dtype=np.float64, sparse=True, handle_unknown='error'):
self.n_values = n_values
self.categorical_features = categorical_features
self.dtype = dtype
self.sparse = sparse
self.handle_unknown = handle_unknown
def fit(self, X, y=None):
"""Fit OneHotEncoder to X.
Parameters
----------
X : array-like, shape [n_samples, n_feature]
Input array of type int.
Returns
-------
self
"""
self.fit_transform(X)
return self
def _fit_transform(self, X):
"""Assumes X contains only categorical features."""
X = check_array(X, dtype=np.int)
if np.any(X < 0):
raise ValueError("X needs to contain only non-negative integers.")
n_samples, n_features = X.shape
if (isinstance(self.n_values, six.string_types) and
self.n_values == 'auto'):
n_values = np.max(X, axis=0) + 1
elif isinstance(self.n_values, numbers.Integral):
if (np.max(X, axis=0) >= self.n_values).any():
raise ValueError("Feature out of bounds for n_values=%d"
% self.n_values)
n_values = np.empty(n_features, dtype=np.int)
n_values.fill(self.n_values)
else:
try:
n_values = np.asarray(self.n_values, dtype=int)
except (ValueError, TypeError):
raise TypeError("Wrong type for parameter `n_values`. Expected"
" 'auto', int or array of ints, got %r"
% type(self.n_values))
if n_values.ndim < 1 or n_values.shape[0] != X.shape[1]:
raise ValueError("Shape mismatch: if n_values is an array,"
" it has to be of shape (n_features,).")
self.n_values_ = n_values
n_values = np.hstack([[0], n_values])
indices = np.cumsum(n_values)
self.feature_indices_ = indices
column_indices = (X + indices[:-1]).ravel()
row_indices = np.repeat(np.arange(n_samples, dtype=np.int32),
n_features)
data = np.ones(n_samples * n_features)
out = sparse.coo_matrix((data, (row_indices, column_indices)),
shape=(n_samples, indices[-1]),
dtype=self.dtype).tocsr()
if (isinstance(self.n_values, six.string_types) and
self.n_values == 'auto'):
mask = np.array(out.sum(axis=0)).ravel() != 0
active_features = np.where(mask)[0]
out = out[:, active_features]
self.active_features_ = active_features
return out if self.sparse else out.toarray()
def fit_transform(self, X, y=None):
"""Fit OneHotEncoder to X, then transform X.
Equivalent to self.fit(X).transform(X), but more convenient and more
efficient. See fit for the parameters, transform for the return value.
"""
return _transform_selected(X, self._fit_transform,
self.categorical_features, copy=True)
def _transform(self, X):
"""Assumes X contains only categorical features."""
X = check_array(X, dtype=np.int)
if np.any(X < 0):
raise ValueError("X needs to contain only non-negative integers.")
n_samples, n_features = X.shape
indices = self.feature_indices_
if n_features != indices.shape[0] - 1:
raise ValueError("X has different shape than during fitting."
" Expected %d, got %d."
% (indices.shape[0] - 1, n_features))
# We use only those categorical features of X that are known using fit.
# i.e. less than n_values_, using mask.
# This means, if self.handle_unknown is "ignore", the row_indices and
# col_indices corresponding to the unknown categorical feature are
# ignored.
mask = (X < self.n_values_).ravel()
if np.any(~mask):
if self.handle_unknown not in ['error', 'ignore']:
raise ValueError("handle_unknown should be either error or "
"unknown got %s" % self.handle_unknown)
if self.handle_unknown == 'error':
raise ValueError("unknown categorical feature present %s "
"during transform." % X.ravel()[~mask])
column_indices = (X + indices[:-1]).ravel()[mask]
row_indices = np.repeat(np.arange(n_samples, dtype=np.int32),
n_features)[mask]
data = np.ones(np.sum(mask))
out = sparse.coo_matrix((data, (row_indices, column_indices)),
shape=(n_samples, indices[-1]),
dtype=self.dtype).tocsr()
if (isinstance(self.n_values, six.string_types) and
self.n_values == 'auto'):
out = out[:, self.active_features_]
return out if self.sparse else out.toarray()
def transform(self, X):
"""Transform X using one-hot encoding.
Parameters
----------
X : array-like, shape [n_samples, n_features]
Input array of type int.
Returns
-------
X_out : sparse matrix if sparse=True else a 2-d array, dtype=int
Transformed input.
"""
return _transform_selected(X, self._transform,
self.categorical_features, copy=True)
| bsd-3-clause |
damonkohler/sl4a | python-build/python-libs/gdata/src/atom/core.py | 137 | 20292 | #!/usr/bin/env python
#
# Copyright (C) 2008 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# This module is used for version 2 of the Google Data APIs.
__author__ = 'j.s@google.com (Jeff Scudder)'
import inspect
try:
from xml.etree import cElementTree as ElementTree
except ImportError:
try:
import cElementTree as ElementTree
except ImportError:
try:
from xml.etree import ElementTree
except ImportError:
from elementtree import ElementTree
STRING_ENCODING = 'utf-8'
class XmlElement(object):
"""Represents an element node in an XML document.
The text member is a UTF-8 encoded str or unicode.
"""
_qname = None
_other_elements = None
_other_attributes = None
# The rule set contains mappings for XML qnames to child members and the
# appropriate member classes.
_rule_set = None
_members = None
text = None
def __init__(self, text=None, *args, **kwargs):
if ('_members' not in self.__class__.__dict__
or self.__class__._members is None):
self.__class__._members = tuple(self.__class__._list_xml_members())
for member_name, member_type in self.__class__._members:
if member_name in kwargs:
setattr(self, member_name, kwargs[member_name])
else:
if isinstance(member_type, list):
setattr(self, member_name, [])
else:
setattr(self, member_name, None)
self._other_elements = []
self._other_attributes = {}
if text is not None:
self.text = text
def _list_xml_members(cls):
"""Generator listing all members which are XML elements or attributes.
The following members would be considered XML members:
foo = 'abc' - indicates an XML attribute with the qname abc
foo = SomeElement - indicates an XML child element
foo = [AnElement] - indicates a repeating XML child element, each instance
will be stored in a list in this member
foo = ('att1', '{http://example.com/namespace}att2') - indicates an XML
attribute which has different parsing rules in different versions of
the protocol. Version 1 of the XML parsing rules will look for an
attribute with the qname 'att1' but version 2 of the parsing rules will
look for a namespaced attribute with the local name of 'att2' and an
XML namespace of 'http://example.com/namespace'.
"""
members = []
for pair in inspect.getmembers(cls):
if not pair[0].startswith('_') and pair[0] != 'text':
member_type = pair[1]
if (isinstance(member_type, tuple) or isinstance(member_type, list)
or isinstance(member_type, (str, unicode))
or (inspect.isclass(member_type)
and issubclass(member_type, XmlElement))):
members.append(pair)
return members
_list_xml_members = classmethod(_list_xml_members)
def _get_rules(cls, version):
"""Initializes the _rule_set for the class which is used when parsing XML.
This method is used internally for parsing and generating XML for an
XmlElement. It is not recommended that you call this method directly.
Returns:
A tuple containing the XML parsing rules for the appropriate version.
The tuple looks like:
(qname, {sub_element_qname: (member_name, member_class, repeating), ..},
{attribute_qname: member_name})
To give a couple of concrete examples, the atom.data.Control _get_rules
with version of 2 will return:
('{http://www.w3.org/2007/app}control',
{'{http://www.w3.org/2007/app}draft': ('draft',
<class 'atom.data.Draft'>,
False)},
{})
Calling _get_rules with version 1 on gdata.data.FeedLink will produce:
('{http://schemas.google.com/g/2005}feedLink',
{'{http://www.w3.org/2005/Atom}feed': ('feed',
<class 'gdata.data.GDFeed'>,
False)},
{'href': 'href', 'readOnly': 'read_only', 'countHint': 'count_hint',
'rel': 'rel'})
"""
# Initialize the _rule_set to make sure there is a slot available to store
# the parsing rules for this version of the XML schema.
# Look for rule set in the class __dict__ proxy so that only the
# _rule_set for this class will be found. By using the dict proxy
# we avoid finding rule_sets defined in superclasses.
# The four lines below provide support for any number of versions, but it
# runs a bit slower than hard-coding slots for two versions, so I'm using
# the below two lines.
#if '_rule_set' not in cls.__dict__ or cls._rule_set is None:
# cls._rule_set = []
#while len(cls.__dict__['_rule_set']) < version:
# cls._rule_set.append(None)
# If there is no rule set cache in the class, provide slots for two XML
# versions. If and when there is a version 3, this list will need to be
# expanded.
if '_rule_set' not in cls.__dict__ or cls._rule_set is None:
cls._rule_set = [None, None]
# If a version higher than 2 is requested, fall back to version 2 because
# 2 is currently the highest supported version.
if version > 2:
return cls._get_rules(2)
# Check the dict proxy for the rule set to avoid finding any rule sets
# which belong to the superclass. We only want rule sets for this class.
if cls._rule_set[version-1] is None:
# The rule set for each version consists of the qname for this element
# ('{namespace}tag'), a dictionary (elements) for looking up the
# corresponding class member when given a child element's qname, and a
# dictionary (attributes) for looking up the corresponding class member
# when given an XML attribute's qname.
elements = {}
attributes = {}
if ('_members' not in cls.__dict__ or cls._members is None):
cls._members = tuple(cls._list_xml_members())
for member_name, target in cls._members:
if isinstance(target, list):
# This member points to a repeating element.
elements[_get_qname(target[0], version)] = (member_name, target[0],
True)
elif isinstance(target, tuple):
# This member points to a versioned XML attribute.
if version <= len(target):
attributes[target[version-1]] = member_name
else:
attributes[target[-1]] = member_name
elif isinstance(target, (str, unicode)):
# This member points to an XML attribute.
attributes[target] = member_name
elif issubclass(target, XmlElement):
# This member points to a single occurrence element.
elements[_get_qname(target, version)] = (member_name, target, False)
version_rules = (_get_qname(cls, version), elements, attributes)
cls._rule_set[version-1] = version_rules
return version_rules
else:
return cls._rule_set[version-1]
_get_rules = classmethod(_get_rules)
def get_elements(self, tag=None, namespace=None, version=1):
"""Find all sub elements which match the tag and namespace.
To find all elements in this object, call get_elements with the tag and
namespace both set to None (the default). This method searches through
the object's members and the elements stored in _other_elements which
did not match any of the XML parsing rules for this class.
Args:
tag: str
namespace: str
version: int Specifies the version of the XML rules to be used when
searching for matching elements.
Returns:
A list of the matching XmlElements.
"""
matches = []
ignored1, elements, ignored2 = self.__class__._get_rules(version)
if elements:
for qname, element_def in elements.iteritems():
member = getattr(self, element_def[0])
if member:
if _qname_matches(tag, namespace, qname):
if element_def[2]:
# If this is a repeating element, copy all instances into the
# result list.
matches.extend(member)
else:
matches.append(member)
for element in self._other_elements:
if _qname_matches(tag, namespace, element._qname):
matches.append(element)
return matches
GetElements = get_elements
# FindExtensions and FindChildren are provided for backwards compatibility
# to the atom.AtomBase class.
# However, FindExtensions may return more results than the v1 atom.AtomBase
# method does, because get_elements searches both the expected children
# and the unexpected "other elements". The old AtomBase.FindExtensions
# method searched only "other elements" AKA extension_elements.
FindExtensions = get_elements
FindChildren = get_elements
def get_attributes(self, tag=None, namespace=None, version=1):
"""Find all attributes which match the tag and namespace.
To find all attributes in this object, call get_attributes with the tag
and namespace both set to None (the default). This method searches
through the object's members and the attributes stored in
_other_attributes which did not fit any of the XML parsing rules for this
class.
Args:
tag: str
namespace: str
version: int Specifies the version of the XML rules to be used when
searching for matching attributes.
Returns:
A list of XmlAttribute objects for the matching attributes.
"""
matches = []
ignored1, ignored2, attributes = self.__class__._get_rules(version)
if attributes:
for qname, attribute_def in attributes.iteritems():
member = getattr(self, attribute_def[0])
if member:
if _qname_matches(tag, namespace, qname):
matches.append(XmlAttribute(qname, member))
for qname, value in self._other_attributes.iteritems():
if _qname_matches(tag, namespace, qname):
matches.append(XmlAttribute(qname, value))
return matches
GetAttributes = get_attributes
def _harvest_tree(self, tree, version=1):
"""Populates object members from the data in the tree Element."""
qname, elements, attributes = self.__class__._get_rules(version)
for element in tree:
if elements and element.tag in elements:
definition = elements[element.tag]
# If this is a repeating element, make sure the member is set to a
# list.
if definition[2]:
if getattr(self, definition[0]) is None:
setattr(self, definition[0], [])
getattr(self, definition[0]).append(_xml_element_from_tree(element,
definition[1], version))
else:
setattr(self, definition[0], _xml_element_from_tree(element,
definition[1], version))
else:
self._other_elements.append(_xml_element_from_tree(element, XmlElement,
version))
for attrib, value in tree.attrib.iteritems():
if attributes and attrib in attributes:
setattr(self, attributes[attrib], value)
else:
self._other_attributes[attrib] = value
if tree.text:
self.text = tree.text
def _to_tree(self, version=1, encoding=None):
new_tree = ElementTree.Element(_get_qname(self, version))
self._attach_members(new_tree, version, encoding)
return new_tree
def _attach_members(self, tree, version=1, encoding=None):
"""Convert members to XML elements/attributes and add them to the tree.
Args:
tree: An ElementTree.Element which will be modified. The members of
this object will be added as child elements or attributes
according to the rules described in _expected_elements and
_expected_attributes. The elements and attributes stored in
other_attributes and other_elements are also added as children
of this tree.
version: int Ignored in this method but used by VersionedElement.
encoding: str (optional)
"""
qname, elements, attributes = self.__class__._get_rules(version)
encoding = encoding or STRING_ENCODING
# Add the expected elements and attributes to the tree.
if elements:
for tag, element_def in elements.iteritems():
member = getattr(self, element_def[0])
# If this is a repeating element and there are members in the list.
if member and element_def[2]:
for instance in member:
instance._become_child(tree, version)
elif member:
member._become_child(tree, version)
if attributes:
for attribute_tag, member_name in attributes.iteritems():
value = getattr(self, member_name)
if value:
tree.attrib[attribute_tag] = value
# Add the unexpected (other) elements and attributes to the tree.
for element in self._other_elements:
element._become_child(tree, version)
for key, value in self._other_attributes.iteritems():
# I'm not sure if unicode can be used in the attribute name, so for now
# we assume the encoding is correct for the attribute name.
if not isinstance(value, unicode):
value = value.decode(encoding)
tree.attrib[key] = value
if self.text:
if isinstance(self.text, unicode):
tree.text = self.text
else:
tree.text = self.text.decode(encoding)
def to_string(self, version=1, encoding=None):
"""Converts this object to XML."""
return ElementTree.tostring(self._to_tree(version, encoding))
ToString = to_string
def __str__(self):
return self.to_string()
def _become_child(self, tree, version=1):
"""Adds a child element to tree with the XML data in self."""
new_child = ElementTree.Element('')
tree.append(new_child)
new_child.tag = _get_qname(self, version)
self._attach_members(new_child, version)
def __get_extension_elements(self):
return self._other_elements
def __set_extension_elements(self, elements):
self._other_elements = elements
extension_elements = property(__get_extension_elements,
__set_extension_elements,
"""Provides backwards compatibility for v1 atom.AtomBase classes.""")
def __get_extension_attributes(self):
return self._other_attributes
def __set_extension_attributes(self, attributes):
self._other_attributes = attributes
extension_attributes = property(__get_extension_attributes,
__set_extension_attributes,
"""Provides backwards compatibility for v1 atom.AtomBase classes.""")
def _get_tag(self, version=1):
qname = _get_qname(self, version)
return qname[qname.find('}')+1:]
def _get_namespace(self, version=1):
qname = _get_qname(self, version)
if qname.startswith('{'):
return qname[1:qname.find('}')]
else:
return None
def _set_tag(self, tag):
if isinstance(self._qname, tuple):
# tuples are immutable, so rebuild the versioned qname with a new first entry
if self._qname[0].startswith('{'):
new_first = '{%s}%s' % (self._get_namespace(1), tag)
else:
new_first = tag
self._qname = (new_first,) + self._qname[1:]
else:
if self._qname.startswith('{'):
self._qname = '{%s}%s' % (self._get_namespace(), tag)
else:
self._qname = tag
def _set_namespace(self, namespace):
if isinstance(self._qname, tuple):
# tuples are immutable, so rebuild the versioned qname with a new first entry
if namespace:
new_first = '{%s}%s' % (namespace, self._get_tag(1))
else:
new_first = self._get_tag(1)
self._qname = (new_first,) + self._qname[1:]
else:
if namespace:
self._qname = '{%s}%s' % (namespace, self._get_tag(1))
else:
self._qname = self._get_tag(1)
tag = property(_get_tag, _set_tag,
"""Provides backwards compatibility for v1 atom.AtomBase classes.""")
namespace = property(_get_namespace, _set_namespace,
"""Provides backwards compatibility for v1 atom.AtomBase classes.""")
# Provided for backwards compatibility to atom.ExtensionElement
children = extension_elements
attributes = extension_attributes
def _get_qname(element, version):
if isinstance(element._qname, tuple):
if version <= len(element._qname):
return element._qname[version-1]
else:
return element._qname[-1]
else:
return element._qname
def _qname_matches(tag, namespace, qname):
"""Logic determines if a QName matches the desired local tag and namespace.
This is used in XmlElement.get_elements and XmlElement.get_attributes to
find matches in the element's members (among all expected-and-unexpected
elements-and-attributes).
Args:
tag: string
namespace: string
qname: string in the form '{xml_namespace}localtag' or 'tag' if there is
no namespace.
Returns:
boolean True if the member's tag and namespace fit the expected tag and
namespace.
"""
# If there is no expected namespace or tag, then everything will match.
if qname is None:
member_tag = None
member_namespace = None
else:
if qname.startswith('{'):
member_namespace = qname[1:qname.index('}')]
member_tag = qname[qname.index('}') + 1:]
else:
member_namespace = None
member_tag = qname
return ((tag is None and namespace is None)
# If there is a tag, but no namespace, see if the local tag matches.
or (namespace is None and member_tag == tag)
# There was no tag, but there was a namespace so see if the namespaces
# match.
or (tag is None and member_namespace == namespace)
# There was no tag, and the desired elements have no namespace, so check
# to see that the member's namespace is None.
or (tag is None and namespace == ''
and member_namespace is None)
# The tag and the namespace both match.
or (tag == member_tag
and namespace == member_namespace)
# The tag matches, and the expected namespace is the empty namespace,
# check to make sure the member's namespace is None.
or (tag == member_tag and namespace == ''
and member_namespace is None))
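# Illustrative examples (not part of the original module): concrete cases of
# the matching rules implemented above, written as assertions.
def _qname_matches_examples():
    assert _qname_matches(None, None, '{http://www.w3.org/2005/Atom}id')
    assert _qname_matches('id', None, '{http://www.w3.org/2005/Atom}id')
    assert not _qname_matches('id', '', '{http://www.w3.org/2005/Atom}id')
    assert _qname_matches('id', '', 'id')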
def parse(xml_string, target_class=None, version=1, encoding=None):
"""Parses the XML string according to the rules for the target_class.
Args:
xml_string: str or unicode
target_class: XmlElement or a subclass. If None is specified, the
XmlElement class is used.
version: int (optional) The version of the schema which should be used when
converting the XML into an object. The default is 1.
encoding: str (optional) The character encoding of the bytes in the
xml_string. Default is 'UTF-8'.
"""
if target_class is None:
target_class = XmlElement
if isinstance(xml_string, unicode):
if encoding is None:
xml_string = xml_string.encode(STRING_ENCODING)
else:
xml_string = xml_string.encode(encoding)
tree = ElementTree.fromstring(xml_string)
return _xml_element_from_tree(tree, target_class, version)
Parse = parse
xml_element_from_string = parse
XmlElementFromString = xml_element_from_string
def _xml_element_from_tree(tree, target_class, version=1):
if target_class._qname is None:
instance = target_class()
instance._qname = tree.tag
instance._harvest_tree(tree, version)
return instance
# TODO handle the namespace-only case
# Namespace only will be used with Google Spreadsheets rows and
# Google Base item attributes.
elif tree.tag == _get_qname(target_class, version):
instance = target_class()
instance._harvest_tree(tree, version)
return instance
return None
class XmlAttribute(object):
def __init__(self, qname, value):
self._qname = qname
self.value = value
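# Illustrative sketch (not part of the original module): declaring a small
# XmlElement subclass and round-tripping it through parse/to_string. The
# namespace, tag and attribute names below are made up for the example.
def _xml_element_example():
    class Item(XmlElement):
        _qname = '{http://example.com/ns}item'
        name = 'name'  # maps the 'name' XML attribute to a Python member
    item = parse('<item xmlns="http://example.com/ns" name="spam">text</item>',
                 Item)
    assert item.name == 'spam'
    assert item.text == 'text'
    return item.to_string()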
| apache-2.0 |
jeffery-do/Vizdoombot | doom/lib/python3.5/site-packages/scipy/optimize/tests/test_minpack.py | 10 | 21708 | """
Unit tests for optimization routines from minpack.py.
"""
from __future__ import division, print_function, absolute_import
import warnings
from numpy.testing import (assert_, assert_almost_equal, assert_array_equal,
assert_array_almost_equal, TestCase, run_module_suite, assert_raises,
assert_allclose)
import numpy as np
from numpy import array, float64, matrix
from scipy import optimize
from scipy.special import lambertw
from scipy.optimize.minpack import leastsq, curve_fit, fixed_point
from scipy._lib._numpy_compat import _assert_warns
from scipy.optimize import OptimizeWarning
class ReturnShape(object):
"""This class exists to create a callable that does not have a '__name__' attribute.
__init__ takes the argument 'shape', which should be a tuple of ints. When an instance
is called with a single argument 'x', it returns numpy.ones(shape).
"""
def __init__(self, shape):
self.shape = shape
def __call__(self, x):
return np.ones(self.shape)
def dummy_func(x, shape):
"""A function that returns an array of ones of the given shape.
`x` is ignored.
"""
return np.ones(shape)
# Function and jacobian for tests of solvers for systems of nonlinear
# equations
def pressure_network(flow_rates, Qtot, k):
"""Evaluate non-linear equation system representing
the pressures and flows in a system of n parallel pipes::
f_i = P_i - P_0, for i = 1..n
f_0 = sum(Q_i) - Qtot
Where Q_i is the flow rate in pipe i and P_i the pressure in that pipe.
Pressure is modeled as P = k*Q**2, where k is a valve coefficient and
Q is the flow rate.
Parameters
----------
flow_rates : ndarray
A 1D array of n flow rates [kg/s].
Qtot : float
A scalar, the total input flow rate [kg/s].
k : ndarray
A 1D array of n valve coefficients [1/kg m].
Returns
-------
F : float
A 1D array, F[i] == f_i.
"""
P = k * flow_rates**2
F = np.hstack((P[1:] - P[0], flow_rates.sum() - Qtot))
return F
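# Worked example (illustrative, not part of the original test module): with
# four equal pipes (k = 0.5 each) and Qtot = 4, equal flows of 1 kg/s give
# equal pressures and the right total, so every residual is zero:
#   pressure_network(np.ones(4), 4, np.ones(4) * 0.5) -> array([0., 0., 0., 0.])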
def pressure_network_jacobian(flow_rates, Qtot, k):
"""Return the jacobian of the equation system F(flow_rates)
computed by `pressure_network` with respect to
*flow_rates*. See `pressure_network` for the detailed
description of parameters.
Returns
-------
jac : float
*n* by *n* matrix ``df_i/dQ_i`` where ``n = len(flow_rates)``
and *f_i* and *Q_i* are described in the doc for `pressure_network`
"""
n = len(flow_rates)
pdiff = np.diag(flow_rates[1:] * 2 * k[1:] - 2 * flow_rates[0] * k[0])
jac = np.empty((n, n))
jac[:n-1, :n-1] = pdiff * 0
jac[:n-1, n-1] = 0
jac[n-1, :] = np.ones(n)
return jac
def pressure_network_fun_and_grad(flow_rates, Qtot, k):
return (pressure_network(flow_rates, Qtot, k),
pressure_network_jacobian(flow_rates, Qtot, k))
class TestFSolve(TestCase):
def test_pressure_network_no_gradient(self):
# fsolve without gradient, equal pipes -> equal flows.
k = np.ones(4) * 0.5
Qtot = 4
initial_guess = array([2., 0., 2., 0.])
final_flows, info, ier, mesg = optimize.fsolve(
pressure_network, initial_guess, args=(Qtot, k),
full_output=True)
assert_array_almost_equal(final_flows, np.ones(4))
assert_(ier == 1, mesg)
def test_pressure_network_with_gradient(self):
# fsolve with gradient, equal pipes -> equal flows
k = np.ones(4) * 0.5
Qtot = 4
initial_guess = array([2., 0., 2., 0.])
final_flows = optimize.fsolve(
pressure_network, initial_guess, args=(Qtot, k),
fprime=pressure_network_jacobian)
assert_array_almost_equal(final_flows, np.ones(4))
def test_wrong_shape_func_callable(self):
func = ReturnShape(1)
# x0 is a list of two elements, but func will return an array with
# length 1, so this should result in a TypeError.
x0 = [1.5, 2.0]
assert_raises(TypeError, optimize.fsolve, func, x0)
def test_wrong_shape_func_function(self):
# x0 is a list of two elements, but func will return an array with
# length 1, so this should result in a TypeError.
x0 = [1.5, 2.0]
assert_raises(TypeError, optimize.fsolve, dummy_func, x0, args=((1,),))
def test_wrong_shape_fprime_callable(self):
func = ReturnShape(1)
deriv_func = ReturnShape((2,2))
assert_raises(TypeError, optimize.fsolve, func, x0=[0,1], fprime=deriv_func)
def test_wrong_shape_fprime_function(self):
func = lambda x: dummy_func(x, (2,))
deriv_func = lambda x: dummy_func(x, (3,3))
assert_raises(TypeError, optimize.fsolve, func, x0=[0,1], fprime=deriv_func)
def test_float32(self):
func = lambda x: np.array([x[0] - 100, x[1] - 1000], dtype=np.float32)**2
p = optimize.fsolve(func, np.array([1, 1], np.float32))
assert_allclose(func(p), [0, 0], atol=1e-3)
class TestRootHybr(TestCase):
def test_pressure_network_no_gradient(self):
# root/hybr without gradient, equal pipes -> equal flows
k = np.ones(4) * 0.5
Qtot = 4
initial_guess = array([2., 0., 2., 0.])
final_flows = optimize.root(pressure_network, initial_guess,
method='hybr', args=(Qtot, k)).x
assert_array_almost_equal(final_flows, np.ones(4))
def test_pressure_network_with_gradient(self):
# root/hybr with gradient, equal pipes -> equal flows
k = np.ones(4) * 0.5
Qtot = 4
initial_guess = matrix([2., 0., 2., 0.])
final_flows = optimize.root(pressure_network, initial_guess,
args=(Qtot, k), method='hybr',
jac=pressure_network_jacobian).x
assert_array_almost_equal(final_flows, np.ones(4))
def test_pressure_network_with_gradient_combined(self):
# root/hybr with gradient and function combined, equal pipes -> equal
# flows
k = np.ones(4) * 0.5
Qtot = 4
initial_guess = array([2., 0., 2., 0.])
final_flows = optimize.root(pressure_network_fun_and_grad,
initial_guess, args=(Qtot, k),
method='hybr', jac=True).x
assert_array_almost_equal(final_flows, np.ones(4))
class TestRootLM(TestCase):
def test_pressure_network_no_gradient(self):
# root/lm without gradient, equal pipes -> equal flows
k = np.ones(4) * 0.5
Qtot = 4
initial_guess = array([2., 0., 2., 0.])
final_flows = optimize.root(pressure_network, initial_guess,
method='lm', args=(Qtot, k)).x
assert_array_almost_equal(final_flows, np.ones(4))
class TestLeastSq(TestCase):
def setUp(self):
x = np.linspace(0, 10, 40)
a,b,c = 3.1, 42, -304.2
self.x = x
self.abc = a,b,c
y_true = a*x**2 + b*x + c
np.random.seed(0)
self.y_meas = y_true + 0.01*np.random.standard_normal(y_true.shape)
def residuals(self, p, y, x):
a,b,c = p
err = y-(a*x**2 + b*x + c)
return err
def test_basic(self):
p0 = array([0,0,0])
params_fit, ier = leastsq(self.residuals, p0,
args=(self.y_meas, self.x))
assert_(ier in (1,2,3,4), 'solution not found (ier=%d)' % ier)
# low precision due to random
assert_array_almost_equal(params_fit, self.abc, decimal=2)
def test_full_output(self):
p0 = matrix([0,0,0])
full_output = leastsq(self.residuals, p0,
args=(self.y_meas, self.x),
full_output=True)
params_fit, cov_x, infodict, mesg, ier = full_output
assert_(ier in (1,2,3,4), 'solution not found: %s' % mesg)
def test_input_untouched(self):
p0 = array([0,0,0],dtype=float64)
p0_copy = array(p0, copy=True)
full_output = leastsq(self.residuals, p0,
args=(self.y_meas, self.x),
full_output=True)
params_fit, cov_x, infodict, mesg, ier = full_output
assert_(ier in (1,2,3,4), 'solution not found: %s' % mesg)
assert_array_equal(p0, p0_copy)
def test_wrong_shape_func_callable(self):
func = ReturnShape(1)
# x0 is a list of two elements, but func will return an array with
# length 1, so this should result in a TypeError.
x0 = [1.5, 2.0]
assert_raises(TypeError, optimize.leastsq, func, x0)
def test_wrong_shape_func_function(self):
# x0 is a list of two elements, but func will return an array with
# length 1, so this should result in a TypeError.
x0 = [1.5, 2.0]
assert_raises(TypeError, optimize.leastsq, dummy_func, x0, args=((1,),))
def test_wrong_shape_Dfun_callable(self):
func = ReturnShape(1)
deriv_func = ReturnShape((2,2))
assert_raises(TypeError, optimize.leastsq, func, x0=[0,1], Dfun=deriv_func)
def test_wrong_shape_Dfun_function(self):
func = lambda x: dummy_func(x, (2,))
deriv_func = lambda x: dummy_func(x, (3,3))
assert_raises(TypeError, optimize.leastsq, func, x0=[0,1], Dfun=deriv_func)
def test_float32(self):
# Regression test for gh-1447
def func(p,x,y):
q = p[0]*np.exp(-(x-p[1])**2/(2.0*p[2]**2))+p[3]
return q - y
x = np.array([1.475,1.429,1.409,1.419,1.455,1.519,1.472, 1.368,1.286,
1.231], dtype=np.float32)
y = np.array([0.0168,0.0193,0.0211,0.0202,0.0171,0.0151,0.0185,0.0258,
0.034,0.0396], dtype=np.float32)
p0 = np.array([1.0,1.0,1.0,1.0])
p1, success = optimize.leastsq(func, p0, args=(x,y))
assert_(success in [1,2,3,4])
assert_((func(p1,x,y)**2).sum() < 1e-4 * (func(p0,x,y)**2).sum())
class TestCurveFit(TestCase):
def setUp(self):
self.y = array([1.0, 3.2, 9.5, 13.7])
self.x = array([1.0, 2.0, 3.0, 4.0])
def test_one_argument(self):
def func(x,a):
return x**a
popt, pcov = curve_fit(func, self.x, self.y)
assert_(len(popt) == 1)
assert_(pcov.shape == (1,1))
assert_almost_equal(popt[0], 1.9149, decimal=4)
assert_almost_equal(pcov[0,0], 0.0016, decimal=4)
# Test if we get the same with full_output. Regression test for #1415.
res = curve_fit(func, self.x, self.y, full_output=1)
(popt2, pcov2, infodict, errmsg, ier) = res
assert_array_almost_equal(popt, popt2)
def test_two_argument(self):
def func(x, a, b):
return b*x**a
popt, pcov = curve_fit(func, self.x, self.y)
assert_(len(popt) == 2)
assert_(pcov.shape == (2,2))
assert_array_almost_equal(popt, [1.7989, 1.1642], decimal=4)
assert_array_almost_equal(pcov, [[0.0852, -0.1260], [-0.1260, 0.1912]],
decimal=4)
def test_func_is_classmethod(self):
class test_self(object):
"""This class tests if curve_fit passes the correct number of
arguments when the model function is a class instance method.
"""
def func(self, x, a, b):
return b * x**a
test_self_inst = test_self()
popt, pcov = curve_fit(test_self_inst.func, self.x, self.y)
assert_(pcov.shape == (2,2))
assert_array_almost_equal(popt, [1.7989, 1.1642], decimal=4)
assert_array_almost_equal(pcov, [[0.0852, -0.1260], [-0.1260, 0.1912]],
decimal=4)
def test_regression_2639(self):
# This test fails if epsfcn in leastsq is too large.
x = [574.14200000000005, 574.154, 574.16499999999996,
574.17700000000002, 574.18799999999999, 574.19899999999996,
574.21100000000001, 574.22199999999998, 574.23400000000004,
574.245]
y = [859.0, 997.0, 1699.0, 2604.0, 2013.0, 1964.0, 2435.0,
1550.0, 949.0, 841.0]
guess = [574.1861428571428, 574.2155714285715, 1302.0, 1302.0,
0.0035019999999983615, 859.0]
good = [5.74177150e+02, 5.74209188e+02, 1.74187044e+03, 1.58646166e+03,
1.0068462e-02, 8.57450661e+02]
def f_double_gauss(x, x0, x1, A0, A1, sigma, c):
return (A0*np.exp(-(x-x0)**2/(2.*sigma**2))
+ A1*np.exp(-(x-x1)**2/(2.*sigma**2)) + c)
popt, pcov = curve_fit(f_double_gauss, x, y, guess, maxfev=10000)
assert_allclose(popt, good, rtol=1e-5)
def test_pcov(self):
xdata = np.array([0, 1, 2, 3, 4, 5])
ydata = np.array([1, 1, 5, 7, 8, 12])
sigma = np.array([1, 2, 1, 2, 1, 2])
def f(x, a, b):
return a*x + b
for method in ['lm', 'trf', 'dogbox']:
popt, pcov = curve_fit(f, xdata, ydata, p0=[2, 0], sigma=sigma,
method=method)
perr_scaled = np.sqrt(np.diag(pcov))
assert_allclose(perr_scaled, [0.20659803, 0.57204404], rtol=1e-3)
popt, pcov = curve_fit(f, xdata, ydata, p0=[2, 0], sigma=3*sigma,
method=method)
perr_scaled = np.sqrt(np.diag(pcov))
assert_allclose(perr_scaled, [0.20659803, 0.57204404], rtol=1e-3)
popt, pcov = curve_fit(f, xdata, ydata, p0=[2, 0], sigma=sigma,
absolute_sigma=True, method=method)
perr = np.sqrt(np.diag(pcov))
assert_allclose(perr, [0.30714756, 0.85045308], rtol=1e-3)
popt, pcov = curve_fit(f, xdata, ydata, p0=[2, 0], sigma=3*sigma,
absolute_sigma=True, method=method)
perr = np.sqrt(np.diag(pcov))
assert_allclose(perr, [3*0.30714756, 3*0.85045308], rtol=1e-3)
# infinite variances
def f_flat(x, a, b):
return a*x
with warnings.catch_warnings():
# suppress warnings when testing with inf's
warnings.filterwarnings('ignore', category=OptimizeWarning)
popt, pcov = curve_fit(f_flat, xdata, ydata, p0=[2, 0],
sigma=sigma)
assert_(pcov.shape == (2, 2))
pcov_expected = np.array([np.inf]*4).reshape(2, 2)
assert_array_equal(pcov, pcov_expected)
popt, pcov = curve_fit(f, xdata[:2], ydata[:2], p0=[2, 0])
assert_(pcov.shape == (2, 2))
assert_array_equal(pcov, pcov_expected)
def test_array_like(self):
# Test sequence input. Regression test for gh-3037.
def f_linear(x, a, b):
return a*x + b
x = [1, 2, 3, 4]
y = [3, 5, 7, 9]
assert_allclose(curve_fit(f_linear, x, y)[0], [2, 1], atol=1e-10)
def test_indeterminate_covariance(self):
# Test that a warning is returned when pcov is indeterminate
xdata = np.array([1, 2, 3, 4, 5, 6])
ydata = np.array([1, 2, 3, 4, 5.5, 6])
_assert_warns(OptimizeWarning, curve_fit,
lambda x, a, b: a*x, xdata, ydata)
def test_NaN_handling(self):
# Test for correct handling of NaNs in input data: gh-3422
# create input with NaNs
xdata = np.array([1, np.nan, 3])
ydata = np.array([1, 2, 3])
assert_raises(ValueError, curve_fit,
lambda x, a, b: a*x + b, xdata, ydata)
assert_raises(ValueError, curve_fit,
lambda x, a, b: a*x + b, ydata, xdata)
assert_raises(ValueError, curve_fit, lambda x, a, b: a*x + b,
xdata, ydata, **{"check_finite": True})
def test_method_argument(self):
def f(x, a, b):
return a * np.exp(-b*x)
xdata = np.linspace(0, 1, 11)
ydata = f(xdata, 2., 2.)
for method in ['trf', 'dogbox', 'lm', None]:
popt, pcov = curve_fit(f, xdata, ydata, method=method)
assert_allclose(popt, [2., 2.])
assert_raises(ValueError, curve_fit, f, xdata, ydata, method='unknown')
def test_bounds(self):
def f(x, a, b):
return a * np.exp(-b*x)
xdata = np.linspace(0, 1, 11)
ydata = f(xdata, 2., 2.)
# The minimum w/out bounds is at [2., 2.],
# and with bounds it's at [1.5, smth].
bounds = ([1., 0], [1.5, 3.])
for method in [None, 'trf', 'dogbox']:
popt, pcov = curve_fit(f, xdata, ydata, bounds=bounds,
method=method)
assert_allclose(popt[0], 1.5)
# With bounds, the starting estimate is feasible.
popt, pcov = curve_fit(f, xdata, ydata, method='trf',
bounds=([0., 0], [0.6, np.inf]))
assert_allclose(popt[0], 0.6)
# method='lm' doesn't support bounds.
assert_raises(ValueError, curve_fit, f, xdata, ydata, bounds=bounds,
method='lm')
def test_bounds_p0(self):
# This test is for issue #5719. The problem was that an initial guess
# was ignored when 'trf' or 'dogbox' methods were invoked.
def f(x, a):
return np.sin(x + a)
xdata = np.linspace(-2*np.pi, 2*np.pi, 40)
ydata = np.sin(xdata)
bounds = (-3 * np.pi, 3 * np.pi)
for method in ['trf', 'dogbox']:
popt_1, _ = curve_fit(f, xdata, ydata, p0=2.1*np.pi)
popt_2, _ = curve_fit(f, xdata, ydata, p0=2.1*np.pi,
bounds=bounds, method=method)
# If the initial guess is ignored, then popt_2 would be close to 0.
assert_allclose(popt_1, popt_2)
def test_jac(self):
# Test that Jacobian callable is handled correctly and
# weighted if sigma is provided.
def f(x, a, b):
return a * np.exp(-b*x)
def jac(x, a, b):
e = np.exp(-b*x)
return np.vstack((e, -a * x * e)).T
xdata = np.linspace(0, 1, 11)
ydata = f(xdata, 2., 2.)
# Test numerical options for least_squares backend.
for method in ['trf', 'dogbox']:
for scheme in ['2-point', '3-point', 'cs']:
popt, pcov = curve_fit(f, xdata, ydata, jac=scheme,
method=method)
assert_allclose(popt, [2, 2])
# Test the analytic option.
for method in ['lm', 'trf', 'dogbox']:
popt, pcov = curve_fit(f, xdata, ydata, method=method, jac=jac)
assert_allclose(popt, [2, 2])
# Now add an outlier and provide sigma.
ydata[5] = 100
sigma = np.ones(xdata.shape[0])
sigma[5] = 200
for method in ['lm', 'trf', 'dogbox']:
popt, pcov = curve_fit(f, xdata, ydata, sigma=sigma, method=method,
jac=jac)
# Still the optimization process is influenced somehow,
# have to set rtol=1e-3.
assert_allclose(popt, [2, 2], rtol=1e-3)
class TestFixedPoint(TestCase):
def test_scalar_trivial(self):
# f(x) = 2x; fixed point should be x=0
def func(x):
return 2.0*x
x0 = 1.0
x = fixed_point(func, x0)
assert_almost_equal(x, 0.0)
def test_scalar_basic1(self):
# f(x) = x**2; x0=1.05; fixed point should be x=1
def func(x):
return x**2
x0 = 1.05
x = fixed_point(func, x0)
assert_almost_equal(x, 1.0)
def test_scalar_basic2(self):
# f(x) = x**0.5; x0=1.05; fixed point should be x=1
def func(x):
return x**0.5
x0 = 1.05
x = fixed_point(func, x0)
assert_almost_equal(x, 1.0)
def test_array_trivial(self):
def func(x):
return 2.0*x
x0 = [0.3, 0.15]
olderr = np.seterr(all='ignore')
try:
x = fixed_point(func, x0)
finally:
np.seterr(**olderr)
assert_almost_equal(x, [0.0, 0.0])
def test_array_basic1(self):
# f(x) = c * x**2; fixed point should be x=1/c
def func(x, c):
return c * x**2
c = array([0.75, 1.0, 1.25])
x0 = [1.1, 1.15, 0.9]
olderr = np.seterr(all='ignore')
try:
x = fixed_point(func, x0, args=(c,))
finally:
np.seterr(**olderr)
assert_almost_equal(x, 1.0/c)
def test_array_basic2(self):
# f(x) = c * x**0.5; fixed point should be x=c**2
def func(x, c):
return c * x**0.5
c = array([0.75, 1.0, 1.25])
x0 = [0.8, 1.1, 1.1]
x = fixed_point(func, x0, args=(c,))
assert_almost_equal(x, c**2)
def test_lambertw(self):
# python-list/2010-December/594592.html
xxroot = fixed_point(lambda xx: np.exp(-2.0*xx)/2.0, 1.0,
args=(), xtol=1e-12, maxiter=500)
assert_allclose(xxroot, np.exp(-2.0*xxroot)/2.0)
assert_allclose(xxroot, lambertw(1)/2)
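        # Derivation (illustrative): the fixed point satisfies x = exp(-2x)/2,
        # i.e. 2x*exp(2x) = 1, so 2x = W(1) and x = lambertw(1)/2, which is
        # exactly what the two assertions above check.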
def test_no_acceleration(self):
# github issue 5460
ks = 2
kl = 6
m = 1.3
n0 = 1.001
i0 = ((m-1)/m)*(kl/ks/m)**(1/(m-1))
def func(n):
return np.log(kl/ks/n) / np.log((i0*n/(n - 1))) + 1
n = fixed_point(func, n0, method='iteration')
assert_allclose(n, m)
if __name__ == "__main__":
run_module_suite()
| mit |
BeDjango/intef-openedx | cms/djangoapps/contentstore/management/commands/tests/test_delete_orphans.py | 86 | 5026 | """Tests running the delete_orphans command"""
import ddt
from django.core.management import call_command, CommandError
from contentstore.tests.test_orphan import TestOrphanBase
from xmodule.modulestore.tests.factories import CourseFactory
from xmodule.modulestore import ModuleStoreEnum
@ddt.ddt
class TestDeleteOrphan(TestOrphanBase):
"""
Tests for running the delete_orphan management command.
Inherits from TestOrphanBase in order to use its setUp method.
"""
def test_no_args(self):
"""
Test delete_orphans command with no arguments
"""
with self.assertRaisesRegexp(CommandError, 'Error: too few arguments'):
call_command('delete_orphans')
@ddt.data(ModuleStoreEnum.Type.split, ModuleStoreEnum.Type.mongo)
def test_delete_orphans_no_commit(self, default_store):
"""
Tests that running the command without a '--commit' argument
results in no orphans being deleted
"""
course = self.create_course_with_orphans(default_store)
call_command('delete_orphans', unicode(course.id))
self.assertTrue(self.store.has_item(course.id.make_usage_key('html', 'multi_parent_html')))
self.assertTrue(self.store.has_item(course.id.make_usage_key('vertical', 'OrphanVert')))
self.assertTrue(self.store.has_item(course.id.make_usage_key('chapter', 'OrphanChapter')))
self.assertTrue(self.store.has_item(course.id.make_usage_key('html', 'OrphanHtml')))
@ddt.data(ModuleStoreEnum.Type.split, ModuleStoreEnum.Type.mongo)
def test_delete_orphans_commit(self, default_store):
"""
Tests that running the command WITH the '--commit' argument
results in the orphans being deleted
"""
course = self.create_course_with_orphans(default_store)
call_command('delete_orphans', unicode(course.id), '--commit')
# make sure this module wasn't deleted
self.assertTrue(self.store.has_item(course.id.make_usage_key('html', 'multi_parent_html')))
# and make sure that these were
self.assertFalse(self.store.has_item(course.id.make_usage_key('vertical', 'OrphanVert')))
self.assertFalse(self.store.has_item(course.id.make_usage_key('chapter', 'OrphanChapter')))
self.assertFalse(self.store.has_item(course.id.make_usage_key('html', 'OrphanHtml')))
def test_delete_orphans_published_branch_split(self):
"""
Tests that if there are orphans only on the published branch,
running delete orphans with a course key that specifies
the published branch will delete the published orphan
"""
course, orphan = self.create_split_course_with_published_orphan()
published_branch = course.id.for_branch(ModuleStoreEnum.BranchName.published)
items_in_published = self.store.get_items(published_branch)
items_in_draft_preferred = self.store.get_items(course.id)
# call delete orphans, specifying the published branch
# of the course
call_command('delete_orphans', unicode(published_branch), '--commit')
# now all orphans should be deleted
self.assertOrphanCount(course.id, 0)
self.assertOrphanCount(published_branch, 0)
self.assertNotIn(orphan, self.store.get_items(published_branch))
# we should have one fewer item in the published branch of the course
self.assertEqual(
len(items_in_published) - 1,
len(self.store.get_items(published_branch)),
)
# and the same amount of items in the draft branch of the course
self.assertEqual(
len(items_in_draft_preferred),
len(self.store.get_items(course.id)),
)
def create_split_course_with_published_orphan(self):
"""
Helper to create a split course with a published orphan
"""
course = CourseFactory.create(default_store=ModuleStoreEnum.Type.split)
# create an orphan
orphan = self.store.create_item(
self.user.id, course.id, 'html', "PublishedOnlyOrphan"
)
self.store.publish(orphan.location, self.user.id)
# grab the published branch of the course
published_branch = course.id.for_branch(
ModuleStoreEnum.BranchName.published
)
# assert that this orphan is present in both branches
self.assertOrphanCount(course.id, 1)
self.assertOrphanCount(published_branch, 1)
# delete this orphan from the draft branch without
# auto-publishing this change to the published branch
self.store.delete_item(
orphan.location, self.user.id, skip_auto_publish=True
)
# now there should be no orphans in the draft branch, but
# there should be one in published
self.assertOrphanCount(course.id, 0)
self.assertOrphanCount(published_branch, 1)
self.assertIn(orphan, self.store.get_items(published_branch))
return course, orphan
| agpl-3.0 |
rethinkdb/rethinkdb | test/common/http_support/jinja2/_compat.py | 638 | 4042 | # -*- coding: utf-8 -*-
"""
jinja2._compat
~~~~~~~~~~~~~~
Some py2/py3 compatibility support based on a stripped down
version of six so we don't have to depend on a specific version
of it.
:copyright: Copyright 2013 by the Jinja team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
import sys
PY2 = sys.version_info[0] == 2
PYPY = hasattr(sys, 'pypy_translation_info')
_identity = lambda x: x
if not PY2:
unichr = chr
range_type = range
text_type = str
string_types = (str,)
iterkeys = lambda d: iter(d.keys())
itervalues = lambda d: iter(d.values())
iteritems = lambda d: iter(d.items())
import pickle
from io import BytesIO, StringIO
NativeStringIO = StringIO
def reraise(tp, value, tb=None):
if value.__traceback__ is not tb:
raise value.with_traceback(tb)
raise value
ifilter = filter
imap = map
izip = zip
intern = sys.intern
implements_iterator = _identity
implements_to_string = _identity
encode_filename = _identity
get_next = lambda x: x.__next__
else:
unichr = unichr
text_type = unicode
range_type = xrange
string_types = (str, unicode)
iterkeys = lambda d: d.iterkeys()
itervalues = lambda d: d.itervalues()
iteritems = lambda d: d.iteritems()
import cPickle as pickle
from cStringIO import StringIO as BytesIO, StringIO
NativeStringIO = BytesIO
exec('def reraise(tp, value, tb=None):\n raise tp, value, tb')
from itertools import imap, izip, ifilter
intern = intern
def implements_iterator(cls):
cls.next = cls.__next__
del cls.__next__
return cls
def implements_to_string(cls):
cls.__unicode__ = cls.__str__
cls.__str__ = lambda x: x.__unicode__().encode('utf-8')
return cls
get_next = lambda x: x.next
def encode_filename(filename):
if isinstance(filename, unicode):
return filename.encode('utf-8')
return filename
try:
next = next
except NameError:
def next(it):
return it.next()
def with_metaclass(meta, *bases):
# This requires a bit of explanation: the basic idea is to make a
# dummy metaclass for one level of class instantiation that replaces
# itself with the actual metaclass. Because of internal type checks
# we also need to make sure that we downgrade the custom metaclass
# for one level to something closer to type (that's why __call__ and
# __init__ comes back from type etc.).
#
# This has the advantage over six.with_metaclass in that it does not
# introduce dummy classes into the final MRO.
class metaclass(meta):
__call__ = type.__call__
__init__ = type.__init__
def __new__(cls, name, this_bases, d):
if this_bases is None:
return type.__new__(cls, name, (), d)
return meta(name, bases, d)
return metaclass('temporary_class', None, {})
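# Illustrative usage (not part of the original module): with_metaclass lets the
# same class statement work on Python 2 and 3 without leaving a dummy class in
# the final MRO. "Meta" and "Base" are made-up names for the example.
#
#   class Meta(type):
#       pass
#
#   class Base(with_metaclass(Meta, object)):
#       pass
#
#   assert type(Base) is Meta
#   assert Base.__mro__ == (Base, object)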
try:
from collections import Mapping as mapping_types
except ImportError:
import UserDict
mapping_types = (UserDict.UserDict, UserDict.DictMixin, dict)
# common types. These do exist in the special types module too which however
# does not exist in IronPython out of the box. Also that way we don't have
# to deal with implementation specific stuff here
class _C(object):
def method(self): pass
def _func():
yield None
function_type = type(_func)
generator_type = type(_func())
method_type = type(_C().method)
code_type = type(_C.method.__code__)
try:
raise TypeError()
except TypeError:
_tb = sys.exc_info()[2]
traceback_type = type(_tb)
frame_type = type(_tb.tb_frame)
try:
from urllib.parse import quote_from_bytes as url_quote
except ImportError:
from urllib import quote as url_quote
try:
from thread import allocate_lock
except ImportError:
try:
from threading import Lock as allocate_lock
except ImportError:
from dummy_thread import allocate_lock
| apache-2.0 |
dhootha/hadoop-common | src/contrib/hod/testing/helper.py | 182 | 1122 | #Licensed to the Apache Software Foundation (ASF) under one
#or more contributor license agreements. See the NOTICE file
#distributed with this work for additional information
#regarding copyright ownership. The ASF licenses this file
#to you under the Apache License, Version 2.0 (the
#"License"); you may not use this file except in compliance
#with the License. You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
#Unless required by applicable law or agreed to in writing, software
#distributed under the License is distributed on an "AS IS" BASIS,
#WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#See the License for the specific language governing permissions and
#limitations under the License.
import sys
sampleText = "Hello World!"
if __name__=="__main__":
args = sys.argv[1:]
if args[0] == "1":
# print sample text to stdout
sys.stdout.write(sampleText)
elif args[0] == "2":
# print sample text to stderr
sys.stderr.write(sampleText)
# Add any other helper programs here, with different values for args[0]
pass
| apache-2.0 |
arjunsinghy96/coala-bears | tests/python/PycodestyleBearTest.py | 11 | 1254 | from bears.python.PycodestyleBear import PycodestyleBear
from coalib.testing.LocalBearTestHelper import verify_local_bear
good_file = '''
def hello():
print("hello world")
'''
bad_file = '''
import something
def hello():
print("hello world")
'''
PycodestyleBearTest = verify_local_bear(
PycodestyleBear,
valid_files=(good_file,),
invalid_files=(bad_file,))
PycodestyleBearNoIgnoreTest = verify_local_bear(
PycodestyleBear,
valid_files=(good_file,),
invalid_files=(bad_file,),
settings={'pycodestyle_ignore': ''})
PycodestyleBearConfigIgnoreTest = verify_local_bear(
PycodestyleBear,
valid_files=(good_file, bad_file),
invalid_files=[],
settings={'pycodestyle_ignore': 'E303'})
PycodestyleBearConfigSelectTest = verify_local_bear(
PycodestyleBear,
valid_files=(good_file, bad_file),
invalid_files=[],
settings={'pycodestyle_select': 'E300'})
long_line = 'a = "{0}"'.format('a' * 100)
PycodestyleBearLineLengthTest = verify_local_bear(
PycodestyleBear,
valid_files=(),
invalid_files=(long_line,)
)
PycodestyleBearLineLengthTest = verify_local_bear(
PycodestyleBear,
valid_files=(long_line,),
invalid_files=(),
settings={'max_line_length': 200}
)
| agpl-3.0 |
otadmor/Open-Knesset | events/views.py | 8 | 1957 | # Create your views here.
from datetime import datetime
import vobject
from django.conf import settings
from django.http import HttpResponse
from auxiliary.views import GetMoreView
from hashnav.detail import DetailView
from models import Event
class EventDetailView(DetailView):
model = Event
def get_context_data(self, *args, **kwargs):
context = super(EventDetailView, self).get_context_data(**kwargs)
obj = context['object']
time_diff = obj.when - datetime.now()
context['in_days'] = time_diff.days
context['in_minutes'] = (time_diff.seconds / 60) % 60
context['in_hours'] = time_diff.seconds / 3600
creators = []
for i in obj.who.all():
if i.mk:
creators.append(i.mk)
context['creators']=creators
return context
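    # Worked example (illustrative): for a time_diff of 2 days, 3 hours and
    # 20 minutes, time_diff.seconds == 12000, so in_hours == 12000 / 3600 == 3
    # and in_minutes == (12000 / 60) % 60 == 20 (integer division on Python 2).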
class MoreUpcomingEventsView(GetMoreView):
"""Get partially rendered member actions content for AJAX calls to 'More'"""
paginate_by = 10
template_name = 'events/events_partials.html'
def get_queryset(self):
return Event.objects.get_upcoming()
def icalendar(request, summary_length=50, future_only=True):
"""
return a single icalendar file, default to future_only.
"""
summary_length = int(summary_length)
cal = vobject.iCalendar()
now = datetime.now()
for e in Event.objects.all():
# TODO - use a proper query once I recall the syntax (actually the
# implementation of is_future makes this query pretty bad, since it
# will execute multiple times - to do this properly I need some sort of
# select * from table where start_date > $now, then now is only
# calculated once, and this should be log(N) time (assuming an ordered
# index on start_date)
if future_only and e.when <= now:
continue
e.add_vevent_to_ical(cal, summary_length=summary_length)
return HttpResponse(cal.serialize())
| bsd-3-clause |
12520054/pybot | chatterbot/trainers.py | 1 | 11234 | import logging
from .conversation import Statement, Response
class Trainer(object):
"""
Base class for all other trainer classes.
"""
def __init__(self, storage, **kwargs):
self.storage = storage
self.logger = logging.getLogger(__name__)
def train(self, *args, **kwargs):
"""
This method must be overridden by a class that inherits from 'Trainer'.
"""
raise self.TrainerInitializationException()
def get_or_create(self, statement_text):
"""
Return a statement if it exists.
Create and return the statement if it does not exist.
"""
statement = self.storage.find(statement_text)
if not statement:
statement = Statement(statement_text)
return statement
class TrainerInitializationException(Exception):
"""
Exception raised when a subclass has not overridden
the required methods of the Trainer base class.
"""
def __init__(self, value=None):
default = (
'A training class must be specified before calling train(). ' +
'See http://chatterbot.readthedocs.io/en/stable/training.html'
)
self.value = value or default
def __str__(self):
return repr(self.value)
def _generate_export_data(self):
result = []
for statement in self.storage.filter():
for response in statement.in_response_to:
result.append([response.text, statement.text])
return result
def export_for_training(self, file_path='./export.json'):
"""
Create a file from the database that can be used to
train other chat bots.
"""
from jsondb.db import Database
database = Database(file_path)
export = {'export': self._generate_export_data()}
database.data(dictionary=export)
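    # Illustrative note (assumption, not from the original module): the exported
    # file ends up shaped roughly like
    #   {"export": [["Hi there!", "Hello."], ["How are you?", "Fine, thanks."]]}
    # i.e. a list of [text_responded_to, statement_text] pairs.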
class ListTrainer(Trainer):
"""
Allows a chat bot to be trained using a list of strings
where the list represents a conversation.
"""
def train(self, conversation):
"""
Train the chat bot based on the provided list of
statements that represents a single conversation.
"""
statement_history = []
for text in conversation:
statement = self.get_or_create(text)
if statement_history:
statement.add_response(
Response(statement_history[-1].text)
)
statement_history.append(statement)
self.storage.update(statement, force=True)
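# Illustrative usage (not part of the original module): how ListTrainer is
# typically wired up. The ChatBot import path and its `storage` attribute are
# assumptions based on the surrounding package, not taken from this file.
#
#   from chatterbot import ChatBot
#
#   bot = ChatBot('Example Bot')
#   trainer = ListTrainer(bot.storage)
#   trainer.train([
#       'Hi there!',
#       'Hello.',
#       'How are you?',
#       'I am fine, thanks.',
#   ])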
class ChatterBotCorpusTrainer(Trainer):
"""
Allows the chat bot to be trained using data from the
ChatterBot dialog corpus.
"""
def __init__(self, storage, **kwargs):
super(ChatterBotCorpusTrainer, self).__init__(storage, **kwargs)
from .corpus import Corpus
self.corpus = Corpus()
def train(self, *corpora):
trainer = ListTrainer(self.storage)
# Allow a list of corpora to be passed instead of arguments
if len(corpora) == 1:
if isinstance(corpora[0], list):
corpora = corpora[0]
for corpus in corpora:
corpus_data = self.corpus.load_corpus(corpus)
for data in corpus_data:
for pair in data:
trainer.train(pair)
class TwitterTrainer(Trainer):
"""
Allows the chat bot to be trained using data
gathered from Twitter.
"""
def __init__(self, storage, **kwargs):
super(TwitterTrainer, self).__init__(storage, **kwargs)
from twitter import Api as TwitterApi
self.api = TwitterApi(
consumer_key=kwargs.get('twitter_consumer_key'),
consumer_secret=kwargs.get('twitter_consumer_secret'),
access_token_key=kwargs.get('twitter_access_token_key'),
access_token_secret=kwargs.get('twitter_access_token_secret')
)
def random_word(self, base_word='random'):
"""
Generate a random word using the Twitter API.
Search twitter for recent tweets containing the term 'random'.
Then randomly select one word from those tweets and do another
search with that word. Return a randomly selected word from the
new set of results.
"""
import random
random_tweets = self.api.GetSearch(term=base_word, count=5)
random_words = self.get_words_from_tweets(random_tweets)
random_word = random.choice(list(random_words))
tweets = self.api.GetSearch(term=random_word, count=5)
words = self.get_words_from_tweets(tweets)
word = random.choice(list(words))
return word
def get_words_from_tweets(self, tweets):
"""
Given a list of tweets, return the set of
words from the tweets.
"""
words = set()
for tweet in tweets:
# TODO: Handle non-ascii characters properly
cleaned_text = ''.join(
[i if ord(i) < 128 else ' ' for i in tweet.text]
)
tweet_words = cleaned_text.split()
for word in tweet_words:
# If the word contains only letters with a length from 4 to 9
if word.isalpha() and len(word) > 3 and len(word) <= 9:
words.add(word)
return words
def get_statements(self):
"""
Returns list of random statements from the API.
"""
from twitter import TwitterError
statements = []
# Generate a random word
random_word = self.random_word()
self.logger.info(u'Requesting 50 random tweets containing the word {}'.format(random_word))
tweets = self.api.GetSearch(term=random_word, count=50)
for tweet in tweets:
statement = Statement(tweet.text)
if tweet.in_reply_to_status_id:
try:
status = self.api.GetStatus(tweet.in_reply_to_status_id)
statement.add_response(Response(status.text))
statements.append(statement)
except TwitterError as error:
self.logger.warning(str(error))
self.logger.info('Adding {} tweets with responses'.format(len(statements)))
return statements
def train(self):
for _ in range(0, 10):
statements = self.get_statements()
for statement in statements:
self.storage.update(statement, force=True)
class UbuntuCorpusTrainer(Trainer):
"""
Allow chatbots to be trained with the data from
the Ubuntu Dialog Corpus.
"""
def __init__(self, storage, **kwargs):
super(UbuntuCorpusTrainer, self).__init__(storage, **kwargs)
import os
self.data_download_url = kwargs.get(
'ubuntu_corpus_data_download_url',
'http://cs.mcgill.ca/~jpineau/datasets/ubuntu-corpus-1.0/ubuntu_dialogs.tgz'
)
self.data_directory = kwargs.get(
'ubuntu_corpus_data_directory',
'./data/'
)
# Create the data directory if it does not already exist
if not os.path.exists(self.data_directory):
os.makedirs(self.data_directory)
def download(self, url, show_status=True):
"""
Download a file from the given url.
Show a progress indicator for the download status.
Based on: http://stackoverflow.com/a/15645088/1547223
"""
import os
import sys
import requests
file_name = url.split('/')[-1]
file_path = os.path.join(self.data_directory, file_name)
# Do not download the data if it already exists
if os.path.exists(file_path):
self.logger.info('File is already downloaded')
return file_path
with open(file_path, 'wb') as open_file:
print('Downloading %s' % file_name)
response = requests.get(url, stream=True)
total_length = response.headers.get('content-length')
if total_length is None:
# No content length header
open_file.write(response.content)
else:
download = 0
total_length = int(total_length)
for data in response.iter_content(chunk_size=4096):
download += len(data)
open_file.write(data)
if show_status:
done = int(50 * download / total_length)
sys.stdout.write('\r[%s%s]' % ('=' * done, ' ' * (50 - done)))
sys.stdout.flush()
return file_path
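    # Worked example (illustrative): for a 1 MiB file with 256 KiB downloaded,
    # done = int(50 * 262144 / 1048576) = 12, so the bar is drawn with twelve
    # '=' characters followed by 38 spaces.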
def extract(self, file_path):
"""
Extract a tar file at the specified file path.
"""
import os
import tarfile
dir_name = os.path.split(file_path)[-1].split('.')[0]
extracted_file_directory = os.path.join(
self.data_directory,
dir_name
)
# Do not extract if the extracted directory already exists
if os.path.isdir(extracted_file_directory):
return False
self.logger.info('Starting file extraction')
def track_progress(members):
for member in members:
# this will be the current file being extracted
yield member
print('Extracting {}'.format(member.path))
with tarfile.open(file_path) as tar:
tar.extractall(path=self.data_directory, members=track_progress(tar))
self.logger.info('File extraction complete')
return True
def train(self):
import glob
import csv
import os
# Download and extract the Ubuntu dialog corpus
corpus_download_path = self.download(self.data_download_url)
self.extract(corpus_download_path)
extracted_corpus_path = os.path.join(
self.data_directory,
os.path.split(corpus_download_path)[-1].split('.')[0],
'**', '*.tsv'
)
for file in glob.iglob(extracted_corpus_path):
self.logger.info('Training from: {}'.format(file))
with open(file, 'r') as tsv:
reader = csv.reader(tsv, delimiter='\t')
statement_history = []
for row in reader:
if len(row) > 0:
text = row[3]
statement = self.get_or_create(text)
print(text, len(row))
statement.add_extra_data('datetime', row[0])
statement.add_extra_data('speaker', row[1])
if row[2].strip():
statement.add_extra_data('addressing_speaker', row[2])
if statement_history:
statement.add_response(
Response(statement_history[-1].text)
)
statement_history.append(statement)
self.storage.update(statement, force=True)
| mit |
darkleons/odoo | addons/l10n_fr/report/bilan_report.py | 374 | 6196 | # -*- coding: utf-8 -*-
#
#
# Copyright (c) 2008 JAILLET Simon - CrysaLEAD - www.crysalead.fr
#
# WARNING: This program as such is intended to be used by professional
# programmers who take the whole responsibility of assessing all potential
# consequences resulting from its eventual inadequacies and bugs.
# End users who are looking for a ready-to-use solution with commercial
# guarantees and support are strongly advised to contract a Free Software
# Service Company
#
# This program is Free Software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
#
#
import base_report
from openerp.osv import osv
class bilan(base_report.base_report):
def __init__(self, cr, uid, name, context):
super(bilan, self).__init__(cr, uid, name, context)
def set_context(self, objects, data, ids):
super(bilan, self).set_context(objects, data, ids)
self._load('bilan', self.localcontext['data']['form'])
self._set_variable(
'at1a',
self.localcontext['bavar1'] + self.localcontext['bavar2'] + self.localcontext['bavar3']
+ self.localcontext['bavar4'] + self.localcontext[
'bavar5'] + self.localcontext['bavar6']
+ self.localcontext['bavar7'] + self.localcontext[
'bavar8'] + self.localcontext['bavar9']
+ self.localcontext['bavar10'] + self.localcontext[
'bavar11'] + self.localcontext['bavar12']
+ self.localcontext['bavar13'] + self.localcontext[
'bavar14'] + self.localcontext['bavar15']
+ self.localcontext['bavar16'] + self.localcontext[
'bavar17'] + self.localcontext['bavar18']
+ self.localcontext['bavar19'] + self.localcontext['bavar20']
)
self._set_variable(
'at1b',
self.localcontext['bavar2b'] + self.localcontext[
'bavar3b'] + self.localcontext['bavar4b']
+ self.localcontext['bavar5b'] + self.localcontext[
'bavar6b'] + self.localcontext['bavar7b']
+ self.localcontext['bavar9b'] + self.localcontext[
'bavar10b'] + self.localcontext['bavar11b']
+ self.localcontext['bavar12b'] + self.localcontext[
'bavar13b'] + self.localcontext['bavar15b']
+ self.localcontext['bavar16b'] + self.localcontext[
'bavar17b'] + self.localcontext['bavar18b']
+ self.localcontext['bavar19b'] + self.localcontext['bavar20b']
)
self._set_variable(
'at1',
self.localcontext['at1a'] + self.localcontext['at1b']
)
self._set_variable(
'at2a',
self.localcontext['bavar21'] + self.localcontext[
'bavar22'] + self.localcontext['bavar23']
+ self.localcontext['bavar24'] + self.localcontext[
'bavar25'] + self.localcontext['bavar26']
+ self.localcontext['bavar27'] + self.localcontext[
'bavar28'] + self.localcontext['bavar29']
+ self.localcontext['bavar30'] + self.localcontext[
'bavar31'] + self.localcontext['bavar32']
+ self.localcontext['bavar33']
)
self._set_variable(
'at2b',
self.localcontext['bavar21b'] + self.localcontext[
'bavar22b'] + self.localcontext['bavar23b']
+ self.localcontext['bavar24b'] + self.localcontext[
'bavar26b'] + self.localcontext['bavar27b']
+ self.localcontext['bavar29b'] + self.localcontext['bavar30b']
)
self._set_variable(
'at2',
self.localcontext['at2a'] + self.localcontext['at2b']
)
self._set_variable(
'actif',
self.localcontext['at1'] + self.localcontext['at2'] + self.localcontext['bavar34']
+ self.localcontext['bavar35'] + self.localcontext['bavar36']
)
self._set_variable(
'pt1',
self.localcontext['bpvar1'] + self.localcontext['bpvar2'] + self.localcontext['bpvar3']
+ self.localcontext['bpvar4'] + self.localcontext[
'bpvar5'] + self.localcontext['bpvar6']
+ self.localcontext['bpvar7'] + self.localcontext[
'bpvar8'] + self.localcontext['bpvar9']
+ self.localcontext['bpvar10'] + self.localcontext[
'bpvar11'] + self.localcontext['bpvar12']
)
self._set_variable(
'pt2',
self.localcontext['bpvar13'] + self.localcontext['bpvar14']
)
self._set_variable(
'pt3',
self.localcontext['bpvar15'] + self.localcontext[
'bpvar16'] + self.localcontext['bpvar17']
+ self.localcontext['bpvar18'] + self.localcontext[
'bpvar19'] + self.localcontext['bpvar20']
+ self.localcontext['bpvar21'] + self.localcontext[
'bpvar22'] + self.localcontext['bpvar23']
+ self.localcontext['bpvar24'] + self.localcontext['bpvar25']
)
self._set_variable(
'passif',
self.localcontext['pt1'] + self.localcontext['pt2'] + self.localcontext['pt3']
+ self.localcontext['bpvar26']
)
class wrapped_report_bilan(osv.AbstractModel):
_name = 'report.l10n_fr.report_l10nfrbilan'
_inherit = 'report.abstract_report'
_template = 'l10n_fr.report_l10nfrbilan'
_wrapped_report_class = bilan
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
Grirrane/odoo | addons/l10n_in_hr_payroll/report/__init__.py | 8 | 1195 | #-*- coding:utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2009 Tiny SPRL (<http://tiny.be>). All Rights Reserved
# d$
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import report_payslip_details
import report_payroll_advice
import report_hr_salary_employee_bymonth
import payment_advice_report
import report_hr_yearly_salary_detail
import payslip_report
| agpl-3.0 |
mrdomino/capnproto | doc/_plugins/capnp_lexer.py | 35 | 2103 | #! /usr/bin/env python
from pygments.lexer import RegexLexer
from pygments.token import *
class CapnpLexer(RegexLexer):
name = "Cap'n Proto lexer"
aliases = ['capnp']
filenames = ['*.capnp']
tokens = {
'root': [
(r'#.*?$', Comment.Single),
(r'@[0-9a-zA-Z]*', Name.Decorator),
(r'=', Literal, 'expression'),
(r':', Name.Class, 'type'),
(r'\$', Name.Attribute, 'annotation'),
(r'(struct|enum|interface|union|import|using|const|annotation|extends|in|of|on|as|with|from|fixed)\b',
Token.Keyword),
(r'[a-zA-Z0-9_.]+', Token.Name),
(r'[^#@=:$a-zA-Z0-9_]+', Text),
],
'type': [
(r'[^][=;,(){}$]+', Name.Class),
(r'[[(]', Name.Class, 'parentype'),
(r'', Name.Class, '#pop')
],
'parentype': [
(r'[^][;()]+', Name.Class),
(r'[[(]', Name.Class, '#push'),
(r'[])]', Name.Class, '#pop'),
(r'', Name.Class, '#pop')
],
'expression': [
(r'[^][;,(){}$]+', Literal),
(r'[[(]', Literal, 'parenexp'),
(r'', Literal, '#pop')
],
'parenexp': [
(r'[^][;()]+', Literal),
(r'[[(]', Literal, '#push'),
(r'[])]', Literal, '#pop'),
(r'', Literal, '#pop')
],
'annotation': [
(r'[^][;,(){}=:]+', Name.Attribute),
(r'[[(]', Name.Attribute, 'annexp'),
(r'', Name.Attribute, '#pop')
],
'annexp': [
(r'[^][;()]+', Name.Attribute),
(r'[[(]', Name.Attribute, '#push'),
(r'[])]', Name.Attribute, '#pop'),
(r'', Name.Attribute, '#pop')
],
}
if __name__ == "__main__":
from setuptools import setup, find_packages
setup(name = "CapnpPygmentsLexer",
version = "0.1",
packages = find_packages(),
py_modules = [ 'capnp_lexer' ],
entry_points = {'pygments.lexers': 'capnp = capnp_lexer:CapnpLexer'})
| mit |
abrt/faf | src/pyfaf/storage/migrations/versions/8ac9b3343649_add_semver_semrel_to_.py | 1 | 4207 | # Copyright (C) 2019 ABRT Team
# Copyright (C) 2019 Red Hat, Inc.
#
# This file is part of faf.
#
# faf is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# faf is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with faf. If not, see <http://www.gnu.org/licenses/>.
"""
Add semver semrel to reportunknownpackages
Revision ID: 8ac9b3343649
Revises: cee07a513404
Create Date: 2019-06-20 15:16:12.340579
"""
import sqlalchemy as sa
from alembic.op import add_column, alter_column, drop_column, create_index, drop_index, get_bind
from pyfaf.storage import custom_types
# revision identifiers, used by Alembic.
revision = '8ac9b3343649'
down_revision = 'cee07a513404'
metadata = sa.MetaData()
def upgrade() -> None:
add_column('reportunknownpackages', sa.Column('semver', custom_types.Semver(), nullable=True))
add_column('reportunknownpackages', sa.Column('semrel', custom_types.Semver(), nullable=True))
reportunknownpackage = sa.Table("reportunknownpackages", metadata,
sa.Column("id", sa.Integer),
sa.Column("report_id", sa.Integer),
sa.Column("type", sa.Enum("CRASHED",
"RELATED",
"SELINUX_POLICY",
name="reportpackage_type")),
sa.Column("name", sa.String(length=64)),
sa.Column("epoch", sa.Integer),
sa.Column("version", sa.String(length=64)),
sa.Column("release", sa.String(length=64)),
sa.Column("arch_id", sa.Integer),
sa.Column("count", sa.Integer),
sa.Column("semver", custom_types.Semver()),
sa.Column("semrel", custom_types.Semver()))
for (pkg_id, version, release) in get_bind().execute(
sa.select(
[reportunknownpackage.c.id,
reportunknownpackage.c.version,
reportunknownpackage.c.release]
)):
semver = custom_types.to_semver(version)
semrel = custom_types.to_semver(release)
get_bind().execute((reportunknownpackage.update() #pylint: disable=no-value-for-parameter
.where(reportunknownpackage.c.id == pkg_id)
.values(semver=sa.func.to_semver(semver),
semrel=sa.func.to_semver(semrel))))
alter_column('reportunknownpackages', sa.Column('semver',
custom_types.Semver(), nullable=False))
alter_column('reportunknownpackages', sa.Column('semrel',
custom_types.Semver(), nullable=False))
create_index('ix_reportunknownpackages_semver_semrel', 'reportunknownpackages',
['semver', 'semrel'], unique=False)
create_index('ix_builds_semver_semrel', 'builds',
['semver', 'semrel'], unique=False)
drop_index('ix_builds_semver', table_name='builds')
drop_index('ix_builds_semrel', table_name='builds')
def downgrade() -> None:
drop_index('ix_reportunknownpackages_semver_semrel', table_name='reportunknownpackages')
drop_column('reportunknownpackages', 'semver')
drop_column('reportunknownpackages', 'semrel')
create_index('ix_builds_semver', 'builds', ['semver'])
create_index('ix_builds_semrel', 'builds', ['semrel'])
drop_index('ix_builds_semver_semrel', table_name='builds')
| gpl-3.0 |
eckucukoglu/arm-linux-gnueabihf | lib/python2.7/ctypes/test/test_as_parameter.py | 46 | 6799 | import unittest
from ctypes import *
import _ctypes_test
dll = CDLL(_ctypes_test.__file__)
try:
CALLBACK_FUNCTYPE = WINFUNCTYPE
except NameError:
# fake to enable this test on Linux
CALLBACK_FUNCTYPE = CFUNCTYPE
class POINT(Structure):
_fields_ = [("x", c_int), ("y", c_int)]
class BasicWrapTestCase(unittest.TestCase):
def wrap(self, param):
return param
def test_wchar_parm(self):
try:
c_wchar
except NameError:
return
f = dll._testfunc_i_bhilfd
f.argtypes = [c_byte, c_wchar, c_int, c_long, c_float, c_double]
result = f(self.wrap(1), self.wrap(u"x"), self.wrap(3), self.wrap(4), self.wrap(5.0), self.wrap(6.0))
self.assertEqual(result, 139)
        self.assertEqual(type(result), int)
def test_pointers(self):
f = dll._testfunc_p_p
f.restype = POINTER(c_int)
f.argtypes = [POINTER(c_int)]
# This only works if the value c_int(42) passed to the
# function is still alive while the pointer (the result) is
# used.
v = c_int(42)
self.assertEqual(pointer(v).contents.value, 42)
result = f(self.wrap(pointer(v)))
self.assertEqual(type(result), POINTER(c_int))
self.assertEqual(result.contents.value, 42)
        # This one works, too...
result = f(self.wrap(pointer(v)))
self.assertEqual(result.contents.value, v.value)
p = pointer(c_int(99))
result = f(self.wrap(p))
self.assertEqual(result.contents.value, 99)
def test_shorts(self):
f = dll._testfunc_callback_i_if
args = []
expected = [262144, 131072, 65536, 32768, 16384, 8192, 4096, 2048,
1024, 512, 256, 128, 64, 32, 16, 8, 4, 2, 1]
def callback(v):
args.append(v)
return v
CallBack = CFUNCTYPE(c_int, c_int)
cb = CallBack(callback)
f(self.wrap(2**18), self.wrap(cb))
self.assertEqual(args, expected)
################################################################
def test_callbacks(self):
f = dll._testfunc_callback_i_if
f.restype = c_int
f.argtypes = None
MyCallback = CFUNCTYPE(c_int, c_int)
def callback(value):
#print "called back with", value
return value
cb = MyCallback(callback)
result = f(self.wrap(-10), self.wrap(cb))
self.assertEqual(result, -18)
# test with prototype
f.argtypes = [c_int, MyCallback]
cb = MyCallback(callback)
result = f(self.wrap(-10), self.wrap(cb))
self.assertEqual(result, -18)
result = f(self.wrap(-10), self.wrap(cb))
self.assertEqual(result, -18)
AnotherCallback = CALLBACK_FUNCTYPE(c_int, c_int, c_int, c_int, c_int)
# check that the prototype works: we call f with wrong
# argument types
cb = AnotherCallback(callback)
self.assertRaises(ArgumentError, f, self.wrap(-10), self.wrap(cb))
def test_callbacks_2(self):
# Can also use simple datatypes as argument type specifiers
# for the callback function.
# In this case the call receives an instance of that type
f = dll._testfunc_callback_i_if
f.restype = c_int
MyCallback = CFUNCTYPE(c_int, c_int)
f.argtypes = [c_int, MyCallback]
def callback(value):
#print "called back with", value
self.assertEqual(type(value), int)
return value
cb = MyCallback(callback)
result = f(self.wrap(-10), self.wrap(cb))
self.assertEqual(result, -18)
def test_longlong_callbacks(self):
f = dll._testfunc_callback_q_qf
f.restype = c_longlong
MyCallback = CFUNCTYPE(c_longlong, c_longlong)
f.argtypes = [c_longlong, MyCallback]
def callback(value):
self.assertTrue(isinstance(value, (int, long)))
return value & 0x7FFFFFFF
cb = MyCallback(callback)
self.assertEqual(13577625587, int(f(self.wrap(1000000000000), self.wrap(cb))))
def test_byval(self):
# without prototype
ptin = POINT(1, 2)
ptout = POINT()
# EXPORT int _testfunc_byval(point in, point *pout)
result = dll._testfunc_byval(ptin, byref(ptout))
got = result, ptout.x, ptout.y
expected = 3, 1, 2
self.assertEqual(got, expected)
# with prototype
ptin = POINT(101, 102)
ptout = POINT()
dll._testfunc_byval.argtypes = (POINT, POINTER(POINT))
dll._testfunc_byval.restype = c_int
result = dll._testfunc_byval(self.wrap(ptin), byref(ptout))
got = result, ptout.x, ptout.y
expected = 203, 101, 102
self.assertEqual(got, expected)
def test_struct_return_2H(self):
class S2H(Structure):
_fields_ = [("x", c_short),
("y", c_short)]
dll.ret_2h_func.restype = S2H
dll.ret_2h_func.argtypes = [S2H]
inp = S2H(99, 88)
s2h = dll.ret_2h_func(self.wrap(inp))
self.assertEqual((s2h.x, s2h.y), (99*2, 88*3))
def test_struct_return_8H(self):
class S8I(Structure):
_fields_ = [("a", c_int),
("b", c_int),
("c", c_int),
("d", c_int),
("e", c_int),
("f", c_int),
("g", c_int),
("h", c_int)]
dll.ret_8i_func.restype = S8I
dll.ret_8i_func.argtypes = [S8I]
inp = S8I(9, 8, 7, 6, 5, 4, 3, 2)
s8i = dll.ret_8i_func(self.wrap(inp))
self.assertEqual((s8i.a, s8i.b, s8i.c, s8i.d, s8i.e, s8i.f, s8i.g, s8i.h),
(9*2, 8*3, 7*4, 6*5, 5*6, 4*7, 3*8, 2*9))
def test_recursive_as_param(self):
from ctypes import c_int
class A(object):
pass
a = A()
a._as_parameter_ = a
with self.assertRaises(RuntimeError):
c_int.from_param(a)
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
class AsParamWrapper(object):
def __init__(self, param):
self._as_parameter_ = param
class AsParamWrapperTestCase(BasicWrapTestCase):
wrap = AsParamWrapper
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
class AsParamPropertyWrapper(object):
def __init__(self, param):
self._param = param
def getParameter(self):
return self._param
_as_parameter_ = property(getParameter)
class AsParamPropertyWrapperTestCase(BasicWrapTestCase):
wrap = AsParamPropertyWrapper
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
if __name__ == '__main__':
unittest.main()
| gpl-2.0 |
mustermaxmueller/android_kernel_sony_msm8974_togari_5.x | tools/perf/scripts/python/failed-syscalls-by-pid.py | 11180 | 2058 | # failed system call counts, by pid
# (c) 2010, Tom Zanussi <tzanussi@gmail.com>
# Licensed under the terms of the GNU GPL License version 2
#
# Displays system-wide failed system call totals, broken down by pid.
# If a [comm] arg is specified, only syscalls called by [comm] are displayed.
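#
# A minimal usage sketch (hedged: raw_syscalls:sys_exit is the tracepoint this
# script's handler consumes; the comm "firefox" and pid 1234 are made-up examples):
#
#   perf record -e raw_syscalls:sys_exit -a sleep 10
#   perf script -s failed-syscalls-by-pid.py            # all processes
#   perf script -s failed-syscalls-by-pid.py firefox    # only this comm
#   perf script -s failed-syscalls-by-pid.py 1234       # only this pid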
import os
import sys
sys.path.append(os.environ['PERF_EXEC_PATH'] + \
'/scripts/python/Perf-Trace-Util/lib/Perf/Trace')
from perf_trace_context import *
from Core import *
from Util import *
usage = "perf script -s syscall-counts-by-pid.py [comm|pid]\n";
for_comm = None
for_pid = None
if len(sys.argv) > 2:
sys.exit(usage)
if len(sys.argv) > 1:
try:
for_pid = int(sys.argv[1])
except:
for_comm = sys.argv[1]
syscalls = autodict()
def trace_begin():
print "Press control+C to stop and show the summary"
def trace_end():
print_error_totals()
def raw_syscalls__sys_exit(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
id, ret):
if (for_comm and common_comm != for_comm) or \
(for_pid and common_pid != for_pid ):
return
if ret < 0:
try:
syscalls[common_comm][common_pid][id][ret] += 1
except TypeError:
syscalls[common_comm][common_pid][id][ret] = 1
def print_error_totals():
if for_comm is not None:
print "\nsyscall errors for %s:\n\n" % (for_comm),
else:
print "\nsyscall errors:\n\n",
print "%-30s %10s\n" % ("comm [pid]", "count"),
print "%-30s %10s\n" % ("------------------------------", \
"----------"),
comm_keys = syscalls.keys()
for comm in comm_keys:
pid_keys = syscalls[comm].keys()
for pid in pid_keys:
print "\n%s [%d]\n" % (comm, pid),
id_keys = syscalls[comm][pid].keys()
for id in id_keys:
print " syscall: %-16s\n" % syscall_name(id),
ret_keys = syscalls[comm][pid][id].keys()
for ret, val in sorted(syscalls[comm][pid][id].iteritems(), key = lambda(k, v): (v, k), reverse = True):
print " err = %-20s %10d\n" % (strerror(ret), val),
| gpl-2.0 |
saurabhjn76/sympy | sympy/core/tests/test_complex.py | 69 | 21313 | from sympy import (S, Symbol, sqrt, I, Integer, Rational, cos, sin, im, re, Abs,
exp, sinh, cosh, tan, tanh, conjugate, sign, cot, coth, pi, symbols,
expand_complex)
def test_complex():
a = Symbol("a", real=True)
b = Symbol("b", real=True)
e = (a + I*b)*(a - I*b)
assert e.expand() == a**2 + b**2
assert sqrt(I) == (-1)**Rational(1, 4)
def test_conjugate():
a = Symbol("a", real=True)
b = Symbol("b", real=True)
c = Symbol("c", imaginary=True)
d = Symbol("d", imaginary=True)
x = Symbol('x')
z = a + I*b + c + I*d
zc = a - I*b - c + I*d
assert conjugate(z) == zc
assert conjugate(exp(z)) == exp(zc)
assert conjugate(exp(I*x)) == exp(-I*conjugate(x))
assert conjugate(z**5) == zc**5
assert conjugate(abs(x)) == abs(x)
assert conjugate(sign(z)) == sign(zc)
assert conjugate(sin(z)) == sin(zc)
assert conjugate(cos(z)) == cos(zc)
assert conjugate(tan(z)) == tan(zc)
assert conjugate(cot(z)) == cot(zc)
assert conjugate(sinh(z)) == sinh(zc)
assert conjugate(cosh(z)) == cosh(zc)
assert conjugate(tanh(z)) == tanh(zc)
assert conjugate(coth(z)) == coth(zc)
def test_abs1():
a = Symbol("a", real=True)
b = Symbol("b", real=True)
assert abs(a) == abs(a)
assert abs(-a) == abs(a)
assert abs(a + I*b) == sqrt(a**2 + b**2)
def test_abs2():
a = Symbol("a", real=False)
b = Symbol("b", real=False)
assert abs(a) != a
assert abs(-a) != a
assert abs(a + I*b) != sqrt(a**2 + b**2)
def test_evalc():
x = Symbol("x", real=True)
y = Symbol("y", real=True)
z = Symbol("z")
assert ((x + I*y)**2).expand(complex=True) == x**2 + 2*I*x*y - y**2
assert expand_complex(z**(2*I)) == (re((re(z) + I*im(z))**(2*I)) +
I*im((re(z) + I*im(z))**(2*I)))
assert expand_complex(
z**(2*I), deep=False) == I*im(z**(2*I)) + re(z**(2*I))
assert exp(I*x) != cos(x) + I*sin(x)
assert exp(I*x).expand(complex=True) == cos(x) + I*sin(x)
assert exp(I*x + y).expand(complex=True) == exp(y)*cos(x) + I*sin(x)*exp(y)
assert sin(I*x).expand(complex=True) == I * sinh(x)
assert sin(x + I*y).expand(complex=True) == sin(x)*cosh(y) + \
I * sinh(y) * cos(x)
assert cos(I*x).expand(complex=True) == cosh(x)
assert cos(x + I*y).expand(complex=True) == cos(x)*cosh(y) - \
I * sinh(y) * sin(x)
assert tan(I*x).expand(complex=True) == tanh(x) * I
assert tan(x + I*y).expand(complex=True) == (
sin(2*x)/(cos(2*x) + cosh(2*y)) +
I*sinh(2*y)/(cos(2*x) + cosh(2*y)))
assert sinh(I*x).expand(complex=True) == I * sin(x)
assert sinh(x + I*y).expand(complex=True) == sinh(x)*cos(y) + \
I * sin(y) * cosh(x)
assert cosh(I*x).expand(complex=True) == cos(x)
assert cosh(x + I*y).expand(complex=True) == cosh(x)*cos(y) + \
I * sin(y) * sinh(x)
assert tanh(I*x).expand(complex=True) == tan(x) * I
assert tanh(x + I*y).expand(complex=True) == (
(sinh(x)*cosh(x) + I*cos(y)*sin(y)) /
(sinh(x)**2 + cos(y)**2)).expand()
def test_pythoncomplex():
x = Symbol("x")
assert 4j*x == 4*x*I
assert 4j*x == 4.0*x*I
assert 4.1j*x != 4*x*I
def test_rootcomplex():
R = Rational
assert ((+1 + I)**R(1, 2)).expand(
complex=True) == 2**R(1, 4)*cos( pi/8) + 2**R(1, 4)*sin( pi/8)*I
assert ((-1 - I)**R(1, 2)).expand(
complex=True) == 2**R(1, 4)*cos(3*pi/8) - 2**R(1, 4)*sin(3*pi/8)*I
assert (sqrt(-10)*I).as_real_imag() == (-sqrt(10), 0)
def test_expand_inverse():
assert (1/(1 + I)).expand(complex=True) == (1 - I)/2
assert ((1 + 2*I)**(-2)).expand(complex=True) == (-3 - 4*I)/25
assert ((1 + I)**(-8)).expand(complex=True) == Rational(1, 16)
def test_expand_complex():
assert ((2 + 3*I)**10).expand(complex=True) == -341525 - 145668*I
# the following two tests are to ensure the SymPy uses an efficient
# algorithm for calculating powers of complex numbers. They should execute
# in something like 0.01s.
assert ((2 + 3*I)**1000).expand(complex=True) == \
-81079464736246615951519029367296227340216902563389546989376269312984127074385455204551402940331021387412262494620336565547972162814110386834027871072723273110439771695255662375718498785908345629702081336606863762777939617745464755635193139022811989314881997210583159045854968310911252660312523907616129080027594310008539817935736331124833163907518549408018652090650537035647520296539436440394920287688149200763245475036722326561143851304795139005599209239350981457301460233967137708519975586996623552182807311159141501424576682074392689622074945519232029999 + \
46938745946789557590804551905243206242164799136976022474337918748798900569942573265747576032611189047943842446167719177749107138603040963603119861476016947257034472364028585381714774667326478071264878108114128915685688115488744955550920239128462489496563930809677159214598114273887061533057125164518549173898349061972857446844052995037423459472376202251620778517659247970283904820245958198842631651569984310559418135975795868314764489884749573052997832686979294085577689571149679540256349988338406458116270429842222666345146926395233040564229555893248370000*I
assert ((2 + 3*I/4)**1000).expand(complex=True) == \
Integer(1)*37079892761199059751745775382463070250205990218394308874593455293485167797989691280095867197640410033222367257278387021789651672598831503296531725827158233077451476545928116965316544607115843772405184272449644892857783761260737279675075819921259597776770965829089907990486964515784097181964312256560561065607846661496055417619388874421218472707497847700629822858068783288579581649321248495739224020822198695759609598745114438265083593711851665996586461937988748911532242908776883696631067311443171682974330675406616373422505939887984366289623091300746049101284856530270685577940283077888955692921951247230006346681086274961362500646889925803654263491848309446197554307105991537357310209426736453173441104334496173618419659521888945605315751089087820455852582920963561495787655250624781448951403353654348109893478206364632640344111022531861683064175862889459084900614967785405977231549003280842218501570429860550379522498497412180001/114813069527425452423283320117768198402231770208869520047764273682576626139237031385665948631650626991844596463898746277344711896086305533142593135616665318539129989145312280000688779148240044871428926990063486244781615463646388363947317026040466353970904996558162398808944629605623311649536164221970332681344168908984458505602379484807914058900934776500429002716706625830522008132236281291761267883317206598995396418127021779858404042159853183251540889433902091920554957783589672039160081957216630582755380425583726015528348786419432054508915275783882625175435528800822842770817965453762184851149029376 + \
I*421638390580169706973991429333213477486930178424989246669892530737775352519112934278994501272111385966211392610029433824534634841747911783746811994443436271013377059560245191441549885048056920190833693041257216263519792201852046825443439142932464031501882145407459174948712992271510309541474392303461939389368955986650538525895866713074543004916049550090364398070215427272240155060576252568700906004691224321432509053286859100920489253598392100207663785243368195857086816912514025693453058403158416856847185079684216151337200057494966741268925263085619240941610301610538225414050394612058339070756009433535451561664522479191267503989904464718368605684297071150902631208673621618217106272361061676184840810762902463998065947687814692402219182668782278472952758690939877465065070481351343206840649517150634973307937551168752642148704904383991876969408056379195860410677814566225456558230131911142229028179902418223009651437985670625/1793954211366022694113801876840128100034871409513586250746316776290259783425578615401030447369541046747571819748417910583511123376348523955353017744010395602173906080395504375010762174191250701116076984219741972574712741619474818186676828531882286780795390571221287481389759837587864244524002565968286448146002639202882164150037179450123657170327105882819203167448541028601906377066191895183769810676831353109303069033234715310287563158747705988305326397404720186258671215368588625611876280581509852855552819149745718992630449787803625851701801184123166018366180137512856918294030710215034138299203584
assert ((2 + 3*I)**-1000).expand(complex=True) == \
Integer(1)*-81079464736246615951519029367296227340216902563389546989376269312984127074385455204551402940331021387412262494620336565547972162814110386834027871072723273110439771695255662375718498785908345629702081336606863762777939617745464755635193139022811989314881997210583159045854968310911252660312523907616129080027594310008539817935736331124833163907518549408018652090650537035647520296539436440394920287688149200763245475036722326561143851304795139005599209239350981457301460233967137708519975586996623552182807311159141501424576682074392689622074945519232029999/8777125472973511649630750050295188683351430110097915876250894978429797369155961290321829625004920141758416719066805645579710744290541337680113772670040386863849283653078324415471816788604945889094925784900885812724984087843737442111926413818245854362613018058774368703971604921858023116665586358870612944209398056562604561248859926344335598822815885851096698226775053153403320782439987679978321289537645645163767251396759519805603090332694449553371530571613352311006350058217982509738362083094920649452123351717366337410243853659113315547584871655479914439219520157174729130746351059075207407866012574386726064196992865627149566238044625779078186624347183905913357718850537058578084932880569701242598663149911276357125355850792073635533676541250531086757377369962506979378337216411188347761901006460813413505861461267545723590468627854202034450569581626648934062198718362303420281555886394558137408159453103395918783625713213314350531051312551733021627153081075080140680608080529736975658786227362251632725009435866547613598753584705455955419696609282059191031962604169242974038517575645939316377801594539335940001 - Integer(1)*46938745946789557590804551905243206242164799136976022474337918748798900569942573265747576032611189047943842446167719177749107138603040963603119861476016947257034472364028585381714774667326478071264878108114128915685688115488744955550920239128462489496563930809677159214598114273887061533057125164518549173898349061972857446844052995037423459472376202251620778517659247970283904820245958198842631651569984310559418135975795868314764489884749573052997832686979294085577689571149679540256349988338406458116270429842222666345146926395233040564229555893248370000*I/8777125472973511649630750050295188683351430110097915876250894978429797369155961290321829625004920141758416719066805645579710744290541337680113772670040386863849283653078324415471816788604945889094925784900885812724984087843737442111926413818245854362613018058774368703971604921858023116665586358870612944209398056562604561248859926344335598822815885851096698226775053153403320782439987679978321289537645645163767251396759519805603090332694449553371530571613352311006350058217982509738362083094920649452123351717366337410243853659113315547584871655479914439219520157174729130746351059075207407866012574386726064196992865627149566238044625779078186624347183905913357718850537058578084932880569701242598663149911276357125355850792073635533676541250531086757377369962506979378337216411188347761901006460813413505861461267545723590468627854202034450569581626648934062198718362303420281555886394558137408159453103395918783625713213314350531051312551733021627153081075080140680608080529736975658786227362251632725009435866547613598753584705455955419696609282059191031962604169242974038517575645939316377801594539335940001
assert ((2 + 3*I/4)**-1000).expand(complex=True) == \
Integer(1)*4257256305661027385394552848555894604806501409793288342610746813288539790051927148781268212212078237301273165351052934681382567968787279534591114913777456610214738290619922068269909423637926549603264174216950025398244509039145410016404821694746262142525173737175066432954496592560621330313807235750500564940782099283410261748370262433487444897446779072067625787246390824312580440138770014838135245148574339248259670887549732495841810961088930810608893772914812838358159009303794863047635845688453859317690488124382253918725010358589723156019888846606295866740117645571396817375322724096486161308083462637370825829567578309445855481578518239186117686659177284332344643124760453112513611749309168470605289172320376911472635805822082051716625171429727162039621902266619821870482519063133136820085579315127038372190224739238686708451840610064871885616258831386810233957438253532027049148030157164346719204500373766157143311767338973363806106967439378604898250533766359989107510507493549529158818602327525235240510049484816090584478644771183158342479140194633579061295740839490629457435283873180259847394582069479062820225159699506175855369539201399183443253793905149785994830358114153241481884290274629611529758663543080724574566578220908907477622643689220814376054314972190402285121776593824615083669045183404206291739005554569305329760211752815718335731118664756831942466773261465213581616104242113894521054475516019456867271362053692785300826523328020796670205463390909136593859765912483565093461468865534470710132881677639651348709376/2103100954337624833663208713697737151593634525061637972297915388685604042449504336765884978184588688426595940401280828953096857809292320006227881797146858511436638446932833617514351442216409828605662238790280753075176269765767010004889778647709740770757817960711900340755635772183674511158570690702969774966791073165467918123298694584729211212414462628433370481195120564586361368504153395406845170075275051749019600057116719726628746724489572189061061036426955163696859127711110719502594479795200686212257570291758725259007379710596548777812659422174199194837355646482046783616494013289495563083118517507178847555801163089723056310287760875135196081975602765511153122381201303871673391366630940702817360340900568748719988954847590748960761446218262344767250783946365392689256634180417145926390656439421745644011831124277463643383712803287985472471755648426749842410972650924240795946699346613614779460399530274263580007672855851663196114585312432954432654691485867618908420370875753749297487803461900447407917655296784879220450937110470920633595689721819488638484547259978337741496090602390463594556401615298457456112485536498177883358587125449801777718900375736758266215245325999241624148841915093787519330809347240990363802360596034171167818310322276373120180985148650099673289383722502488957717848531612020897298448601714154586319660314294591620415272119454982220034319689607295960162971300417552364254983071768070124456169427638371140064235083443242844616326538396503937972586505546495649094344512270582463639152160238137952390380581401171977159154009407415523525096743009110916334144716516647041176989758534635251844947906038080852185583742296318878233394998111078843229681030277039104786225656992262073797524057992347971177720807155842376332851559276430280477639539393920006008737472164850104411971830120295750221200029811143140323763349636629725073624360001 - 
Integer(1)*3098214262599218784594285246258841485430681674561917573155883806818465520660668045042109232930382494608383663464454841313154390741655282039877410154577448327874989496074260116195788919037407420625081798124301494353693248757853222257918294662198297114746822817460991242508743651430439120439020484502408313310689912381846149597061657483084652685283853595100434135149479564507015504022249330340259111426799121454516345905101620532787348293877485702600390665276070250119465888154331218827342488849948540687659846652377277250614246402784754153678374932540789808703029043827352976139228402417432199779415751301480406673762521987999573209628597459357964214510139892316208670927074795773830798600837815329291912002136924506221066071242281626618211060464126372574400100990746934953437169840312584285942093951405864225230033279614235191326102697164613004299868695519642598882914862568516635347204441042798206770888274175592401790040170576311989738272102077819127459014286741435419468254146418098278519775722104890854275995510700298782146199325790002255362719776098816136732897323406228294203133323296591166026338391813696715894870956511298793595675308998014158717167429941371979636895553724830981754579086664608880698350866487717403917070872269853194118364230971216854931998642990452908852258008095741042117326241406479532880476938937997238098399302185675832474590293188864060116934035867037219176916416481757918864533515526389079998129329045569609325290897577497835388451456680707076072624629697883854217331728051953671643278797380171857920000*I/2103100954337624833663208713697737151593634525061637972297915388685604042449504336765884978184588688426595940401280828953096857809292320006227881797146858511436638446932833617514351442216409828605662238790280753075176269765767010004889778647709740770757817960711900340755635772183674511158570690702969774966791073165467918123298694584729211212414462628433370481195120564586361368504153395406845170075275051749019600057116719726628746724489572189061061036426955163696859127711110719502594479795200686212257570291758725259007379710596548777812659422174199194837355646482046783616494013289495563083118517507178847555801163089723056310287760875135196081975602765511153122381201303871673391366630940702817360340900568748719988954847590748960761446218262344767250783946365392689256634180417145926390656439421745644011831124277463643383712803287985472471755648426749842410972650924240795946699346613614779460399530274263580007672855851663196114585312432954432654691485867618908420370875753749297487803461900447407917655296784879220450937110470920633595689721819488638484547259978337741496090602390463594556401615298457456112485536498177883358587125449801777718900375736758266215245325999241624148841915093787519330809347240990363802360596034171167818310322276373120180985148650099673289383722502488957717848531612020897298448601714154586319660314294591620415272119454982220034319689607295960162971300417552364254983071768070124456169427638371140064235083443242844616326538396503937972586505546495649094344512270582463639152160238137952390380581401171977159154009407415523525096743009110916334144716516647041176989758534635251844947906038080852185583742296318878233394998111078843229681030277039104786225656992262073797524057992347971177720807155842376332851559276430280477639539393920006008737472164850104411971830120295750221200029811143140323763349636629725073624360001
a = Symbol('a', real=True)
b = Symbol('b', real=True)
assert exp(a*(2 + I*b)).expand(complex=True) == \
I*exp(2*a)*sin(a*b) + exp(2*a)*cos(a*b)
def test_expand():
f = (16 - 2*sqrt(29))**2
assert f.expand() == 372 - 64*sqrt(29)
f = (Integer(1)/2 + I/2)**10
assert f.expand() == I/32
f = (Integer(1)/2 + I)**10
assert f.expand() == Integer(237)/1024 - 779*I/256
def test_re_im1652():
x = Symbol('x')
assert re(x) == re(conjugate(x))
assert im(x) == - im(conjugate(x))
assert im(x)*re(conjugate(x)) + im(conjugate(x)) * re(x) == 0
def test_issue_5084():
x = Symbol('x')
assert ((x + x*I)/(1 + I)).as_real_imag() == (re((x + I*x)/(1 + I)
), im((x + I*x)/(1 + I)))
def test_issue_5236():
assert (cos(1 + I)**3).as_real_imag() == (-3*sin(1)**2*sinh(1)**2*cos(1)*cosh(1) +
cos(1)**3*cosh(1)**3, -3*cos(1)**2*cosh(1)**2*sin(1)*sinh(1) + sin(1)**3*sinh(1)**3)
def test_real_imag():
x, y, z = symbols('x, y, z')
X, Y, Z = symbols('X, Y, Z', commutative=False)
a = Symbol('a', real=True)
assert (2*a*x).as_real_imag() == (2*a*re(x), 2*a*im(x))
# issue 5395:
assert (x*x.conjugate()).as_real_imag() == (Abs(x)**2, 0)
assert im(x*x.conjugate()) == 0
assert im(x*y.conjugate()*z*y) == im(x*z)*Abs(y)**2
assert im(x*y.conjugate()*x*y) == im(x**2)*Abs(y)**2
assert im(Z*y.conjugate()*X*y) == im(Z*X)*Abs(y)**2
assert im(X*X.conjugate()) == im(X*X.conjugate(), evaluate=False)
assert (sin(x)*sin(x).conjugate()).as_real_imag() == \
(Abs(sin(x))**2, 0)
# issue 6573:
assert (x**2).as_real_imag() == (re(x)**2 - im(x)**2, 2*re(x)*im(x))
# issue 6428:
r = Symbol('r', real=True)
i = Symbol('i', imaginary=True)
assert (i*r*x).as_real_imag() == (I*i*r*im(x), -I*i*r*re(x))
assert (i*r*x*(y + 2)).as_real_imag() == (
I*i*r*(re(y) + 2)*im(x) + I*i*r*re(x)*im(y),
-I*i*r*(re(y) + 2)*re(x) + I*i*r*im(x)*im(y))
# issue 7106:
assert ((1 + I)/(1 - I)).as_real_imag() == (0, 1)
assert ((1 + 2*I)*(1 + 3*I)).as_real_imag() == (-5, 5)
def test_pow_issue_1724():
e = ((-1)**(S(1)/3))
assert e.conjugate().n() == e.n().conjugate()
e = S('-2/3 - (-29/54 + sqrt(93)/18)**(1/3) - 1/(9*(-29/54 + sqrt(93)/18)**(1/3))')
assert e.conjugate().n() == e.n().conjugate()
e = 2**I
assert e.conjugate().n() == e.n().conjugate()
def test_issue_5429():
assert sqrt(I).conjugate() != sqrt(I)
def test_issue_4124():
from sympy import oo
assert expand_complex(I*oo) == oo*I
| bsd-3-clause |
Lancey6/docbot | DocBot/docbot_core/tests.py | 2 | 5173 | #!/usr/bin/python
"""This tests the YouTube"""
import unittest
import re
from ircutils3.events import MessageEvent
from youtube import YouTubeIdent, YouTubeSearch
class TestYouTubeSearch(unittest.TestCase):
""" This is for testing the YouTube search functionality. """
user = "TestUser!testuser@test.com"
channel = "#channel"
prvmsg = "PRIVMSG"
test_simple_query = "!yt 5 Stages of PSN Loss"
valid_return = '\x02*** YouTube: \x02\x02\x1f5 Stages of PSN Loss\x1f\x02'
def _clean_pretty(self, pretty):
post_regex = ".\(post \d+\)"
rx = re.compile(post_regex)
rd = rx.search(pretty)
if rd:
cleaned = re.sub(rx, '', pretty)
return cleaned
else:
return pretty
def _build_message_event(self, message, user="TestUser!testuser@test.com", method="PRIVMSG", channel="#channel"):
return MessageEvent(user, method, [channel, message])
def test_valid_search(self):
""" This search should return a valid, public, video. """
valid_message_event = self._build_message_event(self.test_simple_query)
ytsearch = YouTubeSearch(valid_message_event)
cleaned = self._clean_pretty(ytsearch.pretty)
self.assertEqual(cleaned, self.valid_return)
class TestYouTubeIdent(unittest.TestCase):
""" This is for testing the core identify function in the YouTube module. """
# Setup
user = "TestUser!testuser@test.com"
channel = "#channel"
prvmsg = "PRIVMSG"
notice = "NOTICE"
post_regex = ".\(post \d+\)"
rx = re.compile(post_regex)
# Test Data
valid_url = "http://www.youtube.com/watch?v=iVAWm8VNmQA"
valid_message = "have you guys seen http://youtu.be/iVAWm8VNmQA?"
valid_response = '\x02*** YouTube: \x02\x02\x1fA Tribute to the Snackish\x1f\x02'
invalid_url = "http://youtu.be/i333AWm8VNmQA"
invalid_message = "they named their pokemon some garbage like ch?v=iVAW"
error_400 = "HTTP Error 400: Bad Request"
deleted_url = "http://www.youtube.com/watch?v=k-rjwg_9mdw"
private_url = "http://youtu.be/IC0C5w1-T1Y"
def _clean_pretty(self, pretty):
rd = self.rx.search(pretty)
if rd:
cleaned = re.sub(self.rx, '', pretty)
return cleaned
else:
return pretty
def _build_message_event(self, message, user="TestUser!testuser@test.com", method="PRIVMSG", channel="#channel"):
return MessageEvent(user, method, [channel, message])
# Messages
def test_valid_url(self):
""" This URL should pass with a valid response. """
valid_message_event = self._build_message_event(self.valid_url)
ytident = YouTubeIdent(valid_message_event)
self.assertEqual(self._clean_pretty(ytident.pretty), self.valid_response)
def test_valid_message(self):
""" This message should pass with a valid response. """
valid_message_event = self._build_message_event(self.valid_message)
ytident = YouTubeIdent(valid_message_event)
self.assertEqual(self._clean_pretty(ytident.pretty), self.valid_response)
def test_invalid_url(self):
""" Because this is an invalid URL, this should return an IndexError exception """
valid_message_event = self._build_message_event(self.invalid_url)
ytident = YouTubeIdent(valid_message_event)
self.assertEqual(type(ytident.pretty), IndexError)
def test_invalid_message(self):
""" Because this is an invalid message, this should return an IndexError exception """
valid_message_event = self._build_message_event(self.invalid_message)
ytident = YouTubeIdent(valid_message_event)
self.assertEqual(type(ytident.pretty), IndexError)
def test_private_message_not_increasing_viewcount(self):
""" With a valid URL given in a PM, we should not increase the counter. """
valid_message_event = self._build_message_event(self.valid_url, channel=self.user)
ytident1 = YouTubeIdent(valid_message_event)
regex = ".\(post (\d+)\)"
rx = re.compile(regex)
rd = rx.search(ytident1.pretty)
first_count = rd.groups()[0]
ytident2 = YouTubeIdent(valid_message_event)
regex = ".\(post (\d+)\)"
rx = re.compile(regex)
        rd = rx.search(ytident2.pretty)
second_count = rd.groups()[0]
self.assertEqual(first_count, second_count)
# Responses
def test_deleted_video_url(self):
""" Because this is a deleted video, this should return an IndexError exception """
valid_message_event = self._build_message_event(self.deleted_url)
ytident = YouTubeIdent(valid_message_event)
self.assertEqual(type(ytident.pretty), IndexError)
def test_private_video_url(self):
""" Because this is a private video, this should return an IndexError exception """
valid_message_event = self._build_message_event(self.private_url)
ytident = YouTubeIdent(valid_message_event)
self.assertEqual(type(ytident.pretty), IndexError)
def main():
unittest.main()
if __name__ == "__main__":
main() | mit |
spatialaudio/sweep | lin_sweep_kaiser_window_bandlimited_script5/lin_sweep_kaiser_window_bandlimited_script5_1.py | 2 | 2216 | #!/usr/bin/env python3
"""The influence of windowing of lin. bandlimited sweep signals when using a
Kaiser Window by fixing beta (=2) and fade_out (=0).
fstart = 100 Hz
fstop = 5000 Hz
Deconvolution: Unwindowed
"""
import sys
sys.path.append('..')
import measurement_chain
import plotting
import calculation
import generation
import matplotlib.pyplot as plt
import windows
from scipy.signal import lfilter
import numpy as np
# Parameters of the measuring system
fs = 44100
fstart = 100
fstop = 5000
duration = 1
pad = 4
# Generate excitation signal
excitation = generation.lin_sweep(fstart, fstop, duration, fs)
N = len(excitation)
# Noise in measurement chain
noise_level_db = -30
noise = measurement_chain.additive_noise(noise_level_db)
# FIR-Filter-System
dirac_system = measurement_chain.convolution([1.0])
# Combinate system elements
system = measurement_chain.chained(dirac_system, noise)
# Spectrum of dirac for reference
dirac = np.zeros(pad * fs)
dirac[0] = 1
dirac_f = np.fft.rfft(dirac)
# Lists
beta = 7
fade_in_list = np.arange(0, 1001, 1)
fade_out = 0
def get_results(fade_in):
excitation_windowed = excitation * windows.window_kaiser(N,
fade_in,
fade_out,
fs, beta)
excitation_windowed_zeropadded = generation.zero_padding(
excitation_windowed, pad, fs)
excitation_zeropadded = generation.zero_padding(excitation, pad, fs)
system_response = system(excitation_windowed_zeropadded)
ir = calculation.deconv_process(excitation_zeropadded,
system_response,
fs)
return ir
with open("lin_sweep_kaiser_window_bandlimited_script5_1.txt", "w") as f:
for fade_in in fade_in_list:
ir = get_results(fade_in)
pnr = calculation.pnr_db(ir[0], ir[1:4 * fs])
spectrum_distance = calculation.vector_distance(
dirac_f, np.fft.rfft(ir[:pad * fs]))
f.write(
str(fade_in) + " " + str(pnr) +
" " + str(spectrum_distance) + " \n")
| mit |
crowdhackathon-transport/optimizers | crowdstance-api/venv/lib/python2.7/site-packages/jinja2/testsuite/imports.py | 404 | 5333 | # -*- coding: utf-8 -*-
"""
jinja2.testsuite.imports
~~~~~~~~~~~~~~~~~~~~~~~~
Tests the import features (with includes).
:copyright: (c) 2010 by the Jinja Team.
:license: BSD, see LICENSE for more details.
"""
import unittest
from jinja2.testsuite import JinjaTestCase
from jinja2 import Environment, DictLoader
from jinja2.exceptions import TemplateNotFound, TemplatesNotFound
test_env = Environment(loader=DictLoader(dict(
module='{% macro test() %}[{{ foo }}|{{ bar }}]{% endmacro %}',
header='[{{ foo }}|{{ 23 }}]',
o_printer='({{ o }})'
)))
test_env.globals['bar'] = 23
class ImportsTestCase(JinjaTestCase):
def test_context_imports(self):
t = test_env.from_string('{% import "module" as m %}{{ m.test() }}')
assert t.render(foo=42) == '[|23]'
t = test_env.from_string('{% import "module" as m without context %}{{ m.test() }}')
assert t.render(foo=42) == '[|23]'
t = test_env.from_string('{% import "module" as m with context %}{{ m.test() }}')
assert t.render(foo=42) == '[42|23]'
t = test_env.from_string('{% from "module" import test %}{{ test() }}')
assert t.render(foo=42) == '[|23]'
t = test_env.from_string('{% from "module" import test without context %}{{ test() }}')
assert t.render(foo=42) == '[|23]'
t = test_env.from_string('{% from "module" import test with context %}{{ test() }}')
assert t.render(foo=42) == '[42|23]'
def test_trailing_comma(self):
test_env.from_string('{% from "foo" import bar, baz with context %}')
test_env.from_string('{% from "foo" import bar, baz, with context %}')
test_env.from_string('{% from "foo" import bar, with context %}')
test_env.from_string('{% from "foo" import bar, with, context %}')
test_env.from_string('{% from "foo" import bar, with with context %}')
def test_exports(self):
m = test_env.from_string('''
{% macro toplevel() %}...{% endmacro %}
{% macro __private() %}...{% endmacro %}
{% set variable = 42 %}
{% for item in [1] %}
{% macro notthere() %}{% endmacro %}
{% endfor %}
''').module
assert m.toplevel() == '...'
assert not hasattr(m, '__missing')
assert m.variable == 42
assert not hasattr(m, 'notthere')
class IncludesTestCase(JinjaTestCase):
def test_context_include(self):
t = test_env.from_string('{% include "header" %}')
assert t.render(foo=42) == '[42|23]'
t = test_env.from_string('{% include "header" with context %}')
assert t.render(foo=42) == '[42|23]'
t = test_env.from_string('{% include "header" without context %}')
assert t.render(foo=42) == '[|23]'
def test_choice_includes(self):
t = test_env.from_string('{% include ["missing", "header"] %}')
assert t.render(foo=42) == '[42|23]'
t = test_env.from_string('{% include ["missing", "missing2"] ignore missing %}')
assert t.render(foo=42) == ''
t = test_env.from_string('{% include ["missing", "missing2"] %}')
self.assert_raises(TemplateNotFound, t.render)
try:
t.render()
except TemplatesNotFound as e:
assert e.templates == ['missing', 'missing2']
assert e.name == 'missing2'
else:
assert False, 'thou shalt raise'
def test_includes(t, **ctx):
ctx['foo'] = 42
assert t.render(ctx) == '[42|23]'
t = test_env.from_string('{% include ["missing", "header"] %}')
test_includes(t)
t = test_env.from_string('{% include x %}')
test_includes(t, x=['missing', 'header'])
t = test_env.from_string('{% include [x, "header"] %}')
test_includes(t, x='missing')
t = test_env.from_string('{% include x %}')
test_includes(t, x='header')
t = test_env.from_string('{% include x %}')
test_includes(t, x='header')
t = test_env.from_string('{% include [x] %}')
test_includes(t, x='header')
def test_include_ignoring_missing(self):
t = test_env.from_string('{% include "missing" %}')
self.assert_raises(TemplateNotFound, t.render)
for extra in '', 'with context', 'without context':
t = test_env.from_string('{% include "missing" ignore missing ' +
extra + ' %}')
assert t.render() == ''
def test_context_include_with_overrides(self):
env = Environment(loader=DictLoader(dict(
main="{% for item in [1, 2, 3] %}{% include 'item' %}{% endfor %}",
item="{{ item }}"
)))
assert env.get_template("main").render() == "123"
def test_unoptimized_scopes(self):
t = test_env.from_string("""
{% macro outer(o) %}
{% macro inner() %}
{% include "o_printer" %}
{% endmacro %}
{{ inner() }}
{% endmacro %}
{{ outer("FOO") }}
""")
assert t.render().strip() == '(FOO)'
def suite():
suite = unittest.TestSuite()
suite.addTest(unittest.makeSuite(ImportsTestCase))
suite.addTest(unittest.makeSuite(IncludesTestCase))
return suite
| mit |
junalmeida/Sick-Beard | lib/guessit/options.py | 11 | 2019 | from optparse import OptionParser
option_parser = OptionParser(usage='usage: %prog [options] file1 [file2...]')
option_parser.add_option('-v', '--verbose', action='store_true', dest='verbose', default=False,
help='display debug output')
option_parser.add_option('-p', '--properties', dest='properties', action='store_true', default=False,
help='Display properties that can be guessed.')
option_parser.add_option('-l', '--values', dest='values', action='store_true', default=False,
help='Display property values that can be guessed.')
option_parser.add_option('-s', '--transformers', dest='transformers', action='store_true', default=False,
help='Display transformers that can be used.')
option_parser.add_option('-i', '--info', dest='info', default='filename',
help='the desired information type: filename, hash_mpc or a hash from python\'s '
'hashlib module, such as hash_md5, hash_sha1, ...; or a list of any of '
'them, comma-separated')
option_parser.add_option('-n', '--name-only', dest='name_only', action='store_true', default=False,
help='Parse files as name only. Disable folder parsing, extension parsing, and file content analysis.')
option_parser.add_option('-t', '--type', dest='type', default=None,
help='the suggested file type: movie, episode. If undefined, type will be guessed.')
option_parser.add_option('-a', '--advanced', dest='advanced', action='store_true', default=False,
help='display advanced information for filename guesses, as json output')
option_parser.add_option('-y', '--yaml', dest='yaml', action='store_true', default=False,
help='display information for filename guesses as yaml output (like unit-test)')
option_parser.add_option('-d', '--demo', action='store_true', dest='demo', default=False,
help='run a few builtin tests instead of analyzing a file')
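# Illustrative invocations built on the options above (a hedged sketch: it assumes
# the standard "guessit" console script that feeds this parser; the file names are
# made-up examples):
#
#   guessit "Show.Name.S01E02.720p.HDTV.x264.mkv"
#   guessit -i filename,hash_md5 Some.Movie.2012.mkv
#   guessit -n -t episode "Show.Name.S01E02"
#   guessit -p    # list the properties that can be guessed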
| gpl-3.0 |
loles/solar | solar/test/test_operations_with_tags.py | 3 | 1556 | # Copyright 2015 Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from pytest import fixture
from solar.core import resource
from solar.dblayer.model import ModelMeta
from solar.dblayer.solar_models import Resource
@fixture
def tagged_resources():
tags = ['n1', 'n2', 'n3']
t1 = Resource.from_dict('t1',
{'name': 't1', 'tags': tags, 'base_path': 'x'})
t1.save_lazy()
t2 = Resource.from_dict('t2',
{'name': 't2', 'tags': tags, 'base_path': 'x'})
t2.save_lazy()
t3 = Resource.from_dict('t3',
{'name': 't3', 'tags': tags, 'base_path': 'x'})
t3.save_lazy()
ModelMeta.save_all_lazy()
return [t1, t2, t3]
def test_add_remove_tags(tagged_resources):
loaded = resource.load_by_tags({'n1', 'n2'})
assert len(loaded) == 3
for res in loaded:
res.remove_tags('n1')
assert len(resource.load_by_tags(set(['n1']))) == 0
assert len(resource.load_by_tags(set(['n2']))) == 3
| apache-2.0 |
oaubert/advene | lib/simpletal/sgmlentitynames.py | 4 | 6958 | """ simpleTAL Implementation
Copyright (c) 2009 Colin Stewart (http://www.owlfish.com/)
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions
are met:
1. Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
3. The name of the author may not be used to endorse or promote products
derived from this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
If you make any bug fixes or feature enhancements please let me know!
A dictionary of HTML entity names to their decimal value.
"""
htmlNameToUnicodeNumber = {"nbsp": 160
,"iexcl": 161
,"cent": 162
,"pound": 163
,"curren": 164
,"yen": 165
,"brvbar": 166
,"sect": 167
,"uml": 168
,"copy": 169
,"ordf": 170
,"laquo": 171
,"not": 172
,"shy": 173
,"reg": 174
,"macr": 175
,"deg": 176
,"plusmn": 177
,"sup2": 178
,"sup3": 179
,"acute": 180
,"micro": 181
,"para": 182
,"middot": 183
,"cedil": 184
,"sup1": 185
,"ordm": 186
,"raquo": 187
,"frac14": 188
,"frac12": 189
,"frac34": 190
,"iquest": 191
,"Agrave": 192
,"Aacute": 193
,"Acirc": 194
,"Atilde": 195
,"Auml": 196
,"Aring": 197
,"AElig": 198
,"Ccedil": 199
,"Egrave": 200
,"Eacute": 201
,"Ecirc": 202
,"Euml": 203
,"Igrave": 204
,"Iacute": 205
,"Icirc": 206
,"Iuml": 207
,"ETH": 208
,"Ntilde": 209
,"Ograve": 210
,"Oacute": 211
,"Ocirc": 212
,"Otilde": 213
,"Ouml": 214
,"times": 215
,"Oslash": 216
,"Ugrave": 217
,"Uacute": 218
,"Ucirc": 219
,"Uuml": 220
,"Yacute": 221
,"THORN": 222
,"szlig": 223
,"agrave": 224
,"aacute": 225
,"acirc": 226
,"atilde": 227
,"auml": 228
,"aring": 229
,"aelig": 230
,"ccedil": 231
,"egrave": 232
,"eacute": 233
,"ecirc": 234
,"euml": 235
,"igrave": 236
,"iacute": 237
,"icirc": 238
,"iuml": 239
,"eth": 240
,"ntilde": 241
,"ograve": 242
,"oacute": 243
,"ocirc": 244
,"otilde": 245
,"ouml": 246
,"divide": 247
,"oslash": 248
,"ugrave": 249
,"uacute": 250
,"ucirc": 251
,"uuml": 252
,"yacute": 253
,"thorn": 254
,"yuml": 255
,"fnof": 402
,"Alpha": 913
,"Beta": 914
,"Gamma": 915
,"Delta": 916
,"Epsilon": 917
,"Zeta": 918
,"Eta": 919
,"Theta": 920
,"Iota": 921
,"Kappa": 922
,"Lambda": 923
,"Mu": 924
,"Nu": 925
,"Xi": 926
,"Omicron": 927
,"Pi": 928
,"Rho": 929
,"Sigma": 931
,"Tau": 932
,"Upsilon": 933
,"Phi": 934
,"Chi": 935
,"Psi": 936
,"Omega": 937
,"alpha": 945
,"beta": 946
,"gamma": 947
,"delta": 948
,"epsilon": 949
,"zeta": 950
,"eta": 951
,"theta": 952
,"iota": 953
,"kappa": 954
,"lambda": 955
,"mu": 956
,"nu": 957
,"xi": 958
,"omicron": 959
,"pi": 960
,"rho": 961
,"sigmaf": 962
,"sigma": 963
,"tau": 964
,"upsilon": 965
,"phi": 966
,"chi": 967
,"psi": 968
,"omega": 969
,"thetasym": 977
,"upsih": 978
,"piv": 982
,"bull": 8226
,"hellip": 8230
,"prime": 8242
,"Prime": 8243
,"oline": 8254
,"frasl": 8260
,"weierp": 8472
,"image": 8465
,"real": 8476
,"trade": 8482
,"alefsym": 8501
,"larr": 8592
,"uarr": 8593
,"rarr": 8594
,"darr": 8595
,"harr": 8596
,"crarr": 8629
,"lArr": 8656
,"uArr": 8657
,"rArr": 8658
,"dArr": 8659
,"hArr": 8660
,"forall": 8704
,"part": 8706
,"exist": 8707
,"empty": 8709
,"nabla": 8711
,"isin": 8712
,"notin": 8713
,"ni": 8715
,"prod": 8719
,"sum": 8721
,"minus": 8722
,"lowast": 8727
,"radic": 8730
,"prop": 8733
,"infin": 8734
,"ang": 8736
,"and": 8743
,"or": 8744
,"cap": 8745
,"cup": 8746
,"int": 8747
,"there4": 8756
,"sim": 8764
,"cong": 8773
,"asymp": 8776
,"ne": 8800
,"equiv": 8801
,"le": 8804
,"ge": 8805
,"sub": 8834
,"sup": 8835
,"nsub": 8836
,"sube": 8838
,"supe": 8839
,"oplus": 8853
,"otimes": 8855
,"perp": 8869
,"sdot": 8901
,"lceil": 8968
,"rceil": 8969
,"lfloor": 8970
,"rfloor": 8971
,"lang": 9001
,"rang": 9002
,"loz": 9674
,"spades": 9824
,"clubs": 9827
,"hearts": 9829
,"diams": 9830
,"quot": 34
,"amp": 38
,"lt": 60
,"gt": 62
,"OElig": 338
,"oelig": 339
,"Scaron": 352
,"scaron": 353
,"Yuml": 376
,"circ": 710
,"tilde": 732
,"ensp": 8194
,"emsp": 8195
,"thinsp": 8201
,"zwnj": 8204
,"zwj": 8205
,"lrm": 8206
,"rlm": 8207
,"ndash": 8211
,"mdash": 8212
,"lsquo": 8216
,"rsquo": 8217
,"sbquo": 8218
,"ldquo": 8220
,"rdquo": 8221
,"bdquo": 8222
,"dagger": 8224
,"Dagger": 8225
,"permil": 8240
,"lsaquo": 8249
,"rsaquo": 8250
,"euro": 8364}
| gpl-2.0 |
indrajitr/ansible | lib/ansible/utils/path.py | 43 | 5752 | # (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import os
import shutil
from errno import EEXIST
from ansible.errors import AnsibleError
from ansible.module_utils._text import to_bytes, to_native, to_text
__all__ = ['unfrackpath', 'makedirs_safe']
def unfrackpath(path, follow=True, basedir=None):
'''
Returns a path that is free of symlinks (if follow=True), environment variables, relative path traversals and symbols (~)
:arg path: A byte or text string representing a path to be canonicalized
    :arg follow: A boolean to indicate whether symlinks should be resolved or not
:raises UnicodeDecodeError: If the canonicalized version of the path
contains non-utf8 byte sequences.
    :rtype: A text string (unicode on python2, str on python3).
:returns: An absolute path with symlinks, environment variables, and tilde
expanded. Note that this does not check whether a path exists.
example::
'$HOME/../../var/mail' becomes '/var/spool/mail'
'''
b_basedir = to_bytes(basedir, errors='surrogate_or_strict', nonstring='passthru')
if b_basedir is None:
b_basedir = to_bytes(os.getcwd(), errors='surrogate_or_strict')
elif os.path.isfile(b_basedir):
b_basedir = os.path.dirname(b_basedir)
b_final_path = os.path.expanduser(os.path.expandvars(to_bytes(path, errors='surrogate_or_strict')))
if not os.path.isabs(b_final_path):
b_final_path = os.path.join(b_basedir, b_final_path)
if follow:
b_final_path = os.path.realpath(b_final_path)
return to_text(os.path.normpath(b_final_path), errors='surrogate_or_strict')
def makedirs_safe(path, mode=None):
'''
A *potentially insecure* way to ensure the existence of a directory chain. The "safe" in this function's name
refers only to its ability to ignore `EEXIST` in the case of multiple callers operating on the same part of
the directory chain. This function is not safe to use under world-writable locations when the first level of the
path to be created contains a predictable component. Always create a randomly-named element first if there is any
chance the parent directory might be world-writable (eg, /tmp) to prevent symlink hijacking and potential
disclosure or modification of sensitive file contents.
:arg path: A byte or text string representing a directory chain to be created
:kwarg mode: If given, the mode to set the directory to
:raises AnsibleError: If the directory cannot be created and does not already exist.
:raises UnicodeDecodeError: if the path is not decodable in the utf-8 encoding.
'''
rpath = unfrackpath(path)
b_rpath = to_bytes(rpath)
if not os.path.exists(b_rpath):
try:
if mode:
os.makedirs(b_rpath, mode)
else:
os.makedirs(b_rpath)
except OSError as e:
if e.errno != EEXIST:
raise AnsibleError("Unable to create local directories(%s): %s" % (to_native(rpath), to_native(e)))
def basedir(source):
""" returns directory for inventory or playbook """
source = to_bytes(source, errors='surrogate_or_strict')
dname = None
if os.path.isdir(source):
dname = source
elif source in [None, '', '.']:
dname = os.getcwd()
elif os.path.isfile(source):
dname = os.path.dirname(source)
if dname:
# don't follow symlinks for basedir, enables source re-use
dname = os.path.abspath(dname)
return to_text(dname, errors='surrogate_or_strict')
def cleanup_tmp_file(path, warn=False):
"""
Removes temporary file or directory. Optionally display a warning if unable
to remove the file or directory.
:arg path: Path to file or directory to be removed
:kwarg warn: Whether or not to display a warning when the file or directory
cannot be removed
"""
try:
if os.path.exists(path):
try:
if os.path.isdir(path):
shutil.rmtree(path)
elif os.path.isfile(path):
os.unlink(path)
except Exception as e:
if warn:
# Importing here to avoid circular import
from ansible.utils.display import Display
display = Display()
display.display(u'Unable to remove temporary file {0}'.format(to_text(e)))
except Exception:
pass
def is_subpath(child, parent):
"""
Compares paths to check if one is contained in the other
    :arg child: Path to test
    :arg parent: Path to test against
"""
test = False
abs_child = unfrackpath(child, follow=False)
abs_parent = unfrackpath(parent, follow=False)
c = abs_child.split(os.path.sep)
p = abs_parent.split(os.path.sep)
try:
test = c[:len(p)] == p
except IndexError:
# child is shorter than parent so cannot be subpath
pass
return test
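# Usage sketch (editorial addition, not part of the original module). Requires
# ansible to be installed; the sample paths are hypothetical and only serve to
# illustrate the expected behaviour of unfrackpath() and is_subpath().
if __name__ == '__main__':
    print(unfrackpath('~/projects/../ansible', follow=False))
    print(is_subpath('/etc/ansible/roles/common', '/etc/ansible'))  # True
    print(is_subpath('/var/tmp', '/etc/ansible'))  # False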
| gpl-3.0 |
SongGithub/Monthly-Pay-Calculator | src/process_csv.py | 2 | 4220 | """
this module handles all file operations
"""
from employee import Employee
from tax_rate import TaxRate
from payslip import Payslip
import csv,settings,datetime,os
class ProcessCsvFile(object):
"""
    The class is designed to close the read file handle gracefully
"""
def __init__(self, input_filename, output_filepath,tax_filepath):
self._input_filename = input_filename
self._output_filepath = output_filepath
self._tax_filepath = tax_filepath
self._input_object = None
self._headers = []
def read_csv(self):
"""read csv file content then return a list of dictionaries'"""
try:
f_obj = open(self._input_filename, 'rU')
reader = csv.reader(f_obj)
# following section skips the header row
has_header = csv.Sniffer().has_header(f_obj.readline())
f_obj.seek(0) # rewind
incsv = csv.reader(f_obj)
if has_header:
self._headers = keys = reader.next()
self._input_object = f_obj
return {'keys':keys, 'reader':reader}
except IOError as e:
print "I/O error({0}): {1}".format(
e.errno, e.strerror
)
def assemble_read_results(self,reader_obj):
"""assemble reader result from passed in object"""
keys = reader_obj['keys']
reader = reader_obj['reader']
rows = []
for row in reader:
dic = {}
            # field-size integrity control:
            # if any field is missing from the input row, or an extra field exists,
            # then this row is ignored.
if len(keys) == len(row):
for i in range(len(keys)):
dic[keys[i]] = row[i]
rows.append(dic)
return rows
def close_csv(self):
self._input_object.close()
def write_csv_header(self):
"""write csv header with pre-defined content
Args:
filename as input
"""
with open(self._output_filepath, 'w') as f_obj:
writer = csv.writer(
f_obj, delimiter=',',
quotechar='|',
quoting=csv.QUOTE_MINIMAL
)
writer.writerow(
[
'full name',
'pay period',
'gross income',
'income tax',
'net income',
'superannuation',
]
)
def write_csv(self, content):
"""write a csv file with intended content"""
with open(self._output_filepath, 'a') as f_obj:
writer = csv.writer(
f_obj, delimiter=',',
quotechar='|',
quoting=csv.QUOTE_MINIMAL
)
writer.writerow(content)
def prepare_result_file(self):
"""
        The result file name is calculated on the spot and time-stamped
        in order to avoid file-name conflicts.
"""
result_filename = 'result_' + str(
datetime.datetime.now().strftime("%Y%m%d_%H%M%S")) + '.csv'
self._output_filepath = os.path.join(self._output_filepath,result_filename)
def process_data(self):
"""process data according to param loaded"""
tax_object = TaxRate(self._tax_filepath)
csv_reader_object = self.read_csv()
csv_read_results = self.assemble_read_results(csv_reader_object)
self.prepare_result_file()
self.write_csv_header()
if csv_read_results != False:
for person in csv_read_results:
emp = Employee(person)
tax = tax_object.calculate_tax(emp.get_salary())
payslip = Payslip(emp.get_personal_info())
individual_payslip = payslip.calculate_payslip(tax)
print individual_payslip
# write actual content to the file
self.write_csv(individual_payslip)
self.close_csv()
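# Usage sketch (editorial addition, not part of the original module). The three
# paths are hypothetical placeholders; in a real run they must point at an
# existing input CSV, an existing output directory and a tax-rate CSV.
if __name__ == '__main__':
    processor = ProcessCsvFile('employees.csv', 'results', 'tax_rates.csv')
    processor.process_data()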
| gpl-3.0 |
anbangr/trusted-juju | juju/state/agent.py | 2 | 1387 | import zookeeper
class AgentStateMixin(object):
"""A mixin for state objects that will have agents processes.
Provides for the observation and connection of agent processes.
Subclasses must implement M{_get_agent_path}.
"""
def has_agent(self):
"""Does this domain object have an agent connected.
Return boolean deferred informing whether an agent is
connected.
"""
d = self._client.exists(self._get_agent_path())
d.addCallback(lambda result: bool(result))
return d
def _get_agent_path(self):
raise NotImplementedError
def watch_agent(self):
"""Observe changes to an agent's presence.
Return two boolean deferreds informing whether an agent is
connected, and whether a change happened. Both presence
and content changes are encapsulated in the second deferred,
callers interested in only presence need to perform event
filtering as needed.
"""
exists_d, watch_d = self._client.exists_and_watch(
self._get_agent_path())
exists_d.addCallback(lambda result: bool(result))
return exists_d, watch_d
def connect_agent(self):
"""Inform juju that this associated agent is alive.
"""
return self._client.create(
self._get_agent_path(), flags=zookeeper.EPHEMERAL)
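# Usage sketch (editorial addition, not part of the original module). The mixin
# expects subclasses to supply _get_agent_path(); the hypothetical class below
# shows the minimal override a domain object would need.
if __name__ == '__main__':
    class ExampleUnitState(AgentStateMixin):
        def __init__(self, client, unit_name):
            self._client = client
            self._unit_name = unit_name
        def _get_agent_path(self):
            return "/units/%s/agent" % self._unit_name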
| agpl-3.0 |
wujuguang/django-admin-honeypot | admin_honeypot/migrations/0001_initial.py | 5 | 1270 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
]
operations = [
migrations.CreateModel(
name='LoginAttempt',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('username', models.CharField(max_length=255, null=True, verbose_name='username', blank=True)),
('ip_address', models.IPAddressField(null=True, verbose_name='ip address', blank=True)),
('session_key', models.CharField(max_length=50, null=True, verbose_name='session key', blank=True)),
('user_agent', models.TextField(null=True, verbose_name='user-agent', blank=True)),
('timestamp', models.DateTimeField(auto_now_add=True, verbose_name='timestamp')),
('path', models.TextField(null=True, verbose_name='path', blank=True)),
],
options={
'ordering': ('timestamp',),
'verbose_name': 'login attempt',
'verbose_name_plural': 'login attempts',
},
bases=(models.Model,),
),
]
| mit |
chiotlune/ext | gnuradio-3.7.0.1/gnuradio-runtime/python/pmt/qa_pmt.py | 11 | 3097 | #!/usr/bin/env python
#
# Copyright 2011 Free Software Foundation, Inc.
#
# This file is part of GNU Radio
#
# GNU Radio is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3, or (at your option)
# any later version.
#
# GNU Radio is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with GNU Radio; see the file COPYING. If not, write to
# the Free Software Foundation, Inc., 51 Franklin Street,
# Boston, MA 02110-1301, USA.
#
import unittest
import pmt
class test_pmt(unittest.TestCase):
def test01(self):
a = pmt.intern("a")
b = pmt.from_double(123765)
d1 = pmt.make_dict()
d2 = pmt.dict_add(d1, a, b)
print d2
def test02(self):
const = 123765
x_pmt = pmt.from_double(const)
x_int = pmt.to_double(x_pmt)
self.assertEqual(x_int, const)
def test03(self):
v = pmt.init_f32vector(3, [11, -22, 33])
s = pmt.serialize_str(v)
d = pmt.deserialize_str(s)
self.assertTrue(pmt.equal(v, d))
def test04(self):
v = pmt.init_f64vector(3, [11, -22, 33])
s = pmt.serialize_str(v)
d = pmt.deserialize_str(s)
self.assertTrue(pmt.equal(v, d))
def test05(self):
v = pmt.init_u8vector(3, [11, 22, 33])
s = pmt.serialize_str(v)
d = pmt.deserialize_str(s)
self.assertTrue(pmt.equal(v, d))
def test06(self):
v = pmt.init_s8vector(3, [11, -22, 33])
s = pmt.serialize_str(v)
d = pmt.deserialize_str(s)
self.assertTrue(pmt.equal(v, d))
def test07(self):
v = pmt.init_u16vector(3, [11, 22, 33])
s = pmt.serialize_str(v)
d = pmt.deserialize_str(s)
self.assertTrue(pmt.equal(v, d))
def test08(self):
v = pmt.init_s16vector(3, [11, -22, 33])
s = pmt.serialize_str(v)
d = pmt.deserialize_str(s)
self.assertTrue(pmt.equal(v, d))
def test09(self):
v = pmt.init_u32vector(3, [11, 22, 33])
s = pmt.serialize_str(v)
d = pmt.deserialize_str(s)
self.assertTrue(pmt.equal(v, d))
def test10(self):
v = pmt.init_s32vector(3, [11, -22, 33])
s = pmt.serialize_str(v)
d = pmt.deserialize_str(s)
self.assertTrue(pmt.equal(v, d))
def test11(self):
v = pmt.init_c32vector(3, [11 + -101j, -22 + 202j, 33 + -303j])
s = pmt.serialize_str(v)
d = pmt.deserialize_str(s)
self.assertTrue(pmt.equal(v, d))
def test12(self):
v = pmt.init_c64vector(3, [11 + -101j, -22 + 202j, 33 + -303j])
s = pmt.serialize_str(v)
d = pmt.deserialize_str(s)
self.assertTrue(pmt.equal(v, d))
if __name__ == '__main__':
unittest.main()
| gpl-2.0 |
ariebovenberg/snug | tutorial/relations.py | 2 | 1407 | import json
from datetime import datetime
import snug
BASE = 'https://api.github.com'
class repo(snug.Query[dict]):
"""a repository lookup by owner and name"""
def __init__(self, name, owner):
self.name, self.owner = name, owner
def __iter__(self):
request = snug.GET(BASE + f'/repos/{self.owner}/{self.name}')
return json.loads((yield request).content)
def star(self) -> snug.Query[bool]:
"""star this repo"""
req = snug.PUT(BASE + f'/user/starred/{self.owner}/{self.name}')
return (yield req).status_code == 204
@snug.related
class issue(snug.Query[dict]):
"""get an issue in this repo"""
def __init__(self, repo, number):
self.repo, self.number = repo, number
def __iter__(self):
request = snug.GET(
BASE +
f'repos/{self.repo.owner}/'
f'{self.repo.name}/issues/{self.number}')
return json.loads((yield request).content)
def comments(self, since: datetime) -> snug.Query[list]:
"""retrieve comments for this issue"""
request = snug.GET(
BASE +
f'repos/{self.issue.repo.owner}/{self.issue.repo.name}/'
f'issues/{self.issue.number}/comments',
params={'since': since.strftime('%Y-%m-%dT%H:%M:%SZ')})
return json.loads((yield request).content)
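# Usage sketch (editorial addition, not part of the original tutorial module).
# Building a query performs no I/O; 'octocat'/'Hello-World' are placeholder
# values, and related queries such as hello.issue(348) are executed with snug's
# executor (snug.execute in the tutorial) to actually send the HTTP requests.
if __name__ == '__main__':
    hello = repo('Hello-World', owner='octocat')
    print(hello)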
| mit |
nguyentuananh1993/rospydemo | keyclient.py | 1 | 1371 | import socket
import sys
try:
import tty, termios
except ImportError:
# Probably Windows.
try:
import msvcrt
except ImportError:
# FIXME what to do on other platforms?
# Just give up here.
raise ImportError('getch not available')
else:
getch = msvcrt.getch
else:
def getch():
"""getch() -> key character
Read a single keypress from stdin and return the resulting character.
Nothing is echoed to the console. This call will block if a keypress
is not already available, but will not wait for Enter to be pressed.
If the pressed key was a modifier key, nothing will be detected; if
it were a special function key, it may return the first character of
of an escape sequence, leaving additional characters in the buffer.
"""
fd = sys.stdin.fileno()
old_settings = termios.tcgetattr(fd)
try:
tty.setraw(fd)
ch = sys.stdin.read(1)
except KeyboardInterrupt:
sys.exit()
finally:
termios.tcsetattr(fd, termios.TCSADRAIN, old_settings)
return ch
TCP_IP = '192.168.1.102'
TCP_PORT = 8003
socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
socket.connect((TCP_IP, TCP_PORT))
while 1:
a = getch()
socket.send(a)
if a=='q':
break;
socket.close() | gpl-2.0 |
benjaminpjones/fractals | bitmap.py | 1 | 3738 | #BMPHeader
dummyHeader = "424D482B00000000000036040000280000006400000064000000010008000000000012270000202E0000202E00000000000000000000000000000101010002020200030303000404040005050500060606000707070008080800090909000A0A0A000B0B0B000C0C0C000D0D0D000E0E0E000F0F0F00101010001111110012121200131313001414140015151500161616001717170018181800191919001A1A1A001B1B1B001C1C1C001D1D1D001E1E1E001F1F1F00202020002121210022222200232323002424240025252500262626002727270028282800292929002A2A2A002B2B2B002C2C2C002D2D2D002E2E2E002F2F2F00303030003131310032323200333333003434340035353500363636003737370038383800393939003A3A3A003B3B3B003C3C3C003D3D3D003E3E3E003F3F3F00404040004141410042424200434343004444440045454500464646004747470048484800494949004A4A4A004B4B4B004C4C4C004D4D4D004E4E4E004F4F4F00505050005151510052525200535353005454540055555500565656005757570058585800595959005A5A5A005B5B5B005C5C5C005D5D5D005E5E5E005F5F5F00606060006161610062626200636363006464640065656500666666006767670068686800696969006A6A6A006B6B6B006C6C6C006D6D6D006E6E6E006F6F6F00707070007171710072727200737373007474740075757500767676007777770078787800797979007A7A7A007B7B7B007C7C7C007D7D7D007E7E7E007F7F7F00808080008181810082828200838383008484840085858500868686008787870088888800898989008A8A8A008B8B8B008C8C8C008D8D8D008E8E8E008F8F8F00909090009191910092929200939393009494940095959500969696009797970098989800999999009A9A9A009B9B9B009C9C9C009D9D9D009E9E9E009F9F9F00A0A0A000A1A1A100A2A2A200A3A3A300A4A4A400A5A5A500A6A6A600A7A7A700A8A8A800A9A9A900AAAAAA00ABABAB00ACACAC00ADADAD00AEAEAE00AFAFAF00B0B0B000B1B1B100B2B2B200B3B3B300B4B4B400B5B5B500B6B6B600B7B7B700B8B8B800B9B9B900BABABA00BBBBBB00BCBCBC00BDBDBD00BEBEBE00BFBFBF00C0C0C000C1C1C100C2C2C200C3C3C300C4C4C400C5C5C500C6C6C600C7C7C700C8C8C800C9C9C900CACACA00CBCBCB00CCCCCC00CDCDCD00CECECE00CFCFCF00D0D0D000D1D1D100D2D2D200D3D3D300D4D4D400D5D5D500D6D6D600D7D7D700D8D8D800D9D9D900DADADA00DBDBDB00DCDCDC00DDDDDD00DEDEDE00DFDFDF00E0E0E000E1E1E100E2E2E200E3E3E300E4E4E400E5E5E500E6E6E600E7E7E700E8E8E800E9E9E900EAEAEA00EBEBEB00ECECEC00EDEDED00EEEEEE00EFEFEF00F0F0F000F1F1F100F2F2F200F3F3F300F4F4F400F5F5F500F6F6F600F7F7F700F8F8F800F9F9F900FAFAFA00FBFBFB00FCFCFC00FDFDFD00FEFEFE00FFFFFF00"
#Break down header into integer bytes
brokenDownHeader = list(dummyHeader)
dummyHeaderList = []
for i in range(len(brokenDownHeader)/2):
dummyHeaderList.append(brokenDownHeader[2*i]+brokenDownHeader[2*i + 1])
for i in range(len(dummyHeaderList)):
dummyHeaderList[i] = int(dummyHeaderList[i],16)
def intToByteArray(yourInt, numBytes):
outputArray = []
for i in range(numBytes):
outputArray.append(yourInt/(256**i)%256)
return outputArray
def twoDimArrayToBitmap(pixelList, fileName):
width = len(pixelList[0])
height = len(pixelList)
rawDataSize = width * height + 2
widthInBytes = intToByteArray(width , 4)
heightInBytes = intToByteArray(height , 4)
rawDataSizeInBytes = intToByteArray(rawDataSize , 4)
headerList = dummyHeaderList
#Specify dimensions of image in header
for i in range(4):
headerList[i+18] = widthInBytes[i]
headerList[i+22] = heightInBytes[i]
headerList[i+34] = rawDataSizeInBytes[i]
padding = (4 - (width%4))%4 #bitmap requires padding of each row to a multiple of four bytes
paddingArray = []
for i in range(padding):
paddingArray.append(0)
flatPixelList = []
for i in range(height):
flatPixelList.extend(pixelList[i] + paddingArray)
newFileBytes = dummyHeaderList + flatPixelList + [0,0]
newFileByteArray = bytearray(newFileBytes)
newFile = open(fileName, "wb")
newFile.write(newFileByteArray)
newFile.close()
print "Wrote image to " + str(fileName) + "."
| mit |
Yadnyawalkya/integration_tests | cfme/tests/infrastructure/test_vmrc_console.py | 2 | 7008 | # -*- coding: utf-8 -*-
import pytest
from cfme import test_requirements
@test_requirements.vmrc
@pytest.mark.manual
def test_vmrc_console_matrix():
"""
This testcase is here to reflect testing matrix for vmrc consoles. Combinations listed
    are being tested manually. Originally, there was one testcase for every combination; this
    approach reduces the number of needed testcases.
Testing matrix:
Systems:
- fedora latest
- rhel 6.x
- rhel 7.x
- windows 2012
- windows 7
- windows 10
Browsers:
- firefox latest
- chrome latest
- edge latest
- explorer 11
Providers:
- vsphere 6
- vsphere 65
- vsphere 67
Importance:
High:
- fedora latest + firefox, chrome + vsphere 67
- rhel 7.x + firefox, chrome + vsphere 67
- windows 10 + firefox, chrome, edge + vsphere 67
Medium:
- everything else
Testcases:
- test_vmrc_console_firefox_vsphere67_rhel6x
- test_vmrc_console_firefox_vsphere67_rhel7x
- test_vmrc_console_firefox_vsphere67_fedora
- test_vmrc_console_firefox_vsphere67_win10
- test_vmrc_console_firefox_vsphere67_win2012
- test_vmrc_console_firefox_vsphere67_win7
- test_vmrc_console_firefox_vsphere65_win7
- test_vmrc_console_firefox_vsphere65_rhel7x
- test_vmrc_console_firefox_vsphere65_fedora
- test_vmrc_console_firefox_vsphere65_rhel6x
- test_vmrc_console_firefox_vsphere65_win2012
- test_vmrc_console_firefox_vsphere65_win10
- test_vmrc_console_firefox_vsphere6_fedora
- test_vmrc_console_firefox_vsphere6_win10
- test_vmrc_console_firefox_vsphere6_win7
- test_vmrc_console_firefox_vsphere6_rhel6x
- test_vmrc_console_firefox_vsphere6_rhel7x
- test_vmrc_console_firefox_vsphere6_win2012
- test_vmrc_console_chrome_vsphere67_win10
- test_vmrc_console_chrome_vsphere67_rhel7x
- test_vmrc_console_chrome_vsphere67_win7
- test_vmrc_console_chrome_vsphere67_fedora
- test_vmrc_console_chrome_vsphere67_win2012
- test_vmrc_console_chrome_vsphere65_rhel7x
- test_vmrc_console_chrome_vsphere65_win10
- test_vmrc_console_chrome_vsphere65_fedora
- test_vmrc_console_chrome_vsphere65_win2012
- test_vmrc_console_chrome_vsphere65_win7
- test_vmrc_console_chrome_vsphere6_win2012
- test_vmrc_console_chrome_vsphere6_win7
- test_vmrc_console_chrome_vsphere6_rhel7x
- test_vmrc_console_chrome_vsphere6_win10
- test_vmrc_console_chrome_vsphere6_fedora
- test_vmrc_console_edge_vsphere67_win10
- test_vmrc_console_edge_vsphere65_win10
- test_vmrc_console_edge_vsphere6_win10
- test_vmrc_console_ie11_vsphere67_win7
- test_vmrc_console_ie11_vsphere67_win2012
- test_vmrc_console_ie11_vsphere65_win7
- test_vmrc_console_ie11_vsphere65_win2012
- test_vmrc_console_ie11_vsphere6_win2012
- test_vmrc_console_ie11_vsphere6_win7
Polarion:
assignee: apagac
casecomponent: Infra
caseimportance: high
initialEstimate: 2h
setup:
1. Login to CFME Appliance as admin.
2. Navigate to Configuration
3. Under VMware Console Support section and click on Dropdown in front
of "Use" and select "VMware VMRC Plugin".
4. Click save at the bottom of the page.
5. Provision a testing VM.
testSteps:
            1. Navigate to testing VM
2. Launch the console by Access -> VM Console
3. Make sure the console accepts commands
4. Make sure the characters are visible
expectedResults:
1. VM Details displayed
2. You should see a Pop-up being Blocked, Please allow it to
open (always allow pop-ups for this site) and then a new tab
will open and then in few secs, you will see a prompt asking
you if you would like to open VMRC, click Yes. Once done,
VMRC Window will open(apart from browser) and it will ask
you if you would like to View Certificate or Connect anyway
or Cancel, please click Connect Anyway. Finally, you should
see VM in this window and should be able to interact with it
using mouse/keyboard.
3. Console accepts characters
4. Characters not garbled; no visual defect
"""
pass
@pytest.mark.manual
@test_requirements.vmrc
@pytest.mark.tier(2)
def test_vmrc_console_novmrccredsinprovider():
"""
Leave the VMRC Creds blank in the provider add/edit dialog and observe
behavior trying to launch console. It should fail. Also observe the
message in VMRC Console Creds tab about what will happen if creds left
blank.
Bugzilla:
1550612
Polarion:
assignee: apagac
casecomponent: Appliance
caseimportance: critical
caseposneg: negative
initialEstimate: 1/2h
startsin: 5.8
"""
pass
@pytest.mark.manual
@test_requirements.vmrc
@pytest.mark.tier(2)
def test_vmrc_console_addremovevmwarecreds():
"""
Add VMware VMRC Console Credentials to a VMware Provider and then
Remove it.
Bugzilla:
1559957
Polarion:
assignee: apagac
casecomponent: Appliance
initialEstimate: 1/4h
startsin: 5.8
testSteps:
1. Compute->Infrastructure->Provider, Add VMware Provider with VMRC Console Creds
2. Edit provider, remove VMware VMRC Console Creds and Save
expectedResults:
1. Provider added
2. Provider can be Saved without VMRC Console Creds
"""
pass
@pytest.mark.manual
@test_requirements.vmrc
@pytest.mark.tier(2)
def test_vmrc_console_usecredwithlimitedvmrcaccess():
"""
Add Provider in VMware now has a new VMRC Console Tab for adding
credentials which will be used to initiate VMRC Connections and these
credentials could be less privileged as compared to Admin user but
needs to have Console Access.
In current VMware env we have "user_interact@vsphere.local" for this
purpose. It is setup on vSphere65(NVC) and has no permissions to add
network device, suspend vm, install vmware tools or reconfigure
floppy. So if you can see your VMRC Console can"t do these operations
with user_interact, mark this test as passed. As the sole purpose of
this test is to validate correct user and permissions are being used.
Bugzilla:
1479840
Polarion:
assignee: apagac
casecomponent: Appliance
caseimportance: critical
initialEstimate: 1/2h
startsin: 5.8
"""
pass
| gpl-2.0 |
salguarnieri/intellij-community | python/helpers/profiler/thriftpy3/protocol/TBinaryProtocol.py | 44 | 6615 | #
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
from .TProtocol import *
from struct import pack, unpack
class TBinaryProtocol(TProtocolBase):
"""Binary implementation of the Thrift protocol driver."""
# NastyHaxx. Python 2.4+ on 32-bit machines forces hex constants to be
# positive, converting this into a long. If we hardcode the int value
# instead it'll stay in 32 bit-land.
# VERSION_MASK = 0xffff0000
VERSION_MASK = -65536
# VERSION_1 = 0x80010000
VERSION_1 = -2147418112
TYPE_MASK = 0x000000ff
def __init__(self, trans, strictRead=False, strictWrite=True):
TProtocolBase.__init__(self, trans)
self.strictRead = strictRead
self.strictWrite = strictWrite
def writeMessageBegin(self, name, type, seqid):
if self.strictWrite:
self.writeI32(TBinaryProtocol.VERSION_1 | type)
self.writeString(name)
self.writeI32(seqid)
else:
self.writeString(name)
self.writeByte(type)
self.writeI32(seqid)
def writeMessageEnd(self):
pass
def writeStructBegin(self, name):
pass
def writeStructEnd(self):
pass
def writeFieldBegin(self, name, type, id):
self.writeByte(type)
self.writeI16(id)
def writeFieldEnd(self):
pass
def writeFieldStop(self):
self.writeByte(TType.STOP)
def writeMapBegin(self, ktype, vtype, size):
self.writeByte(ktype)
self.writeByte(vtype)
self.writeI32(size)
def writeMapEnd(self):
pass
def writeListBegin(self, etype, size):
self.writeByte(etype)
self.writeI32(size)
def writeListEnd(self):
pass
def writeSetBegin(self, etype, size):
self.writeByte(etype)
self.writeI32(size)
def writeSetEnd(self):
pass
def writeBool(self, bool):
if bool:
self.writeByte(1)
else:
self.writeByte(0)
def writeByte(self, byte):
buff = pack("!b", byte)
self.trans.write(buff)
def writeI16(self, i16):
buff = pack("!h", i16)
self.trans.write(buff)
def writeI32(self, i32):
buff = pack("!i", i32)
self.trans.write(buff)
def writeI64(self, i64):
buff = pack("!q", i64)
self.trans.write(buff)
def writeDouble(self, dub):
buff = pack("!d", dub)
self.trans.write(buff)
def writeString(self, str):
encoded = bytearray(str, 'utf-8')
self.writeI32(len(encoded))
self.trans.write(encoded)
def readMessageBegin(self):
sz = self.readI32()
if sz < 0:
version = sz & TBinaryProtocol.VERSION_MASK
if version != TBinaryProtocol.VERSION_1:
raise TProtocolException(
type=TProtocolException.BAD_VERSION,
message='Bad version in readMessageBegin: %d' % (sz))
type = sz & TBinaryProtocol.TYPE_MASK
name = self.readString()
seqid = self.readI32()
else:
if self.strictRead:
raise TProtocolException(type=TProtocolException.BAD_VERSION,
message='No protocol version header')
name = self.trans.readAll(sz)
type = self.readByte()
seqid = self.readI32()
return (name, type, seqid)
def readMessageEnd(self):
pass
def readStructBegin(self):
pass
def readStructEnd(self):
pass
def readFieldBegin(self):
type = self.readByte()
if type == TType.STOP:
return (None, type, 0)
id = self.readI16()
return (None, type, id)
def readFieldEnd(self):
pass
def readMapBegin(self):
ktype = self.readByte()
vtype = self.readByte()
size = self.readI32()
return (ktype, vtype, size)
def readMapEnd(self):
pass
def readListBegin(self):
etype = self.readByte()
size = self.readI32()
return (etype, size)
def readListEnd(self):
pass
def readSetBegin(self):
etype = self.readByte()
size = self.readI32()
return (etype, size)
def readSetEnd(self):
pass
def readBool(self):
byte = self.readByte()
if byte == 0:
return False
return True
def readByte(self):
buff = self.trans.readAll(1)
val, = unpack('!b', buff)
return val
def readI16(self):
buff = self.trans.readAll(2)
val, = unpack('!h', buff)
return val
def readI32(self):
buff = self.trans.readAll(4)
val, = unpack('!i', buff)
return val
def readI64(self):
buff = self.trans.readAll(8)
val, = unpack('!q', buff)
return val
def readDouble(self):
buff = self.trans.readAll(8)
val, = unpack('!d', buff)
return val
def readString(self):
len = self.readI32()
str = self.trans.readAll(len).decode('utf-8')
return str
class TBinaryProtocolFactory:
def __init__(self, strictRead=False, strictWrite=True):
self.strictRead = strictRead
self.strictWrite = strictWrite
def getProtocol(self, trans):
prot = TBinaryProtocol(trans, self.strictRead, self.strictWrite)
return prot
class TBinaryProtocolAccelerated(TBinaryProtocol):
"""C-Accelerated version of TBinaryProtocol.
This class does not override any of TBinaryProtocol's methods,
but the generated code recognizes it directly and will call into
our C module to do the encoding, bypassing this object entirely.
We inherit from TBinaryProtocol so that the normal TBinaryProtocol
encoding can happen if the fastbinary module doesn't work for some
reason. (TODO(dreiss): Make this happen sanely in more cases.)
In order to take advantage of the C module, just use
TBinaryProtocolAccelerated instead of TBinaryProtocol.
NOTE: This code was contributed by an external developer.
The internal Thrift team has reviewed and tested it,
but we cannot guarantee that it is production-ready.
Please feel free to report bugs and/or success stories
to the public mailing list.
"""
pass
class TBinaryProtocolAcceleratedFactory:
def getProtocol(self, trans):
return TBinaryProtocolAccelerated(trans)
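# Usage sketch (editorial addition, not part of the original module). Round-trips
# two values through the binary protocol using a throwaway in-memory transport;
# real code would use the Thrift transport classes instead.
if __name__ == '__main__':
    class _MemoryTransport(object):
        def __init__(self):
            self._buf = bytearray()
            self._pos = 0
        def write(self, data):
            self._buf.extend(data)
        def readAll(self, sz):
            data = bytes(self._buf[self._pos:self._pos + sz])
            self._pos += sz
            return data
    trans = _MemoryTransport()
    proto = TBinaryProtocol(trans)
    proto.writeI32(42)
    proto.writeString('hello')
    assert proto.readI32() == 42
    assert proto.readString() == 'hello'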
| apache-2.0 |
ivandevp/django | django/utils/inspect.py | 146 | 2383 | from __future__ import absolute_import
import inspect
from django.utils import six
def getargspec(func):
if six.PY2:
return inspect.getargspec(func)
sig = inspect.signature(func)
args = [
p.name for p in sig.parameters.values()
if p.kind == inspect.Parameter.POSITIONAL_OR_KEYWORD
]
varargs = [
p.name for p in sig.parameters.values()
if p.kind == inspect.Parameter.VAR_POSITIONAL
]
varargs = varargs[0] if varargs else None
varkw = [
p.name for p in sig.parameters.values()
if p.kind == inspect.Parameter.VAR_KEYWORD
]
varkw = varkw[0] if varkw else None
defaults = [
p.default for p in sig.parameters.values()
if p.kind == inspect.Parameter.POSITIONAL_OR_KEYWORD and p.default is not p.empty
] or None
return args, varargs, varkw, defaults
def get_func_args(func):
if six.PY2:
argspec = inspect.getargspec(func)
return argspec.args[1:] # ignore 'self'
sig = inspect.signature(func)
return [
arg_name for arg_name, param in sig.parameters.items()
if param.kind == inspect.Parameter.POSITIONAL_OR_KEYWORD
]
def func_accepts_kwargs(func):
if six.PY2:
# Not all callables are inspectable with getargspec, so we'll
# try a couple different ways but in the end fall back on assuming
# it is -- we don't want to prevent registration of valid but weird
# callables.
try:
argspec = inspect.getargspec(func)
except TypeError:
try:
argspec = inspect.getargspec(func.__call__)
except (TypeError, AttributeError):
argspec = None
return not argspec or argspec[2] is not None
return any(
p for p in inspect.signature(func).parameters.values()
if p.kind == p.VAR_KEYWORD
)
def func_has_no_args(func):
args = inspect.getargspec(func)[0] if six.PY2 else [
p for p in inspect.signature(func).parameters.values()
if p.kind == p.POSITIONAL_OR_KEYWORD and p.default is p.empty
]
return len(args) == 1
def func_supports_parameter(func, parameter):
if six.PY3:
return parameter in inspect.signature(func).parameters
else:
args, varargs, varkw, defaults = inspect.getargspec(func)
return parameter in args
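# Usage sketch (editorial addition, not part of the original module). Applies
# the introspection helpers above to a sample function defined here purely for
# demonstration; the commented results are what Python 3 returns.
if __name__ == '__main__':
    def sample(request, obj_id, *args, **kwargs):
        pass
    print(getargspec(sample))  # (['request', 'obj_id'], 'args', 'kwargs', None)
    print(func_accepts_kwargs(sample))  # True
    print(func_supports_parameter(sample, 'obj_id'))  # True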
| bsd-3-clause |
camilonova/django | django/forms/utils.py | 4 | 5736 | import json
from collections import UserList
from django.conf import settings
from django.core.exceptions import ValidationError # backwards compatibility
from django.utils import timezone
from django.utils.encoding import force_text
from django.utils.html import escape, format_html, format_html_join, html_safe
from django.utils.translation import gettext_lazy as _
def pretty_name(name):
"""Convert 'first_name' to 'First name'."""
if not name:
return ''
return name.replace('_', ' ').capitalize()
def flatatt(attrs):
"""
Convert a dictionary of attributes to a single string.
The returned string will contain a leading space followed by key="value",
XML-style pairs. In the case of a boolean value, the key will appear
without a value. It is assumed that the keys do not need to be
XML-escaped. If the passed dictionary is empty, then return an empty
string.
The result is passed through 'mark_safe' (by way of 'format_html_join').
"""
key_value_attrs = []
boolean_attrs = []
for attr, value in attrs.items():
if isinstance(value, bool):
if value:
boolean_attrs.append((attr,))
elif value is not None:
key_value_attrs.append((attr, value))
return (
format_html_join('', ' {}="{}"', sorted(key_value_attrs)) +
format_html_join('', ' {}', sorted(boolean_attrs))
)
@html_safe
class ErrorDict(dict):
"""
A collection of errors that knows how to display itself in various formats.
The dictionary keys are the field names, and the values are the errors.
"""
def as_data(self):
return {f: e.as_data() for f, e in self.items()}
def as_json(self, escape_html=False):
return json.dumps({f: e.get_json_data(escape_html) for f, e in self.items()})
def as_ul(self):
if not self:
return ''
return format_html(
'<ul class="errorlist">{}</ul>',
format_html_join('', '<li>{}{}</li>', ((k, force_text(v)) for k, v in self.items()))
)
def as_text(self):
output = []
for field, errors in self.items():
output.append('* %s' % field)
output.append('\n'.join(' * %s' % e for e in errors))
return '\n'.join(output)
def __str__(self):
return self.as_ul()
@html_safe
class ErrorList(UserList, list):
"""
A collection of errors that knows how to display itself in various formats.
"""
def __init__(self, initlist=None, error_class=None):
super().__init__(initlist)
if error_class is None:
self.error_class = 'errorlist'
else:
self.error_class = 'errorlist {}'.format(error_class)
def as_data(self):
return ValidationError(self.data).error_list
def get_json_data(self, escape_html=False):
errors = []
for error in self.as_data():
message = list(error)[0]
errors.append({
'message': escape(message) if escape_html else message,
'code': error.code or '',
})
return errors
def as_json(self, escape_html=False):
return json.dumps(self.get_json_data(escape_html))
def as_ul(self):
if not self.data:
return ''
return format_html(
'<ul class="{}">{}</ul>',
self.error_class,
format_html_join('', '<li>{}</li>', ((force_text(e),) for e in self))
)
def as_text(self):
return '\n'.join('* %s' % e for e in self)
def __str__(self):
return self.as_ul()
def __repr__(self):
return repr(list(self))
def __contains__(self, item):
return item in list(self)
def __eq__(self, other):
return list(self) == other
def __getitem__(self, i):
error = self.data[i]
if isinstance(error, ValidationError):
return list(error)[0]
return force_text(error)
def __reduce_ex__(self, *args, **kwargs):
# The `list` reduce function returns an iterator as the fourth element
# that is normally used for repopulating. Since we only inherit from
# `list` for `isinstance` backward compatibility (Refs #17413) we
# nullify this iterator as it would otherwise result in duplicate
# entries. (Refs #23594)
info = super(UserList, self).__reduce_ex__(*args, **kwargs)
return info[:3] + (None, None)
# Utilities for time zone support in DateTimeField et al.
def from_current_timezone(value):
"""
When time zone support is enabled, convert naive datetimes
entered in the current time zone to aware datetimes.
"""
if settings.USE_TZ and value is not None and timezone.is_naive(value):
current_timezone = timezone.get_current_timezone()
try:
return timezone.make_aware(value, current_timezone)
except Exception as exc:
raise ValidationError(
_('%(datetime)s couldn\'t be interpreted '
'in time zone %(current_timezone)s; it '
'may be ambiguous or it may not exist.'),
code='ambiguous_timezone',
params={'datetime': value, 'current_timezone': current_timezone}
) from exc
return value
def to_current_timezone(value):
"""
When time zone support is enabled, convert aware datetimes
to naive datetimes in the current time zone for display.
"""
if settings.USE_TZ and value is not None and timezone.is_aware(value):
current_timezone = timezone.get_current_timezone()
return timezone.make_naive(value, current_timezone)
return value
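# Usage sketch (editorial addition, not part of the original module). A quick
# demonstration of the two small helpers above; the attribute dict is an
# arbitrary example. True renders as a bare attribute and None is skipped.
if __name__ == '__main__':
    print(pretty_name('first_name'))  # First name
    print(flatatt({'id': 'header', 'hidden': True, 'lang': None}))  # ' id="header" hidden'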
| bsd-3-clause |
ryfeus/lambda-packs | Tensorflow/source/tensorboard/backend/event_processing/event_file_loader.py | 2 | 2746 | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Functionality for loading events from a record file."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
class EventFileLoader(object):
"""An EventLoader is an iterator that yields Event protos."""
def __init__(self, file_path):
if file_path is None:
raise ValueError('A file path is required')
file_path = tf.resource_loader.readahead_file_path(file_path)
tf.logging.debug('Opening a record reader pointing at %s', file_path)
with tf.errors.raise_exception_on_not_ok_status() as status:
self._reader = tf.pywrap_tensorflow.PyRecordReader_New(
tf.compat.as_bytes(file_path), 0, tf.compat.as_bytes(''), status)
# Store it for logging purposes.
self._file_path = file_path
if not self._reader:
raise IOError('Failed to open a record reader pointing to %s' % file_path)
def Load(self):
"""Loads all new values from disk.
Calling Load multiple times in a row will not 'drop' events as long as the
return value is not iterated over.
Yields:
All values that were written to disk that have not been yielded yet.
"""
tf.logging.debug('Loading events from %s', self._file_path)
while True:
try:
with tf.errors.raise_exception_on_not_ok_status() as status:
self._reader.GetNext(status)
except (tf.errors.DataLossError, tf.errors.OutOfRangeError):
# We ignore partial read exceptions, because a record may be truncated.
# PyRecordReader holds the offset prior to the failed read, so retrying
# will succeed.
break
event = tf.Event()
event.ParseFromString(self._reader.record())
yield event
tf.logging.debug('No more events in %s', self._file_path)
def main(argv):
if len(argv) != 2:
print('Usage: event_file_loader <path-to-the-recordio-file>')
return 1
loader = EventFileLoader(argv[1])
for event in loader.Load():
print(event)
if __name__ == '__main__':
tf.app.run()
| mit |
Workday/OpenFrame | third_party/pycoverage/coverage/execfile.py | 209 | 5865 | """Execute files of Python code."""
import imp, marshal, os, sys
from coverage.backward import exec_code_object, open_source
from coverage.misc import ExceptionDuringRun, NoCode, NoSource
try:
# In Py 2.x, the builtins were in __builtin__
BUILTINS = sys.modules['__builtin__']
except KeyError:
# In Py 3.x, they're in builtins
BUILTINS = sys.modules['builtins']
def rsplit1(s, sep):
"""The same as s.rsplit(sep, 1), but works in 2.3"""
parts = s.split(sep)
return sep.join(parts[:-1]), parts[-1]
def run_python_module(modulename, args):
"""Run a python module, as though with ``python -m name args...``.
`modulename` is the name of the module, possibly a dot-separated name.
`args` is the argument array to present as sys.argv, including the first
element naming the module being executed.
"""
openfile = None
glo, loc = globals(), locals()
try:
try:
# Search for the module - inside its parent package, if any - using
# standard import mechanics.
if '.' in modulename:
packagename, name = rsplit1(modulename, '.')
package = __import__(packagename, glo, loc, ['__path__'])
searchpath = package.__path__
else:
packagename, name = None, modulename
searchpath = None # "top-level search" in imp.find_module()
openfile, pathname, _ = imp.find_module(name, searchpath)
# Complain if this is a magic non-file module.
if openfile is None and pathname is None:
raise NoSource(
"module does not live in a file: %r" % modulename
)
# If `modulename` is actually a package, not a mere module, then we
# pretend to be Python 2.7 and try running its __main__.py script.
if openfile is None:
packagename = modulename
name = '__main__'
package = __import__(packagename, glo, loc, ['__path__'])
searchpath = package.__path__
openfile, pathname, _ = imp.find_module(name, searchpath)
except ImportError:
_, err, _ = sys.exc_info()
raise NoSource(str(err))
finally:
if openfile:
openfile.close()
# Finally, hand the file off to run_python_file for execution.
pathname = os.path.abspath(pathname)
args[0] = pathname
run_python_file(pathname, args, package=packagename)
def run_python_file(filename, args, package=None):
"""Run a python file as if it were the main program on the command line.
`filename` is the path to the file to execute, it need not be a .py file.
`args` is the argument array to present as sys.argv, including the first
element naming the file being executed. `package` is the name of the
enclosing package, if any.
"""
# Create a module to serve as __main__
old_main_mod = sys.modules['__main__']
main_mod = imp.new_module('__main__')
sys.modules['__main__'] = main_mod
main_mod.__file__ = filename
if package:
main_mod.__package__ = package
main_mod.__builtins__ = BUILTINS
# Set sys.argv properly.
old_argv = sys.argv
sys.argv = args
try:
# Make a code object somehow.
if filename.endswith(".pyc") or filename.endswith(".pyo"):
code = make_code_from_pyc(filename)
else:
code = make_code_from_py(filename)
# Execute the code object.
try:
exec_code_object(code, main_mod.__dict__)
except SystemExit:
# The user called sys.exit(). Just pass it along to the upper
# layers, where it will be handled.
raise
except:
# Something went wrong while executing the user code.
# Get the exc_info, and pack them into an exception that we can
# throw up to the outer loop. We peel two layers off the traceback
# so that the coverage.py code doesn't appear in the final printed
# traceback.
typ, err, tb = sys.exc_info()
raise ExceptionDuringRun(typ, err, tb.tb_next.tb_next)
finally:
# Restore the old __main__
sys.modules['__main__'] = old_main_mod
# Restore the old argv and path
sys.argv = old_argv
def make_code_from_py(filename):
"""Get source from `filename` and make a code object of it."""
# Open the source file.
try:
source_file = open_source(filename)
except IOError:
raise NoSource("No file to run: %r" % filename)
try:
source = source_file.read()
finally:
source_file.close()
# We have the source. `compile` still needs the last line to be clean,
# so make sure it is, then compile a code object from it.
if not source or source[-1] != '\n':
source += '\n'
code = compile(source, filename, "exec")
return code
def make_code_from_pyc(filename):
"""Get a code object from a .pyc file."""
try:
fpyc = open(filename, "rb")
except IOError:
raise NoCode("No file to run: %r" % filename)
try:
# First four bytes are a version-specific magic number. It has to
# match or we won't run the file.
magic = fpyc.read(4)
if magic != imp.get_magic():
raise NoCode("Bad magic number in .pyc file")
# Skip the junk in the header that we don't need.
fpyc.read(4) # Skip the moddate.
if sys.version_info >= (3, 3):
# 3.3 added another long to the header (size), skip it.
fpyc.read(4)
# The rest of the file is the code object we want.
code = marshal.load(fpyc)
finally:
fpyc.close()
return code
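# Usage sketch (editorial addition, not part of the original module). A
# hypothetical invocation: execute a script through the machinery above,
# presenting the remaining command-line arguments as that script's sys.argv.
if __name__ == '__main__':
    if len(sys.argv) > 1:
        run_python_file(sys.argv[1], sys.argv[1:])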
| bsd-3-clause |
salguarnieri/intellij-community | python/lib/Lib/site-packages/django/contrib/comments/signals.py | 425 | 1079 | """
Signals relating to comments.
"""
from django.dispatch import Signal
# Sent just before a comment will be posted (after it's been approved and
# moderated; this can be used to modify the comment (in place) with posting
# details or other such actions. If any receiver returns False the comment will be
# discarded and a 403 (not allowed) response returned. This signal is sent at more or less
# the same time (just before, actually) as the Comment object's pre-save signal,
# except that the HTTP request is sent along with this signal.
comment_will_be_posted = Signal(providing_args=["comment", "request"])
# Sent just after a comment was posted. See above for how this differs
# from the Comment object's post-save signal.
comment_was_posted = Signal(providing_args=["comment", "request"])
# Sent after a comment was "flagged" in some way. Check the flag to see if this
# was a user requesting removal of a comment, a moderator approving/removing a
# comment, or some other custom user flag.
comment_was_flagged = Signal(providing_args=["comment", "flag", "created", "request"])
| apache-2.0 |
prashanthr/wakatime | wakatime/packages/pygments_py3/pygments/lexers/php.py | 72 | 9769 | # -*- coding: utf-8 -*-
"""
pygments.lexers.php
~~~~~~~~~~~~~~~~~~~
Lexers for PHP and related languages.
:copyright: Copyright 2006-2014 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
import re
from pygments.lexer import RegexLexer, include, bygroups, default, using, this
from pygments.token import Text, Comment, Operator, Keyword, Name, String, \
Number, Punctuation, Other
from pygments.util import get_bool_opt, get_list_opt, iteritems
__all__ = ['ZephirLexer', 'PhpLexer']
class ZephirLexer(RegexLexer):
"""
For `Zephir language <http://zephir-lang.com/>`_ source code.
    Zephir is a compiled high-level language aimed
    at the creation of C extensions for PHP.
.. versionadded:: 2.0
"""
name = 'Zephir'
aliases = ['zephir']
filenames = ['*.zep']
zephir_keywords = ['fetch', 'echo', 'isset', 'empty']
zephir_type = ['bit', 'bits', 'string']
flags = re.DOTALL | re.MULTILINE
tokens = {
'commentsandwhitespace': [
(r'\s+', Text),
(r'//.*?\n', Comment.Single),
(r'/\*.*?\*/', Comment.Multiline)
],
'slashstartsregex': [
include('commentsandwhitespace'),
(r'/(\\.|[^[/\\\n]|\[(\\.|[^\]\\\n])*])+/'
r'([gim]+\b|\B)', String.Regex, '#pop'),
default('#pop')
],
'badregex': [
(r'\n', Text, '#pop')
],
'root': [
(r'^(?=\s|/|<!--)', Text, 'slashstartsregex'),
include('commentsandwhitespace'),
(r'\+\+|--|~|&&|\?|:|\|\||\\(?=\n)|'
r'(<<|>>>?|==?|!=?|->|[-<>+*%&|^/])=?', Operator, 'slashstartsregex'),
(r'[{(\[;,]', Punctuation, 'slashstartsregex'),
(r'[})\].]', Punctuation),
(r'(for|in|while|do|break|return|continue|switch|case|default|if|else|loop|'
r'require|inline|throw|try|catch|finally|new|delete|typeof|instanceof|void|'
r'namespace|use|extends|this|fetch|isset|unset|echo|fetch|likely|unlikely|'
r'empty)\b', Keyword, 'slashstartsregex'),
(r'(var|let|with|function)\b', Keyword.Declaration, 'slashstartsregex'),
(r'(abstract|boolean|bool|char|class|const|double|enum|export|extends|final|'
r'native|goto|implements|import|int|string|interface|long|ulong|char|uchar|'
r'float|unsigned|private|protected|public|short|static|self|throws|reverse|'
r'transient|volatile)\b', Keyword.Reserved),
(r'(true|false|null|undefined)\b', Keyword.Constant),
(r'(Array|Boolean|Date|_REQUEST|_COOKIE|_SESSION|'
r'_GET|_POST|_SERVER|this|stdClass|range|count|iterator|'
r'window)\b', Name.Builtin),
(r'[$a-zA-Z_][\w\\]*', Name.Other),
(r'[0-9][0-9]*\.[0-9]+([eE][0-9]+)?[fd]?', Number.Float),
(r'0x[0-9a-fA-F]+', Number.Hex),
(r'[0-9]+', Number.Integer),
(r'"(\\\\|\\"|[^"])*"', String.Double),
(r"'(\\\\|\\'|[^'])*'", String.Single),
]
}
class PhpLexer(RegexLexer):
"""
For `PHP <http://www.php.net/>`_ source code.
For PHP embedded in HTML, use the `HtmlPhpLexer`.
Additional options accepted:
`startinline`
If given and ``True`` the lexer starts highlighting with
php code (i.e.: no starting ``<?php`` required). The default
is ``False``.
`funcnamehighlighting`
If given and ``True``, highlight builtin function names
(default: ``True``).
`disabledmodules`
If given, must be a list of module names whose function names
should not be highlighted. By default all modules are highlighted
except the special ``'unknown'`` module that includes functions
that are known to php but are undocumented.
To get a list of allowed modules have a look into the
`_php_builtins` module:
.. sourcecode:: pycon
>>> from pygments.lexers._php_builtins import MODULES
>>> MODULES.keys()
['PHP Options/Info', 'Zip', 'dba', ...]
In fact the names of those modules match the module names from
the php documentation.
"""
name = 'PHP'
aliases = ['php', 'php3', 'php4', 'php5']
filenames = ['*.php', '*.php[345]', '*.inc']
mimetypes = ['text/x-php']
# Note that a backslash is included in the following two patterns
# PHP uses a backslash as a namespace separator
_ident_char = r'[\\\w]|[^\x00-\x7f]'
_ident_begin = r'(?:[\\_a-z]|[^\x00-\x7f])'
_ident_end = r'(?:' + _ident_char + ')*'
_ident_inner = _ident_begin + _ident_end
flags = re.IGNORECASE | re.DOTALL | re.MULTILINE
tokens = {
'root': [
(r'<\?(php)?', Comment.Preproc, 'php'),
(r'[^<]+', Other),
(r'<', Other)
],
'php': [
(r'\?>', Comment.Preproc, '#pop'),
(r'<<<([\'"]?)(' + _ident_inner + r')\1\n.*?\n\s*\2;?\n', String),
(r'\s+', Text),
(r'#.*?\n', Comment.Single),
(r'//.*?\n', Comment.Single),
# put the empty comment here, it is otherwise seen as
# the start of a docstring
(r'/\*\*/', Comment.Multiline),
(r'/\*\*.*?\*/', String.Doc),
(r'/\*.*?\*/', Comment.Multiline),
(r'(->|::)(\s*)(' + _ident_inner + ')',
bygroups(Operator, Text, Name.Attribute)),
(r'[~!%^&*+=|:.<>/@-]+', Operator),
(r'\?', Operator), # don't add to the charclass above!
(r'[\[\]{}();,]+', Punctuation),
(r'(class)(\s+)', bygroups(Keyword, Text), 'classname'),
(r'(function)(\s*)(?=\()', bygroups(Keyword, Text)),
(r'(function)(\s+)(&?)(\s*)',
bygroups(Keyword, Text, Operator, Text), 'functionname'),
(r'(const)(\s+)(' + _ident_inner + ')',
bygroups(Keyword, Text, Name.Constant)),
(r'(and|E_PARSE|old_function|E_ERROR|or|as|E_WARNING|parent|'
r'eval|PHP_OS|break|exit|case|extends|PHP_VERSION|cfunction|'
r'FALSE|print|for|require|continue|foreach|require_once|'
r'declare|return|default|static|do|switch|die|stdClass|'
r'echo|else|TRUE|elseif|var|empty|if|xor|enddeclare|include|'
r'virtual|endfor|include_once|while|endforeach|global|__FILE__|'
r'endif|list|__LINE__|endswitch|new|__sleep|endwhile|not|'
r'array|__wakeup|E_ALL|NULL|final|php_user_filter|interface|'
r'implements|public|private|protected|abstract|clone|try|'
r'catch|throw|this|use|namespace|trait|yield|'
r'finally)\b', Keyword),
(r'(true|false|null)\b', Keyword.Constant),
(r'\$\{\$+' + _ident_inner + '\}', Name.Variable),
(r'\$+' + _ident_inner, Name.Variable),
(_ident_inner, Name.Other),
(r'(\d+\.\d*|\d*\.\d+)(e[+-]?[0-9]+)?', Number.Float),
(r'\d+e[+-]?[0-9]+', Number.Float),
(r'0[0-7]+', Number.Oct),
(r'0x[a-f0-9]+', Number.Hex),
(r'\d+', Number.Integer),
(r'0b[01]+', Number.Bin),
(r"'([^'\\]*(?:\\.[^'\\]*)*)'", String.Single),
(r'`([^`\\]*(?:\\.[^`\\]*)*)`', String.Backtick),
(r'"', String.Double, 'string'),
],
'classname': [
(_ident_inner, Name.Class, '#pop')
],
'functionname': [
(_ident_inner, Name.Function, '#pop')
],
'string': [
(r'"', String.Double, '#pop'),
(r'[^{$"\\]+', String.Double),
(r'\\([nrt"$\\]|[0-7]{1,3}|x[0-9a-f]{1,2})', String.Escape),
(r'\$' + _ident_inner + '(\[\S+?\]|->' + _ident_inner + ')?',
String.Interpol),
(r'(\{\$\{)(.*?)(\}\})',
bygroups(String.Interpol, using(this, _startinline=True),
String.Interpol)),
(r'(\{)(\$.*?)(\})',
bygroups(String.Interpol, using(this, _startinline=True),
String.Interpol)),
(r'(\$\{)(\S+)(\})',
bygroups(String.Interpol, Name.Variable, String.Interpol)),
(r'[${\\]+', String.Double)
],
}
def __init__(self, **options):
self.funcnamehighlighting = get_bool_opt(
options, 'funcnamehighlighting', True)
self.disabledmodules = get_list_opt(
options, 'disabledmodules', ['unknown'])
self.startinline = get_bool_opt(options, 'startinline', False)
# private option argument for the lexer itself
if '_startinline' in options:
self.startinline = options.pop('_startinline')
# collect activated functions in a set
self._functions = set()
if self.funcnamehighlighting:
from pygments.lexers._php_builtins import MODULES
for key, value in iteritems(MODULES):
if key not in self.disabledmodules:
self._functions.update(value)
RegexLexer.__init__(self, **options)
def get_tokens_unprocessed(self, text):
stack = ['root']
if self.startinline:
stack.append('php')
for index, token, value in \
RegexLexer.get_tokens_unprocessed(self, text, stack):
if token is Name.Other:
if value in self._functions:
yield index, Name.Builtin, value
continue
yield index, token, value
def analyse_text(text):
rv = 0.0
if re.search(r'<\?(?!xml)', text):
rv += 0.3
return rv
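# Usage sketch (editorial addition, not part of the original module). Renders a
# small PHP snippet to HTML with the lexer defined above; highlight() and
# HtmlFormatter are standard Pygments entry points.
if __name__ == '__main__':
    from pygments import highlight
    from pygments.formatters import HtmlFormatter
    print(highlight('<?php echo strlen("hi"); ?>', PhpLexer(), HtmlFormatter()))
    # startinline=True lexes a bare snippet that lacks the opening <?php tag.
    print(highlight('echo 1 + 1;', PhpLexer(startinline=True), HtmlFormatter()))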
| bsd-3-clause |
nerevu/frappe | frappe/templates/pages/desk.py | 20 | 1967 | # Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
# MIT License. See license.txt
from __future__ import unicode_literals
no_sitemap = 1
no_cache = 1
base_template_path = "templates/pages/desk.html"
import os, re
import frappe
from frappe import _
import frappe.sessions
def get_context(context):
if (frappe.session.user == "Guest" or
frappe.db.get_value("User", frappe.session.user, "user_type")=="Website User"):
frappe.throw(_("You are not permitted to access this page."), frappe.PermissionError)
hooks = frappe.get_hooks()
boot = frappe.sessions.get()
boot_json = frappe.as_json(boot)
frappe.db.commit()
# remove script tags from boot
boot_json = re.sub("\<script\>[^<]*\</script\>", "", boot_json)
return {
"build_version": get_build_version(),
"include_js": hooks["app_include_js"],
"include_css": hooks["app_include_css"],
"boot": boot if context.get("for_mobile") else boot_json,
"background_image": boot.user.background_image or boot.default_background_image,
"google_analytics_id": frappe.conf.get("google_analytics_id")
}
@frappe.whitelist()
def get_desk_assets(build_version):
"""Get desk assets to be loaded for mobile app"""
data = get_context({"for_mobile": True})
assets = [{"type": "js", "data": ""}, {"type": "css", "data": ""}]
if build_version != data["build_version"]:
# new build, send assets
for path in data["include_js"]:
with open(os.path.join(frappe.local.sites_path, path) ,"r") as f:
assets[0]["data"] = assets[0]["data"] + "\n" + unicode(f.read(), "utf-8")
for path in data["include_css"]:
with open(os.path.join(frappe.local.sites_path, path) ,"r") as f:
assets[1]["data"] = assets[1]["data"] + "\n" + unicode(f.read(), "utf-8")
return {
"build_version": data["build_version"],
"boot": data["boot"],
"assets": assets
}
def get_build_version():
return str(os.path.getmtime(os.path.join(frappe.local.sites_path, "assets", "js",
"desk.min.js")))
| mit |
ryfeus/lambda-packs | Lxml_requests/source/pip/_vendor/cachecontrol/filewrapper.py | 346 | 2531 | from io import BytesIO
class CallbackFileWrapper(object):
"""
Small wrapper around a fp object which will tee everything read into a
buffer, and when that file is closed it will execute a callback with the
contents of that buffer.
All attributes are proxied to the underlying file object.
This class uses members with a double underscore (__) leading prefix so as
not to accidentally shadow an attribute.
"""
def __init__(self, fp, callback):
self.__buf = BytesIO()
self.__fp = fp
self.__callback = callback
def __getattr__(self, name):
        # The vagaries of garbage collection mean that self.__fp is
# not always set. By using __getattribute__ and the private
# name[0] allows looking up the attribute value and raising an
        # AttributeError when it doesn't exist. This stops things from
# infinitely recursing calls to getattr in the case where
# self.__fp hasn't been set.
#
# [0] https://docs.python.org/2/reference/expressions.html#atom-identifiers
fp = self.__getattribute__('_CallbackFileWrapper__fp')
return getattr(fp, name)
def __is_fp_closed(self):
try:
return self.__fp.fp is None
except AttributeError:
pass
try:
return self.__fp.closed
except AttributeError:
pass
# We just don't cache it then.
# TODO: Add some logging here...
return False
def _close(self):
if self.__callback:
self.__callback(self.__buf.getvalue())
# We assign this to None here, because otherwise we can get into
# really tricky problems where the CPython interpreter dead locks
# because the callback is holding a reference to something which
# has a __del__ method. Setting this to None breaks the cycle
# and allows the garbage collector to do it's thing normally.
self.__callback = None
def read(self, amt=None):
data = self.__fp.read(amt)
self.__buf.write(data)
if self.__is_fp_closed():
self._close()
return data
def _safe_read(self, amt):
data = self.__fp._safe_read(amt)
if amt == 2 and data == b'\r\n':
# urllib executes this read to toss the CRLF at the end
# of the chunk.
return data
self.__buf.write(data)
if self.__is_fp_closed():
self._close()
return data
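# --- Editor's note: a minimal usage sketch of CallbackFileWrapper, not part of the
# original cachecontrol source. _FakeResponse is a hypothetical stand-in for the
# httplib response object this wrapper normally sees.
class _FakeResponse(object):
    """Mimics the small part of httplib's interface that __is_fp_closed() checks."""
    def __init__(self, data):
        self.fp = BytesIO(data)

    def read(self, amt=None):
        data = self.fp.read(amt)
        if not data:
            self.fp = None  # httplib sets fp to None once the body is exhausted
        return data


def _callback_file_wrapper_example():
    seen = []
    wrapped = CallbackFileWrapper(_FakeResponse(b"cached body"), seen.append)
    while wrapped.read():  # every read is teed into the internal buffer
        pass
    # once the fake response reports itself closed, the callback fires with the buffer
    return seen == [b"cached body"]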
| mit |
danieltahara/indivisible | indivisible/datasources/googlecivicinformation.py | 1 | 1058 | import us
import re
from base import JSONSource
class GoogleCivicInformation(JSONSource):
@classmethod
def initialize(cls, key):
cls.key = key
def __init__(self):
super(GoogleCivicInformation, self).__init__(
"https://www.googleapis.com/civicinfo/v2/")
def lookup_congressional_district(self, address):
params = {
"address": address,
"includeOffices": False,
"key": self.key,
}
results = self._get("representatives", params=params)
if results and 'error' not in results:
district_regex = r"ocd-division/country:us/state:([a-zA-Z]{2})/cd:(\d{2})"
for k in results["divisions"].iterkeys():
matches = re.findall(district_regex, k)
if len(matches) < 1:
continue
state = matches[0][0]
state = us.states.lookup(state).name
district = int(matches[0][1])
return (state, district)
return (None, None)
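# --- Editor's note: a small sketch (not part of the original module) showing how
# the district regex above behaves; the sample OCD division identifier is made up.
def _district_regex_example():
    sample = "ocd-division/country:us/state:ca/cd:12"
    district_regex = r"ocd-division/country:us/state:([a-zA-Z]{2})/cd:(\d{2})"
    matches = re.findall(district_regex, sample)
    # matches == [('ca', '12')]: state abbreviation plus zero-padded district number
    return matches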
| mit |
Thraxis/pymedusa | lib/sqlalchemy/util/queue.py | 55 | 6548 | # util/queue.py
# Copyright (C) 2005-2016 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
"""An adaptation of Py2.3/2.4's Queue module which supports reentrant
behavior, using RLock instead of Lock for its mutex object. The
Queue object is used exclusively by the sqlalchemy.pool.QueuePool
class.
This is to support the connection pool's usage of weakref callbacks to return
connections to the underlying Queue, which can in extremely
rare cases be invoked within the ``get()`` method of the Queue itself,
producing a ``put()`` inside the ``get()`` and therefore a reentrant
condition.
"""
from collections import deque
from time import time as _time
from .compat import threading
__all__ = ['Empty', 'Full', 'Queue']
class Empty(Exception):
"Exception raised by Queue.get(block=0)/get_nowait()."
pass
class Full(Exception):
"Exception raised by Queue.put(block=0)/put_nowait()."
pass
class Queue:
def __init__(self, maxsize=0):
"""Initialize a queue object with a given maximum size.
If `maxsize` is <= 0, the queue size is infinite.
"""
self._init(maxsize)
# mutex must be held whenever the queue is mutating. All methods
# that acquire mutex must release it before returning. mutex
# is shared between the two conditions, so acquiring and
# releasing the conditions also acquires and releases mutex.
self.mutex = threading.RLock()
# Notify not_empty whenever an item is added to the queue; a
# thread waiting to get is notified then.
self.not_empty = threading.Condition(self.mutex)
# Notify not_full whenever an item is removed from the queue;
# a thread waiting to put is notified then.
self.not_full = threading.Condition(self.mutex)
def qsize(self):
"""Return the approximate size of the queue (not reliable!)."""
self.mutex.acquire()
n = self._qsize()
self.mutex.release()
return n
def empty(self):
"""Return True if the queue is empty, False otherwise (not
reliable!)."""
self.mutex.acquire()
n = self._empty()
self.mutex.release()
return n
def full(self):
"""Return True if the queue is full, False otherwise (not
reliable!)."""
self.mutex.acquire()
n = self._full()
self.mutex.release()
return n
def put(self, item, block=True, timeout=None):
"""Put an item into the queue.
If optional args `block` is True and `timeout` is None (the
default), block if necessary until a free slot is
available. If `timeout` is a positive number, it blocks at
most `timeout` seconds and raises the ``Full`` exception if no
free slot was available within that time. Otherwise (`block`
is false), put an item on the queue if a free slot is
immediately available, else raise the ``Full`` exception
(`timeout` is ignored in that case).
"""
self.not_full.acquire()
try:
if not block:
if self._full():
raise Full
elif timeout is None:
while self._full():
self.not_full.wait()
else:
if timeout < 0:
raise ValueError("'timeout' must be a positive number")
endtime = _time() + timeout
while self._full():
remaining = endtime - _time()
if remaining <= 0.0:
raise Full
self.not_full.wait(remaining)
self._put(item)
self.not_empty.notify()
finally:
self.not_full.release()
def put_nowait(self, item):
"""Put an item into the queue without blocking.
Only enqueue the item if a free slot is immediately available.
Otherwise raise the ``Full`` exception.
"""
return self.put(item, False)
def get(self, block=True, timeout=None):
"""Remove and return an item from the queue.
If optional args `block` is True and `timeout` is None (the
default), block if necessary until an item is available. If
`timeout` is a positive number, it blocks at most `timeout`
seconds and raises the ``Empty`` exception if no item was
available within that time. Otherwise (`block` is false),
return an item if one is immediately available, else raise the
``Empty`` exception (`timeout` is ignored in that case).
"""
self.not_empty.acquire()
try:
if not block:
if self._empty():
raise Empty
elif timeout is None:
while self._empty():
self.not_empty.wait()
else:
if timeout < 0:
raise ValueError("'timeout' must be a positive number")
endtime = _time() + timeout
while self._empty():
remaining = endtime - _time()
if remaining <= 0.0:
raise Empty
self.not_empty.wait(remaining)
item = self._get()
self.not_full.notify()
return item
finally:
self.not_empty.release()
def get_nowait(self):
"""Remove and return an item from the queue without blocking.
Only get an item if one is immediately available. Otherwise
raise the ``Empty`` exception.
"""
return self.get(False)
# Override these methods to implement other queue organizations
# (e.g. stack or priority queue).
# These will only be called with appropriate locks held
# Initialize the queue representation
def _init(self, maxsize):
self.maxsize = maxsize
self.queue = deque()
def _qsize(self):
return len(self.queue)
# Check whether the queue is empty
def _empty(self):
return not self.queue
# Check whether the queue is full
def _full(self):
return self.maxsize > 0 and len(self.queue) == self.maxsize
# Put a new item in the queue
def _put(self, item):
self.queue.append(item)
# Get an item from the queue
def _get(self):
return self.queue.popleft()
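# --- Editor's note: a short usage sketch of this Queue, not part of the original
# SQLAlchemy module; it exercises the non-blocking put/get paths documented above.
def _queue_example():
    q = Queue(maxsize=2)
    q.put_nowait("conn-1")
    q.put_nowait("conn-2")
    try:
        q.put("conn-3", block=False)
    except Full:
        pass  # the queue is bounded, so a third non-blocking put raises Full
    return q.get_nowait()  # "conn-1": items come back in FIFO order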
| gpl-3.0 |
notptr/newsscraper | news.py | 2 | 3937 | #!/usr/bin/env python
#this is a python 3 script that scrapes the news articles off of tl;dr
#Programmer: Matthew Deig
#Date: 2012-12-14
import urllib.request, urllib.parse, urllib.error, re
def getWebsite(websiteURL):
#this function returns a string containing the website data
#websiteURL is a string that holds the website url for tl;dr
try:
webpage_data = urllib.request.urlopen(websiteURL).read().decode("utf-8")
except IOError as e:
return "Couldn't Reach host"
if not404(webpage_data) == True:
return webpage_data
else:
print("Something is wrong with the page skipping scrapping")
return None
def not404(webData):
#this function checks whether we got the right webpage or not.
#webData is going to be a string
#this function returns True if it is the page we want or
#False if it is a 404 or something
for line in re.split('\n', webData):
if re.findall("\<title>.*?\</title>", line) != []:
if line == "<title>TL;DR </title>":
return True
else:
return False
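# --- Editor's note: a tiny illustration (not in the original script) of the title
# check performed by not404(); both sample lines are made up.
def _not404_example():
    good = "<title>TL;DR </title>"
    bad = "<title>404 Not Found</title>"
    # re.findall returns the matched title tags, e.g. ['<title>TL;DR </title>']
    return re.findall("<title>.*?</title>", good), re.findall("<title>.*?</title>", bad)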
def scrapWebpage(webData):
#this will scrape out all of the news articles on tl;dr
#then return the data
newsString = " "
newsArticles = None
webData = re.sub('\n', '\t', webData)
newsData = re.findall("<section.*?</section>", webData)
for line in newsData:
newsString = newsString + ' ' + line
newsData = re.findall("<a.*?</a>", newsString)
for article in newsData:
articleNewline = re.split('\t', article)
url = None
title = None
cat = None
text = ''
for line in articleNewline:
if line[1] == 'a' and line[2] != 'r':
dummy = re.sub("<a href=\"",'',line)
dummy = re.sub("\">", '', dummy)
dummy = re.sub("&", '&', dummy)
url = dummy
elif line[1] == 'h':
dummy = re.sub("<h1>", '', line)
dummy = re.sub("</h1>", '', dummy)
dummy = re.sub("’", "'", dummy)
dummy = re.sub("‘", "'", dummy)
dummy = re.sub("‘", "'", dummy)
dummy = re.sub("’", "'", dummy)
dummy = re.sub("'", "'", dummy)
dummy = re.sub("“", '"', dummy)
dummy = re.sub("”", '"', dummy)
title = dummy
elif line[1] == 's':
dummy = re.sub("<span.*?>", '', line)
dummy = re.sub("</span>", '', dummy)
cat = dummy
elif line[1] == 'p':
dummy = re.sub("<p>", '', line)
dummy = re.sub("</p>", '', dummy)
dummy = re.sub("’", "'", dummy)
dummy = re.sub("‘", "'", dummy)
dummy = re.sub("‘", "'", dummy)
dummy = re.sub("’", "'", dummy)
dummy = re.sub("'", "'", dummy)
dummy = re.sub("“", '"', dummy)
dummy = re.sub("”", '"', dummy)
text = dummy
if newsArticles != None:
newsArticles.append([title, cat, url, text])
else:
newsArticles = [[title, cat, url, text]]
return newsArticles
def prettyDisplay(newsArticle):
#this will be the final part of the program
#it is going to pretty-print the scrapings from tl;dr
#newsArticle is a list we get from scrapWebpage whose entries are all strings
for article in newsArticle:
print("Title: ", article[0])
print(article[1])
print("URL to full article: ", article[2])
print(article[3])
print()
print()
def onelineOutput(newsArticle):
#this is for piping and redirecting
#it is the same as prettyDisplay but on one line
output = ''
for article in newsArticle:
output = output + article[1] + ' ' + article[0] + ' ' + article[2] + ' ' + article[3] + ' '
print(output)
if __name__ == "__main__":
data = getWebsite("http://toolong-didntread.com/")
if data == None:
print("Page is down or 404ed")
else:
data = scrapWebpage(data)
prettyDisplay(data) | unlicense |
WangDequan/pyvision | setup.py | 3 | 2616 | # immediately below is stupid hackery for setuptools to work with Cython
import distutils.extension
import distutils.command.build_ext
from distutils.extension import Extension as _Extension
from setuptools import setup
distutils.extension.Extension = _Extension
distutils.command.build_ext.Extension = _Extension
Extension = _Extension
from Cython.Distutils import build_ext
# end stupid hackery
# these lines will cause html annotation files to be generated
from Cython.Compiler.Main import default_options as pyrex_default_options
pyrex_default_options['annotate'] = True
import os
import numpy
import sys
print "building liblinear"
os.system("make -C vision/liblinear")
root = os.getcwd() + "/vision/"
ext_modules = [
Extension("vision.annotations", ["vision/annotations.pyx",
"vision/annotations.pxd"]),
Extension("vision.features", ["vision/features.pyx"]),
Extension("vision.model", ["vision/model.pyx"]),
Extension("vision.convolution", ["vision/convolution.pyx"]),
Extension("vision.track.standard", ["vision/track/standard.pyx"]),
Extension("vision.alearn.linear", ["vision/alearn/linear.pyx"]),
Extension("vision.alearn.marginals", ["vision/alearn/marginals.pyx"]),
Extension("vision.track.dp", ["vision/track/dp.pyx",
"vision/track/dp.pxd"]),
Extension("vision.track.realprior", ["vision/track/realprior.pyx"]),
Extension("vision.track.pairwise", ["vision/track/pairwise.pyx"]),
Extension("vision.svm", ["vision/svm.pyx"],
extra_objects = [root + "liblinear/linear.o",
root + "liblinear/tron.o",
root + "liblinear/blas/blas.a"],
language = "c++")]
for e in ext_modules:
e.pyrex_directives = {
"boundscheck": False,
"cdivision": True,
"infer_types": True,
"embedsignature": True}
# e.include_dirs.append(".")
e.extra_compile_args = ["-w"]
e.include_dirs.append(numpy.get_include())
setup(
name = "pyvision",
author = "Carl Vondrick",
author_email = "cvondric@ics.uci.edu",
description = "A concise computer vision toolkit",
license = "MIT",
version = "0.3.1",
classifiers = ["Development Status :: 1 - Planning",
"Intended Audience :: Developers"],
packages = ["vision",
"vision.track",
"vision.alearn",
"vision.reporting",
"vision.reconstruction"],
cmdclass = {"build_ext": build_ext},
ext_modules = ext_modules,
#ext_package = "vision"
)
| mit |
imajes/Sick-Beard | sickbeard/providers/hdbits.py | 14 | 7788 | # This file is part of Sick Beard.
#
# Sick Beard is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Sick Beard is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Sick Beard. If not, see <http://www.gnu.org/licenses/>.
import datetime
import urllib
import generic
import sickbeard
from sickbeard import classes
from sickbeard import logger, tvcache, exceptions
from sickbeard import helpers
from sickbeard.common import Quality
from sickbeard.exceptions import ex, AuthException
from sickbeard.name_parser.parser import NameParser, InvalidNameException
try:
import json
except ImportError:
from lib import simplejson as json
class HDBitsProvider(generic.TorrentProvider):
def __init__(self):
generic.TorrentProvider.__init__(self, "HDBits")
self.supportsBacklog = True
self.cache = HDBitsCache(self)
self.url = 'https://hdbits.org'
self.search_url = 'https://hdbits.org/api/torrents'
self.rss_url = 'https://hdbits.org/api/torrents'
self.download_url = 'http://hdbits.org/download.php?'
def isEnabled(self):
return sickbeard.HDBITS
def _checkAuth(self):
if not sickbeard.HDBITS_USERNAME or not sickbeard.HDBITS_PASSKEY:
raise AuthException("Your authentication credentials for " + self.name + " are missing, check your config.")
return True
def _checkAuthFromData(self, parsedJSON):
if parsedJSON is None:
return self._checkAuth()
if 'status' in parsedJSON and 'message' in parsedJSON:
if parsedJSON.get('status') == 5:
logger.log(u"Incorrect authentication credentials for " + self.name + " : " + parsedJSON['message'], logger.DEBUG)
raise AuthException("Your authentication credentials for " + self.name + " are incorrect, check your config.")
return True
def _get_season_search_strings(self, show, season):
season_search_string = [self._make_post_data_JSON(show=show, season=season)]
return season_search_string
def _get_episode_search_strings(self, episode):
episode_search_string = [self._make_post_data_JSON(show=episode.show, episode=episode)]
return episode_search_string
def _get_title_and_url(self, item):
title = item['name']
if title:
title = title.replace(' ', '.')
url = self.download_url + urllib.urlencode({'id': item['id'], 'passkey': sickbeard.HDBITS_PASSKEY})
return (title, url)
def _doSearch(self, search_params, show=None):
self._checkAuth()
logger.log(u"Search url: " + self.search_url + " search_params: " + search_params, logger.DEBUG)
data = self.getURL(self.search_url, post_data=search_params)
if not data:
logger.log(u"No data returned from " + self.search_url, logger.ERROR)
return []
parsedJSON = helpers.parse_json(data)
if parsedJSON is None:
logger.log(u"Error trying to load " + self.name + " JSON data", logger.ERROR)
return []
if self._checkAuthFromData(parsedJSON):
results = []
if parsedJSON and 'data' in parsedJSON:
items = parsedJSON['data']
else:
logger.log(u"Resulting JSON from " + self.name + " isn't correct, not parsing it", logger.ERROR)
items = []
for item in items:
results.append(item)
return results
def findPropers(self, search_date=None):
results = []
search_terms = [' proper ', ' repack ']
for term in search_terms:
for item in self._doSearch(self._make_post_data_JSON(search_term=term)):
if item['utadded']:
try:
result_date = datetime.datetime.fromtimestamp(int(item['utadded']))
except:
result_date = None
if result_date:
if not search_date or result_date > search_date:
title, url = self._get_title_and_url(item)
results.append(classes.Proper(title, url, result_date))
return results
def _make_post_data_JSON(self, show=None, episode=None, season=None, search_term=None):
post_data = {
'username': sickbeard.HDBITS_USERNAME,
'passkey': sickbeard.HDBITS_PASSKEY,
'category': [2], # TV Category
}
if episode:
post_data['tvdb'] = {
'id': show.tvdbid,
'season': episode.season,
'episode': episode.episode
}
if season:
post_data['tvdb'] = {
'id': show.tvdbid,
'season': season,
}
if search_term:
post_data['search'] = search_term
return json.dumps(post_data)
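# --- Editor's note: an illustrative sketch (not part of Sick Beard) of the kind of
# payload _make_post_data_JSON() builds for an episode search; the credential and
# tvdb values below are placeholders.
#
# {
#     "username": "<HDBITS_USERNAME>",
#     "passkey": "<HDBITS_PASSKEY>",
#     "category": [2],
#     "tvdb": {"id": 12345, "season": 1, "episode": 2}
# }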
class HDBitsCache(tvcache.TVCache):
def __init__(self, provider):
tvcache.TVCache.__init__(self, provider)
# only poll HDBits every 15 minutes max
self.minTime = 15
def updateCache(self):
if not self.shouldUpdate():
return
if self._checkAuth(None):
data = self._getRSSData()
# As long as we got something from the provider we count it as an update
if data:
self.setLastUpdate()
else:
return []
logger.log(u"Clearing " + self.provider.name + " cache and updating with new information")
self._clearCache()
parsedJSON = helpers.parse_json(data)
if parsedJSON is None:
logger.log(u"Error trying to load " + self.provider.name + " JSON feed", logger.ERROR)
return []
if self._checkAuth(parsedJSON):
if parsedJSON and 'data' in parsedJSON:
items = parsedJSON['data']
else:
logger.log(u"Resulting JSON from " + self.provider.name + " isn't correct, not parsing it", logger.ERROR)
return []
for item in items:
self._parseItem(item)
else:
raise exceptions.AuthException("Your authentication info for " + self.provider.name + " is incorrect, check your config")
else:
return []
def _getRSSData(self):
return self.provider.getURL(self.provider.rss_url, post_data=self.provider._make_post_data_JSON())
def _parseItem(self, item):
(title, url) = self.provider._get_title_and_url(item)
if title and url:
logger.log(u"Adding item to results: " + title, logger.DEBUG)
self._addCacheEntry(title, url)
else:
logger.log(u"The data returned from the " + self.provider.name + " is incomplete, this result is unusable", logger.ERROR)
return
def _checkAuth(self, data):
return self.provider._checkAuthFromData(data)
provider = HDBitsProvider()
| gpl-3.0 |
themichaelusa/Trinitum | Trinitum/AsyncManager.py | 1 | 10879 | import time
import asyncio
import rethinkdb as r
class AsyncTaskManager(object):
def __init__(self, dbReference, connection, logger):
self.funcDict = {}
self.connection = connection
self.dbReference = dbReference
self.logger = logger
def setFunctionDict(self, newDict):
self.funcDict = newDict
def pullTableContents(self, tableRef):
contents = tableRef.run(self.connection)
return [data for data in contents]
class AsyncPipelineManager(AsyncTaskManager):
def __init__(self, dbReference, connection, logger):
super().__init__(dbReference, connection, logger)
from .Pipeline import Formatter
from gdax import PublicClient
self.formatter = Formatter()
self.gdaxPublicClient = PublicClient()
self.symbol, self.techInds, self.spotInds, self.spotCustom = (None,)*4
self.indicatorLag = None
from .Constants import DEFAULT_SPOT_DATA_DICT, DEFAULT_CUSTOM_DATA
self.spotData = DEFAULT_SPOT_DATA_DICT
self.customDataFeeds = DEFAULT_CUSTOM_DATA
self.customTables = None
async def updateDefaultFeeds(self):
#### UPDATE SPOT DATA DICT ####
try:
spotData = self.gdaxPublicClient.get_product_24hr_stats(self.symbol)
spotData.pop('volume_30day')
self.spotData = {k:float(v) for k,v in spotData.items()}
except BaseException as e:
self.logger.addEvent('trading', ('UPDATE_SPOT_DATA_ERROR: ' + str(e)))
#### UPDATE REALTIME TA-LIB INDICATORS ####
try:
OHLCV = list(self.spotData.values())[:5]
OHLCV = [float(data) for data in OHLCV]
OHLCV[3], OHLCV[4] = OHLCV[4], OHLCV[3]
for ind in self.techInds:
self.spotInds[ind.tbWrapper.indicator] = ind.getRealtime(OHLCV, self.indicatorLag)
except BaseException as e:
self.logger.addEvent('trading', ('UPDATE_TECH_INDS_ERROR: ' + str(e)))
await asyncio.sleep(0)
async def runCustomDataFeeds(self):
complete = {}
# each custom feed maps a name to a (callable, args) pair
for name, (ref, args) in self.customDataFeeds.items():
result = ref(*args)
complete.update({name: result})
self.spotCustom = complete
await asyncio.sleep(0)
async def getPipelineData(self): #read
formattedStratData = self.formatter.formatStratData(self.spotData, self.spotInds)
formattedDataDict = {**formattedStratData, **self.spotCustom}
await asyncio.sleep(0)
if self.customTables is not None:
return formattedDataDict, self.customTables
else:
return formattedDataDict
class AsyncStrategyManager(AsyncTaskManager):
def __init__(self, dbReference, connection, logger):
super().__init__(dbReference, connection, logger)
self.strategy = None
async def tryEntryStrategy(self, tickData, riskData, pCount=0): #execute
tradeResult = self.strategy.tryTradeStrategy(tickData)
if pCount > 2:
riskResult = self.strategy.tryRiskStrategy(riskData)
stratResult = tradeResult & riskResult
if stratResult:
self.logger.addEvent('strategy', 'POSITION ENTRY CONDITIONS VALID')
return stratResult
await asyncio.sleep(0)
return tradeResult
"""
async def tryExitStrategy(self, tickData, targetPos, pCacheSize): #execute
tradeResult = self.strategy.tryTradeStrategy(tickData)
if (tradeResult == -1 and pCacheSize > 0):
self.logger.addEvent('strategy', 'POSITION EXIT CONDITIONS VALID')
await asyncio.sleep(0)
return tradeResult
"""
class AsyncStatisticsManager(AsyncTaskManager):
def __init__(self, dbReference, connection, authData, logger):
super().__init__(dbReference, connection, logger)
from gdax import AuthenticatedClient
self.gdaxAuthClient = AuthenticatedClient(*authData)
from .Constants import DEFAULT_CAPITAL_DICT
self.capitalStats = DEFAULT_CAPITAL_DICT
self.riskStats, self.riskProfile = {'FUNDS': None}, None
self.positionBookRef = self.dbReference.table('PositionBook')
def getReturns(self):
positionBook = self.pullTableContents(self.positionBookRef)
positionBook = positionBook[1:]
return [p.returns for p in positionBook if p.returns != None]
async def updateRiskStatistics(self):
from .Pipeline import getRiskFreeRate
self.riskProfile.updateRiskFreeRate(getRiskFreeRate())
self.riskProfile.updateReturns(self.getReturns())
self.riskStats = self.riskProfile.getAnalytics()
await asyncio.sleep(0)
async def updateCapitalStatistics(self, logCapital=False):
try:
accountData = list(self.gdaxAuthClient.get_accounts())
acctDataUSD = list(filter(lambda x: x['currency'] == "USD", accountData))
availibleCapitalUSD = float(acctDataUSD[0]['available'])
printCapitalValid = bool(logCapital == True)
if(printCapitalValid):
capitalCheck = "Current Capital: " + str(availibleCapitalUSD)
self.logger.addEvent('statistics', capitalCheck)
self.capitalStats['capital'] = availibleCapitalUSD
except BaseException as e:
self.logger.addEvent('trading', ('GDAX_AUTHCLIENT_GET_ACCOUNTS_ERROR: ' + str(e)))
await asyncio.sleep(0)
def getRiskStats(self, availibleFunds, pullRiskParams=False):
self.riskStats['FUNDS'] = availibleFunds
if pullRiskParams:
return self.riskStats, self.riskProfile.parameters
else:
return self.riskStats
def getCapitalStats(self):
return self.capitalStats
def getReturnsCount(self):
return len(self.riskProfile.analyticsObj.returns)
class AsyncTradingManager(AsyncTaskManager):
def __init__(self, dbReference, connection, authData, logger):
super().__init__(dbReference, connection, logger)
self.positionCache = {}
from .Constants import NOT_SET
self.symbol, self.quantity, self.riskParams = (NOT_SET,)*3
from gdax import AuthenticatedClient
self.gdaxAuthClient = AuthenticatedClient(*authData)
def getCacheSize(self):
return len(list(self.positionCache.keys()))
def validPosLimitCheck(self):
return bool(self.getCacheSize() <= self.riskParams['posLimit'])
async def createOrders(self, stratVerdict): #read
entryOrder, validEntryVerdict = 0, (stratVerdict == 1)
if (validEntryVerdict and self.validPosLimitCheck()):
from .Order import Order
entryOrder = Order('ENTRY', 'B', self.symbol, self.quantity)
orderLog = "Created Order for: " + str(self.symbol) + " at size: " + str(self.quantity)
self.logger.addEvent('trading', orderLog)
await asyncio.sleep(0)
return entryOrder
async def verifyAndEnterPosition(self, entryOrder, currentCapital, spotPrice): #read
newPosition, validEntryConditions = None, (entryOrder is not None and currentCapital > 0)
if validEntryConditions:
fundToleranceAvailible = currentCapital*self.riskParams['tolerance'] > self.quantity
if (fundToleranceAvailible and self.validPosLimitCheck()):
response = dict(self.gdaxAuthClient.buy(product_id=self.symbol, type='market', funds=self.quantity))
orderID = response['id']
time.sleep(1)
orderStatus = dict(self.gdaxAuthClient.get_order(orderID))
if (orderStatus['status'] != 'done'):
timedOut = 'TIMED_OUT'
self.gdaxAuthClient.cancel_order(orderID)
entryOrder.setErrorCode(timedOut)
orderTimeOut = 'Order: ' + str(orderID) + ' ' + timedOut
self.logger.addEvent('trading', orderTimeOut)
else:
noErrors = 'NO_ERRORS'
entryOrder.setOrderID(orderID)
entryOrder.setErrorCode(noErrors)
orderFillTime = orderStatus['done_at']
entryValue = float(response['executed_value'])
orderData = (entryOrder.direction, self.symbol, self.quantity, entryValue, orderFillTime)
### CREATE POSITION OBJECT FROM ORDERDATA AND ADD TO POSTION CACHE
from .Position import Position
newPosition = Position(orderID, *orderData)
self.positionCache.update({orderID: newPosition})
positionEntered = noErrors + ', Entered Position: ' + str(orderID)
self.logger.addEvent('trading', positionEntered)
else:
noFunds = "CAPITAL_TOLERANCE_EXCEEDED"
entryOrder.setErrorCode(noFunds)
self.logger.addEvent('trading', noFunds)
await asyncio.sleep(0)
return ([entryOrder], newPosition)
async def exitValidPositions(self, stratVerdict): #read
validExitConditions = stratVerdict == -1 and self.getCacheSize() > 0
completedPositions, exitOrders = [None], [None]
if validExitConditions:
self.logger.addEvent('strategy', 'POSITION EXIT CONDITIONS VALID')
sellResponses, completedPositions, exitOrders, response = [], [], [], None
self.gdaxAuthClient.cancel_all(product=self.symbol)
from .Position import Position
from .Order import Order
for p in self.positionCache.values():
from .Constants import GDAX_FUNDS_ERROR
while (response == GDAX_FUNDS_ERROR or response == None):
response = dict(self.gdaxAuthClient.sell(product_id=self.symbol, type='market', funds=self.quantity))
if (response == GDAX_FUNDS_ERROR):
self.logger.addEvent('trading', 'INVALID_SELL_RESPONSE: GDAX_FUNDS_ERROR')
pArgs = (p['entID'], p['direction'], p['ticker'], p['quantity'], p['entryPrice'], p['entryTime'])
completedPosition = Position(*pArgs)
completedPosition.setExitID(response['id'])
completedPositions.append(completedPosition)
sellResponses.append(response)
exitOrder = Order('EXIT', 'S', self.symbol, self.quantity)
exitOrder.setErrorCode("NO_ERRORS")
exitOrder.setOrderID(response['id'])
exitOrders.append(exitOrder)
response = None
for posit, response in zip(completedPositions, sellResponses):
exitedPos = "Exited Position:" + posit.exID
self.logger.addEvent('trading', exitedPos)
try:
orderStatus = dict(self.gdaxAuthClient.get_order(posit.exID))
orderExitTime = orderStatus['done_at']
posit.setExitParams(float(response['executed_value']), orderExitTime)
except BaseException as e:
self.logger.addEvent('trading', ('GDAX_AUTHCLIENT_GETORDER_ERROR: ' + str(e)))
self.positionCache.clear()
await asyncio.sleep(0)
return (exitOrders, completedPositions)
class AsyncBookManager(AsyncTaskManager):
def __init__(self, dbReference, connection, logger):
super().__init__(dbReference, connection, logger)
self.orderBookRef = self.dbReference.table('OrderBook')
self.posBookRef = self.dbReference.table("PositionBook")
async def addToOrderBook(self, orderObjs): #write
if (orderObjs != [None]):
from .Utilities import getObjectDict
for orderObj in orderObjs:
oDict = getObjectDict(orderObj)
self.orderBookRef.insert(oDict).run(self.connection)
await asyncio.sleep(0)
async def addToPositionBook(self, positions): #write
if (positions != [None]):
from .Utilities import getObjectDict
for position in positions:
pDict = getObjectDict(position)
self.posBookRef.insert(pDict).run(self.connection)
await asyncio.sleep(0)
async def getOrderBook(self): #read
orderBook = self.pullTableContents(self.orderBookRef)
await asyncio.sleep(0)
return orderBook
async def getPositionBook(self): #read
positionBook = self.pullTableContents(self.posBookRef)
await asyncio.sleep(0)
return positionBook
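# --- Editor's note: a minimal, hypothetical sketch of driving one of the async book
# readers above from synchronous code; constructing the manager (rethinkdb
# connection, table references, logger) is assumed to happen elsewhere in Trinitum.
def _run_book_read_example(book_manager):
    loop = asyncio.get_event_loop()
    order_book = loop.run_until_complete(book_manager.getOrderBook())
    return order_book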
| gpl-3.0 |
riteshshrv/django | tests/http_utils/tests.py | 304 | 2247 | from __future__ import unicode_literals
import gzip
import io
from django.http import HttpRequest, HttpResponse, StreamingHttpResponse
from django.http.utils import conditional_content_removal
from django.test import SimpleTestCase
# based on Python 3.3's gzip.compress
def gzip_compress(data):
buf = io.BytesIO()
f = gzip.GzipFile(fileobj=buf, mode='wb', compresslevel=0)
try:
f.write(data)
finally:
f.close()
return buf.getvalue()
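# --- Editor's note: a small self-check (not part of the original Django test file)
# showing that gzip_compress() round-trips through GzipFile decompression.
def _gzip_compress_roundtrip(data=b'abc'):
    compressed = gzip_compress(data)
    # read the gzip member back out of an in-memory buffer
    return gzip.GzipFile(fileobj=io.BytesIO(compressed), mode='rb').read() == data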
class HttpUtilTests(SimpleTestCase):
def test_conditional_content_removal(self):
"""
Tests that content is removed from regular and streaming responses with
a status_code of 100-199, 204, 304 or a method of "HEAD".
"""
req = HttpRequest()
# Do nothing for 200 responses.
res = HttpResponse('abc')
conditional_content_removal(req, res)
self.assertEqual(res.content, b'abc')
res = StreamingHttpResponse(['abc'])
conditional_content_removal(req, res)
self.assertEqual(b''.join(res), b'abc')
# Strip content for some status codes.
for status_code in (100, 150, 199, 204, 304):
res = HttpResponse('abc', status=status_code)
conditional_content_removal(req, res)
self.assertEqual(res.content, b'')
res = StreamingHttpResponse(['abc'], status=status_code)
conditional_content_removal(req, res)
self.assertEqual(b''.join(res), b'')
# Issue #20472
abc = gzip_compress(b'abc')
res = HttpResponse(abc, status=304)
res['Content-Encoding'] = 'gzip'
conditional_content_removal(req, res)
self.assertEqual(res.content, b'')
res = StreamingHttpResponse([abc], status=304)
res['Content-Encoding'] = 'gzip'
conditional_content_removal(req, res)
self.assertEqual(b''.join(res), b'')
# Strip content for HEAD requests.
req.method = 'HEAD'
res = HttpResponse('abc')
conditional_content_removal(req, res)
self.assertEqual(res.content, b'')
res = StreamingHttpResponse(['abc'])
conditional_content_removal(req, res)
self.assertEqual(b''.join(res), b'')
| bsd-3-clause |
sambitgaan/nupic | examples/opf/experiments/spatial_classification/run_exp_generator.py | 34 | 3817 | #! /usr/bin/env python
# ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2013, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
"""
Run ExpGenerator to generate the description and permutations file for a
spatial classification experiment
"""
import os
import json
from optparse import OptionParser
from nupic.swarming.exp_generator.ExpGenerator import expGenerator
if __name__ == '__main__':
helpString = \
"""%prog [options] searchDef
This script is used to create the description.py and permutations.py files
for an experiment. The searchDef argument should be the name of a python
script with a getSearch() method which returns the search definition as a
dict. The schema for this dict can be found at
py/nupic/swarming/exp_generator/experimentDescriptionSchema.json
"""
# ============================================================================
# Process command line arguments
parser = OptionParser(helpString)
parser.add_option("--verbosity", default=0, type="int",
help="Verbosity level, either 0, 1, 2, or 3 [default: %default].")
parser.add_option("--outDir", dest='outDir', default=None,
help="Where to place generated files. Default is in the same directory"
" as the searchDef script.")
(options, args) = parser.parse_args()
# Must provide the name of a script
if len(args) != 1:
parser.error("Missing required 'searchDef' argument")
searchFileName = args[0]
# ------------------------------------------------------------------------
# Read in the search script and get the search definition
searchFile = open(searchFileName)
vars = {}
exec(searchFile, vars)
searchFile.close()
getSearchFunc = vars.get('getSearch', None)
if getSearchFunc is None:
raise RuntimeError("Error: the %s python script does not provide the "
"required getSearch() method")
searchDef = getSearchFunc(os.path.dirname(__file__))
if not isinstance(searchDef, dict):
raise RuntimeError("The searchDef function should return a dict, but it "
"returned %s" % (str(searchDef)))
# ------------------------------------------------------------------------
# Figure out the output directory if not provided
if options.outDir is None:
options.outDir = os.path.dirname(searchFileName)
# ------------------------------------------------------------------------
# Run through expGenerator
expGenArgs = ['--description=%s' % (json.dumps(searchDef)),
'--version=v2',
'--outDir=%s' % (options.outDir)]
print "Running ExpGenerator with the following arguments: ", expGenArgs
expGenerator(expGenArgs)
# Get the permutations file name
permutationsFilename = os.path.join(options.outDir, 'permutations.py')
print "Successfully generated permutations file: %s" % (permutationsFilename)
| agpl-3.0 |
ToontownUprising/src | toontown/effects/Splash.py | 5 | 2673 | from pandac.PandaModules import *
from direct.interval.IntervalGlobal import *
from Ripples import *
from toontown.battle.BattleProps import globalPropPool
from toontown.battle import BattleParticles
class Splash(NodePath):
splashCount = 0
def __init__(self, parent = hidden, wantParticles = 1):
NodePath.__init__(self, parent)
self.assign(parent.attachNewNode('splash'))
self.splashdown = globalPropPool.getProp('splashdown')
self.splashdown.reparentTo(self)
self.splashdown.setZ(-0.01)
self.splashdown.setScale(0.4)
ta = TransparencyAttrib.make(TransparencyAttrib.MBinary)
self.splashdown.node().setAttrib(ta, 1)
self.splashdown.setBin('fixed', 130, 1)
self.ripples = Ripples(self)
self.ripples.setBin('fixed', 120, 1)
self.wantParticles = 1
if self.wantParticles:
self.pSystem = BattleParticles.createParticleEffect('SplashLines')
self.pSystem.setScale(0.4)
self.pSystem.setBin('fixed', 150, 1)
self.particles = self.pSystem.particlesDict.get('particles-1')
self.track = None
self.trackId = Splash.splashCount
Splash.splashCount += 1
self.setBin('fixed', 100, 1)
self.hide()
return
def createTrack(self, rate = 1):
self.ripples.createTrack(rate)
self.splashdown.setPlayRate(rate, 'splashdown')
animDuration = self.splashdown.getDuration('splashdown') * 0.65
rippleSequence = Sequence(Func(self.splashdown.show), Func(self.splashdown.play, 'splashdown'), Wait(animDuration), Func(self.splashdown.hide))
if self.wantParticles:
particleSequence = Sequence(Func(self.pSystem.show), Func(self.particles.induceLabor), Func(self.pSystem.start, self), Wait(2.2), Func(self.pSystem.hide), Func(self.pSystem.disable))
else:
particleSequence = Sequence()
self.track = Sequence(Func(self.show), Parallel(self.ripples.track, rippleSequence, particleSequence), Func(self.hide), name='splashdown-%d-track' % self.trackId)
def play(self, rate = 1):
self.stop()
self.createTrack(rate)
self.track.start()
def loop(self, rate = 1):
self.stop()
self.createTrack(rate)
self.track.loop()
def stop(self):
if self.track:
self.track.finish()
def destroy(self):
self.stop()
del self.track
self.ripples.destroy()
del self.ripples
if self.wantParticles:
self.pSystem.cleanup()
del self.pSystem
del self.particles
self.removeNode()
| mit |
yrizk/yrizk.github.io | myvenv/lib/python3.4/site-packages/pip/_vendor/requests/packages/urllib3/_collections.py | 172 | 6278 | from collections import Mapping, MutableMapping
try:
from threading import RLock
except ImportError: # Platform-specific: No threads available
class RLock:
def __enter__(self):
pass
def __exit__(self, exc_type, exc_value, traceback):
pass
try: # Python 2.7+
from collections import OrderedDict
except ImportError:
from .packages.ordered_dict import OrderedDict
from .packages.six import iterkeys, itervalues
__all__ = ['RecentlyUsedContainer', 'HTTPHeaderDict']
_Null = object()
class RecentlyUsedContainer(MutableMapping):
"""
Provides a thread-safe dict-like container which maintains up to
``maxsize`` keys while throwing away the least-recently-used keys beyond
``maxsize``.
:param maxsize:
Maximum number of recent elements to retain.
:param dispose_func:
Every time an item is evicted from the container,
``dispose_func(value)`` is called.
"""
ContainerCls = OrderedDict
def __init__(self, maxsize=10, dispose_func=None):
self._maxsize = maxsize
self.dispose_func = dispose_func
self._container = self.ContainerCls()
self.lock = RLock()
def __getitem__(self, key):
# Re-insert the item, moving it to the end of the eviction line.
with self.lock:
item = self._container.pop(key)
self._container[key] = item
return item
def __setitem__(self, key, value):
evicted_value = _Null
with self.lock:
# Possibly evict the existing value of 'key'
evicted_value = self._container.get(key, _Null)
self._container[key] = value
# If we didn't evict an existing value, we might have to evict the
# least recently used item from the beginning of the container.
if len(self._container) > self._maxsize:
_key, evicted_value = self._container.popitem(last=False)
if self.dispose_func and evicted_value is not _Null:
self.dispose_func(evicted_value)
def __delitem__(self, key):
with self.lock:
value = self._container.pop(key)
if self.dispose_func:
self.dispose_func(value)
def __len__(self):
with self.lock:
return len(self._container)
def __iter__(self):
raise NotImplementedError('Iteration over this class is unlikely to be threadsafe.')
def clear(self):
with self.lock:
# Copy pointers to all values, then wipe the mapping
values = list(itervalues(self._container))
self._container.clear()
if self.dispose_func:
for value in values:
self.dispose_func(value)
def keys(self):
with self.lock:
return list(iterkeys(self._container))
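# --- Editor's note: a short usage sketch of RecentlyUsedContainer (not part of the
# vendored urllib3 module), illustrating the eviction and dispose_func behaviour
# described in the class docstring above.
def _recently_used_container_example():
    evicted = []
    container = RecentlyUsedContainer(maxsize=2, dispose_func=evicted.append)
    container['a'] = 1
    container['b'] = 2
    container['c'] = 3  # 'a' is now the least recently used entry and gets evicted
    return ('a' not in container, evicted == [1])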
class HTTPHeaderDict(MutableMapping):
"""
:param headers:
An iterable of field-value pairs. Must not contain multiple field names
when compared case-insensitively.
:param kwargs:
Additional field-value pairs to pass in to ``dict.update``.
A ``dict`` like container for storing HTTP Headers.
Field names are stored and compared case-insensitively in compliance with
RFC 7230. Iteration provides the first case-sensitive key seen for each
case-insensitive pair.
Using ``__setitem__`` syntax overwrites fields that compare equal
case-insensitively in order to maintain ``dict``'s api. For fields that
compare equal, instead create a new ``HTTPHeaderDict`` and use ``.add``
in a loop.
If multiple fields that are equal case-insensitively are passed to the
constructor or ``.update``, the behavior is undefined and some will be
lost.
>>> headers = HTTPHeaderDict()
>>> headers.add('Set-Cookie', 'foo=bar')
>>> headers.add('set-cookie', 'baz=quxx')
>>> headers['content-length'] = '7'
>>> headers['SET-cookie']
'foo=bar, baz=quxx'
>>> headers['Content-Length']
'7'
If you want to access the raw headers with their original casing
for debugging purposes you can access the private ``._data`` attribute
which is a normal python ``dict`` that maps the case-insensitive key to a
list of tuples stored as (case-sensitive-original-name, value). Using the
structure from above as our example:
>>> headers._data
{'set-cookie': [('Set-Cookie', 'foo=bar'), ('set-cookie', 'baz=quxx')],
'content-length': [('content-length', '7')]}
"""
def __init__(self, headers=None, **kwargs):
self._data = {}
if headers is None:
headers = {}
self.update(headers, **kwargs)
def add(self, key, value):
"""Adds a (name, value) pair, doesn't overwrite the value if it already
exists.
>>> headers = HTTPHeaderDict(foo='bar')
>>> headers.add('Foo', 'baz')
>>> headers['foo']
'bar, baz'
"""
self._data.setdefault(key.lower(), []).append((key, value))
def getlist(self, key):
"""Returns a list of all the values for the named field. Returns an
empty list if the key doesn't exist."""
return self[key].split(', ') if key in self else []
def copy(self):
h = HTTPHeaderDict()
for key in self._data:
for rawkey, value in self._data[key]:
h.add(rawkey, value)
return h
def __eq__(self, other):
if not isinstance(other, Mapping):
return False
other = HTTPHeaderDict(other)
return dict((k1, self[k1]) for k1 in self._data) == \
dict((k2, other[k2]) for k2 in other._data)
def __getitem__(self, key):
values = self._data[key.lower()]
return ', '.join(value[1] for value in values)
def __setitem__(self, key, value):
self._data[key.lower()] = [(key, value)]
def __delitem__(self, key):
del self._data[key.lower()]
def __len__(self):
return len(self._data)
def __iter__(self):
for headers in itervalues(self._data):
yield headers[0][0]
def __repr__(self):
return '%s(%r)' % (self.__class__.__name__, dict(self.items()))
| apache-2.0 |
mozilla/mwc | apps/firefox/tests.py | 1 | 10461 | # -*- coding: utf-8 -*-
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
import json
import os
from urlparse import parse_qs, urlparse
from django.conf import settings
from django.test.client import Client
from django.utils import unittest
from funfactory.urlresolvers import reverse
from mock import ANY, Mock, patch
from mozorg.tests import TestCase
from nose.tools import eq_, ok_
from platforms import load_devices
from pyquery import PyQuery as pq
from firefox import views as fx_views
from firefox.firefox_details import FirefoxDetails
from firefox.views import product_details
TEST_DATA_DIR = os.path.join(os.path.dirname(__file__), 'test_data')
PROD_DETAILS_DIR = os.path.join(TEST_DATA_DIR, 'product_details_json')
with patch.object(settings, 'PROD_DETAILS_DIR', PROD_DETAILS_DIR):
firefox_details = FirefoxDetails()
@patch.object(fx_views, 'firefox_details', firefox_details)
class TestFirefoxDetails(TestCase):
def test_get_download_url(self):
url = firefox_details.get_download_url('OS X', 'pt-BR', '17.0')
self.assertDictEqual(parse_qs(urlparse(url).query),
{'lang': ['pt-BR'],
'os': ['osx'],
'product': ['firefox-17.0']})
def test_filter_builds_by_locale_name(self):
# search english
builds = firefox_details.get_filtered_full_builds(
firefox_details.latest_version('release'),
'ujara'
)
eq_(len(builds), 1)
eq_(builds[0]['name_en'], 'Gujarati')
# search native
builds = firefox_details.get_filtered_full_builds(
firefox_details.latest_version('release'),
u'જરા'
)
eq_(len(builds), 1)
eq_(builds[0]['name_en'], 'Gujarati')
@patch.object(fx_views, 'firefox_details', firefox_details)
class TestFirefoxAll(TestCase):
def setUp(self):
self.client = Client()
with self.activate('en-US'):
self.url = reverse('firefox.all')
def test_no_search_results(self):
"""
Tables should be gone and not-found message should be shown when there
are no search results.
"""
resp = self.client.get(self.url + '?q=DOES_NOT_EXIST')
doc = pq(resp.content)
ok_(not doc('table.build-table'))
ok_(not doc('.not-found.hide'))
def test_no_search_query(self):
"""
When not searching all builds should show.
"""
resp = self.client.get(self.url)
doc = pq(resp.content)
eq_(len(doc('.build-table')), 2)
eq_(len(doc('.not-found.hide')), 2)
release = firefox_details.latest_version('release')
num_builds = len(firefox_details.get_filtered_full_builds(release))
num_builds += len(firefox_details.get_filtered_test_builds(release))
eq_(len(doc('tr[data-search]')), num_builds)
class TestFirefoxPartners(TestCase):
def setUp(self):
self.client = Client()
@patch('firefox.views.settings.DEBUG', True)
def test_js_bundle_files_debug_true(self):
"""
When DEBUG is on the bundle should return the individual files
with the MEDIA_URL.
"""
bundle = 'partners_desktop'
files = settings.MINIFY_BUNDLES['js'][bundle]
files = [settings.MEDIA_URL + f for f in files]
self.assertEqual(files,
json.loads(fx_views.get_js_bundle_files(bundle)))
@patch('firefox.views.settings.DEBUG', False)
def test_js_bundle_files_debug_false(self):
"""
When DEBUG is off the bundle should return a single minified filename.
"""
bundle = 'partners_desktop'
filename = '%sjs/%s-min.js?build=' % (settings.MEDIA_URL, bundle)
bundle_file = json.loads(fx_views.get_js_bundle_files(bundle))
self.assertEqual(len(bundle_file), 1)
self.assertTrue(bundle_file[0].startswith(filename))
@patch('firefox.views.requests.post')
def test_sf_form_proxy_error_response(self, post_patch):
"""An error response from SF should be returned."""
new_mock = Mock()
new_mock.status_code = 400
post_patch.return_value = new_mock
with self.activate('en-US'):
url = reverse('firefox.partners.contact-bizdev')
resp = self.client.post(url)
self.assertEqual(resp.status_code, 400)
self.assertEqual(resp.content, 'bad_request')
self.assertTrue(post_patch.called)
@patch('firefox.views.requests.post')
def test_sf_form_proxy_invalid_form(self, post_patch):
"""A form error should result in a 400 response."""
with self.activate('en-US'):
url = reverse('firefox.partners.contact-bizdev')
resp = self.client.post(url, {
'first_name': 'Dude' * 20,
})
self.assertEqual(resp.status_code, 400)
self.assertEqual(resp.content, 'Form invalid')
self.assertFalse(post_patch.called)
@patch('firefox.views.requests.post')
def test_sf_form_proxy(self, post_patch):
new_mock = Mock()
new_mock.status_code = 200
post_patch.return_value = new_mock
with self.activate('en-US'):
url = reverse('firefox.partners.contact-bizdev')
resp = self.client.post(url, {
'first_name': 'The',
'last_name': 'Dude',
'title': 'Abider of things',
})
self.assertEqual(resp.status_code, 200)
self.assertEqual(resp.content, 'ok')
post_patch.assert_called_once_with(ANY, {
'first_name': u'The',
'last_name': u'Dude',
'description': u'',
'retURL': 'http://www.mozilla.org/en-US/about/'
'partnerships?success=1',
'title': u'Abider of things',
'URL': u'',
'company': u'',
'oid': '00DU0000000IrgO',
'phone': u'',
'mobile': u'',
'00NU0000002pDJr': [],
'email': u'',
})
class TestLoadDevices(unittest.TestCase):
def file(self):
# where should the test file go?
return 'TODO'
@unittest.skip('Please to write test')
def test_load_devices(self):
load_devices(self, self.file(), cacheDevices=False)
#todo
@patch.object(fx_views, 'firefox_details', firefox_details)
class FxVersionRedirectsMixin(object):
def test_non_firefox(self):
user_agent = 'random'
response = self.client.get(self.url, HTTP_USER_AGENT=user_agent)
eq_(response.status_code, 301)
eq_(response['Vary'], 'User-Agent')
eq_(response['Location'],
'http://testserver%s' % reverse('firefox.new'))
def test_bad_firefox(self):
user_agent = 'Mozilla/5.0 (SaferSurf) Firefox 1.5'
response = self.client.get(self.url, HTTP_USER_AGENT=user_agent)
eq_(response.status_code, 301)
eq_(response['Vary'], 'User-Agent')
eq_(response['Location'],
'http://testserver%s' % reverse('firefox.update'))
@patch.dict(product_details.firefox_versions,
LATEST_FIREFOX_VERSION='14.0')
def test_old_firefox(self):
user_agent = ('Mozilla/5.0 (Macintosh; Intel Mac OS X 10.7; rv:13.0) '
'Gecko/20100101 Firefox/13.0')
response = self.client.get(self.url, HTTP_USER_AGENT=user_agent)
eq_(response.status_code, 301)
eq_(response['Vary'], 'User-Agent')
eq_(response['Location'],
'http://testserver%s' % reverse('firefox.update'))
@patch.dict(product_details.firefox_versions,
LATEST_FIREFOX_VERSION='13.0.5')
def test_current_minor_version_firefox(self):
"""
Should show current even if behind by a patch version
"""
user_agent = ('Mozilla/5.0 (Macintosh; Intel Mac OS X 10.7; rv:13.0) '
'Gecko/20100101 Firefox/13.0')
response = self.client.get(self.url, HTTP_USER_AGENT=user_agent)
eq_(response.status_code, 200)
eq_(response['Vary'], 'User-Agent')
@patch.dict(product_details.firefox_versions,
LATEST_FIREFOX_VERSION='18.0')
def test_esr_firefox(self):
"""
Currently released ESR firefoxen should not redirect. At present
they are 10.0.x and 17.0.x.
"""
user_agent = ('Mozilla/5.0 (Macintosh; Intel Mac OS X 10.6; rv:10.0.12) '
'Gecko/20111101 Firefox/10.0.12')
response = self.client.get(self.url, HTTP_USER_AGENT=user_agent)
eq_(response.status_code, 200)
eq_(response['Vary'], 'User-Agent')
user_agent = ('Mozilla/5.0 (Macintosh; Intel Mac OS X 10.7; rv:17.0) '
'Gecko/20100101 Firefox/17.0')
response = self.client.get(self.url, HTTP_USER_AGENT=user_agent)
eq_(response.status_code, 200)
eq_(response['Vary'], 'User-Agent')
@patch.dict(product_details.firefox_versions,
LATEST_FIREFOX_VERSION='16.0')
def test_current_firefox(self):
user_agent = ('Mozilla/5.0 (Macintosh; Intel Mac OS X 10.7; rv:16.0) '
'Gecko/20100101 Firefox/16.0')
response = self.client.get(self.url, HTTP_USER_AGENT=user_agent)
eq_(response.status_code, 200)
eq_(response['Vary'], 'User-Agent')
@patch.dict(product_details.firefox_versions,
LATEST_FIREFOX_VERSION='16.0')
def test_future_firefox(self):
user_agent = ('Mozilla/5.0 (Macintosh; Intel Mac OS X 10.7; rv:18.0) '
'Gecko/20100101 Firefox/18.0')
response = self.client.get(self.url, HTTP_USER_AGENT=user_agent)
eq_(response.status_code, 200)
eq_(response['Vary'], 'User-Agent')
class TestWhatsnewRedirect(FxVersionRedirectsMixin, TestCase):
def setUp(self):
self.client = Client()
with self.activate('en-US'):
self.url = reverse('firefox.whatsnew', args=['13.0'])
class TestFirstrunRedirect(FxVersionRedirectsMixin, TestCase):
def setUp(self):
self.client = Client()
with self.activate('en-US'):
self.url = reverse('firefox.firstrun', args=['13.0'])
| mpl-2.0 |
kelvin13/shifty-octocat | pygments/lexers/chapel.py | 19 | 3458 | # -*- coding: utf-8 -*-
"""
pygments.lexers.chapel
~~~~~~~~~~~~~~~~~~~~~~
Lexer for the Chapel language.
:copyright: Copyright 2006-2015 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
from pygments.lexer import RegexLexer, bygroups, words
from pygments.token import Text, Comment, Operator, Keyword, Name, String, \
Number, Punctuation
__all__ = ['ChapelLexer']
class ChapelLexer(RegexLexer):
"""
For `Chapel <http://chapel.cray.com/>`_ source.
.. versionadded:: 2.0
"""
name = 'Chapel'
filenames = ['*.chpl']
aliases = ['chapel', 'chpl']
# mimetypes = ['text/x-chapel']
tokens = {
'root': [
(r'\n', Text),
(r'\s+', Text),
(r'\\\n', Text),
(r'//(.*?)\n', Comment.Single),
(r'/(\\\n)?[*](.|\n)*?[*](\\\n)?/', Comment.Multiline),
(r'(config|const|in|inout|out|param|ref|type|var)\b',
Keyword.Declaration),
(r'(false|nil|true)\b', Keyword.Constant),
(r'(bool|complex|imag|int|opaque|range|real|string|uint)\b',
Keyword.Type),
(words((
'align', 'atomic', 'begin', 'break', 'by', 'cobegin', 'coforall',
'continue', 'delete', 'dmapped', 'do', 'domain', 'else', 'enum',
'except', 'export', 'extern', 'for', 'forall', 'if', 'index',
'inline', 'iter', 'label', 'lambda', 'let', 'local', 'new',
'noinit', 'on', 'only', 'otherwise', 'pragma', 'private',
'public', 'reduce', 'require', 'return', 'scan', 'select',
'serial', 'single', 'sparse', 'subdomain', 'sync', 'then',
'use', 'when', 'where', 'while', 'with', 'yield', 'zip'),
suffix=r'\b'),
Keyword),
(r'(proc)((?:\s|\\\s)+)', bygroups(Keyword, Text), 'procname'),
(r'(class|module|record|union)(\s+)', bygroups(Keyword, Text),
'classname'),
# imaginary integers
(r'\d+i', Number),
(r'\d+\.\d*([Ee][-+]\d+)?i', Number),
(r'\.\d+([Ee][-+]\d+)?i', Number),
(r'\d+[Ee][-+]\d+i', Number),
# reals cannot end with a period due to lexical ambiguity with
# .. operator. See reference for rationale.
(r'(\d*\.\d+)([eE][+-]?[0-9]+)?i?', Number.Float),
(r'\d+[eE][+-]?[0-9]+i?', Number.Float),
# integer literals
# -- binary
(r'0[bB][01]+', Number.Bin),
# -- hex
(r'0[xX][0-9a-fA-F]+', Number.Hex),
# -- octal
(r'0[oO][0-7]+', Number.Oct),
# -- decimal
(r'[0-9]+', Number.Integer),
# strings
(r'"(\\\\|\\"|[^"])*"', String),
(r"'(\\\\|\\'|[^'])*'", String),
# tokens
(r'(=|\+=|-=|\*=|/=|\*\*=|%=|&=|\|=|\^=|&&=|\|\|=|<<=|>>=|'
r'<=>|<~>|\.\.|by|#|\.\.\.|'
r'&&|\|\||!|&|\||\^|~|<<|>>|'
r'==|!=|<=|>=|<|>|'
r'[+\-*/%]|\*\*)', Operator),
(r'[:;,.?()\[\]{}]', Punctuation),
# identifiers
(r'[a-zA-Z_][\w$]*', Name.Other),
],
'classname': [
(r'[a-zA-Z_][\w$]*', Name.Class, '#pop'),
],
'procname': [
(r'[a-zA-Z_][\w$]*', Name.Function, '#pop'),
],
}
| gpl-3.0 |
satvikdhandhania/vit-11 | build/lib.linux-x86_64-2.7/moca/mrs/legacy_api.py | 3 | 14520 | from __future__ import absolute_import
try:
import json
except ImportError, e:
import simplejson as json
import logging
import urllib2
import telnetlib
import urllib
from django.conf import settings
from django.template import loader
from django.http import HttpResponse
from django import forms
from django.core.mail import send_mail
from django.shortcuts import render_to_response
from util import enable_logging
from moca.mrs import openmrs, patient, sms
from moca.mrs.api import register_saved_procedure, register_binary, register_binary_chunk
from moca.mrs.models import Notification
def render_json_template(*args, **kwargs):
"""Renders a JSON template, and then calls render_json_response()."""
data = loader.render_to_string(*args, **kwargs)
return render_json_response(data)
def render_json_response(data):
"""Sends an HttpResponse with the X-JSON header and the right mimetype."""
resp = HttpResponse(data, mimetype=("application/json; charset=" +
settings.DEFAULT_CHARSET))
resp['X-JSON'] = data
return resp
def json_fail(message):
response = {
'status': 'FAILURE',
'data': message,
}
return json.dumps(response)
def json_succeed(data):
response = {
'status': 'SUCCESS',
'data': data,
}
return json.dumps(response)
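# --- Editor's note: a small illustration (not part of the original API module) of
# the JSON envelopes produced by json_fail/json_succeed above.
def _json_envelope_example():
    # json_succeed("ok") -> '{"status": "SUCCESS", "data": "ok"}' (key order may vary)
    # json_fail("bad")   -> '{"status": "FAILURE", "data": "bad"}'
    return json_succeed("ok"), json_fail("bad")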
def validate_credentials(request):
try:
username = request.REQUEST.get("username", None)
password = request.REQUEST.get("password", None)
logging.info("username: " + username)
logging.info("pasword: " + password)
response = ''
omrs = openmrs.OpenMRS(username,password,
settings.OPENMRS_SERVER_URL)
if omrs.validate_credentials(username, password):
response = json_succeed("username and password validated!")
else:
response = json_fail("username and password combination incorrect!")
return render_json_response(response)
except Exception, e:
logging.error( "Exception in validate_credentials: " + str(e))
class ProcedureSubmitForm(forms.Form):
username = forms.CharField(required=True, max_length=256)
password = forms.CharField(required=True, max_length=256)
savedproc_guid = forms.CharField(required=True, max_length=512)
procedure_guid = forms.CharField(required=True, max_length=512)
responses = forms.CharField(required=True)
phone = forms.CharField(max_length=255)
@enable_logging
def procedure_submit(request):
try:
logging.info("Received saved procedure submission.")
for key,value in request.REQUEST.items():
print key,value
# if request.method != 'POST':
# return HttpResponse('get')
form = ProcedureSubmitForm(request.REQUEST)
response = ''
if form.is_valid():
result, message = register_saved_procedure(form.cleaned_data['savedproc_guid'],
form.cleaned_data['procedure_guid'],
form.cleaned_data['responses'],
form.cleaned_data['phone'],
form.cleaned_data['username'],
form.cleaned_data['password'])
if result:
response = json_succeed("Successfully saved the procedure: %s" % message)
logging.info("Saved procedure successfully registerd.")
else:
response = json_fail(message)
logging.error("Failed to register procedure: %s" % message)
else:
logging.error("Saved procedure submission was invalid, dumping REQUEST.")
for k,v in request.REQUEST.items():
logging.error("SavedProcedure argument %s:%s" % (k,v))
response = json_fail("Could not parse submission : missing parts or invalid data?")
except Exception, e:
error = "Exception : %s" % e
logging.error(error)
response = json_fail(error)
return render_json_response(response)
class BinaryChunkSubmitForm(forms.Form):
procedure_guid = forms.CharField(required=True, max_length=512)
element_id = forms.CharField(required=True)
element_type = forms.CharField(required=True)
binary_guid = forms.CharField(required=True)
file_size = forms.IntegerField(required=True)
byte_start = forms.IntegerField(required=True)
byte_end = forms.IntegerField(required=True)
#byte_data = forms.CharField(required=True)
byte_data = forms.FileField(required=True)
done = forms.BooleanField(initial=False, required=False)
@enable_logging
def binarychunk_submit(request):
"""Processes an individual chunk of binary data uploaded."""
response = ''
form = BinaryChunkSubmitForm(request.POST, request.FILES)
if form.is_valid():
logging.info("Received valid binarychunk form")
procedure_guid = form.cleaned_data['procedure_guid']
element_id = form.cleaned_data['element_id']
element_type = form.cleaned_data['element_type']
binary_guid = form.cleaned_data['binary_guid']
file_size = form.cleaned_data['file_size']
byte_start = form.cleaned_data['byte_start']
byte_end = form.cleaned_data['byte_end']
byte_data = form.cleaned_data['byte_data']
try:
result, message = register_binary_chunk(procedure_guid,
element_id,
element_type,
binary_guid,
file_size,
byte_start,
byte_end,
byte_data.chunks())
if result:
response = json_succeed("Successfully saved the binary chunk: %s" % message)
else:
response = json_fail("Failed to save the binary chunk: %s" % message)
except Exception, e:
logging.error("registering binary chunk failed: %s" % e)
response = json_fail("Registering binary chunk failed: %s" % e)
logging.info("Finished processing binarychunk form")
else:
logging.error("Received invalid binarychunk form")
for k,v in request.REQUEST.items():
if k == 'byte_data':
logging.debug("%s:(binary length %d)" % (k,len(v)))
else:
logging.debug("%s:%s" % (k,v))
response = json_fail("Could not parse submission. Missing parts?")
return render_json_response(response)
class BinaryChunkHackSubmitForm(forms.Form):
procedure_guid = forms.CharField(required=True, max_length=512)
element_id = forms.CharField(required=True)
element_type = forms.CharField(required=True)
binary_guid = forms.CharField(required=True)
file_size = forms.IntegerField(required=True)
byte_start = forms.IntegerField(required=True)
byte_end = forms.IntegerField(required=True)
byte_data = forms.CharField(required=True)
#byte_data = forms.FileField(required=True)
done = forms.BooleanField(initial=False, required=False)
@enable_logging
def binarychunk_hack_submit(request):
"""Processes an individual chunk of binary data uploaded."""
response = ''
form = BinaryChunkHackSubmitForm(request.POST, request.FILES)
if form.is_valid():
logging.info("Received valid binarychunk-hack form")
procedure_guid = form.cleaned_data['procedure_guid']
element_id = form.cleaned_data['element_id']
element_type = form.cleaned_data['element_type']
binary_guid = form.cleaned_data['binary_guid']
file_size = form.cleaned_data['file_size']
byte_start = form.cleaned_data['byte_start']
byte_end = form.cleaned_data['byte_end']
byte_data = form.cleaned_data['byte_data']
# This hack submits byte_data as base64 encoded, so decode it.
byte_data = byte_data.decode('base64')
try:
result, message = register_binary_chunk(procedure_guid,
element_id,
element_type,
binary_guid,
file_size,
byte_start,
byte_end,
[byte_data,])
if result:
response = json_succeed("Successfully saved the binary chunk: %s" % message)
else:
response = json_fail("Failed to save the binary chunk: %s" % message)
except Exception, e:
logging.error("registering binary chunk failed: %s" % e)
response = json_fail("Registering binary chunk failed: %s" % e)
logging.info("Finished processing binarychunk form")
else:
logging.error("Received invalid binarychunk form")
for k,v in request.REQUEST.items():
if k == 'byte_data':
logging.debug("%s:(binary length %d)" % (k,len(v)))
else:
logging.debug("%s:%s" % (k,v))
response = json_fail("Could not parse submission. Missing parts?")
logging.info("Sending response %s" % response)
return render_json_response(response)
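# Illustrative client-side sketch (not part of the original module): shows how a caller
# might base64-encode a chunk before posting it to binarychunk_hack_submit, which
# reverses the encoding with byte_data.decode('base64'). The helper name, endpoint URL
# handling and urllib usage are assumptions for illustration; only the field names come
# from BinaryChunkHackSubmitForm.
def _example_post_binarychunk_hack(url, meta, raw_bytes):
    """meta: dict with procedure_guid, element_id, element_type, binary_guid,
    file_size, byte_start, byte_end and (optionally) done."""
    import base64, urllib, urllib2
    payload = dict(meta)
    # the server undoes this with byte_data.decode('base64')
    payload['byte_data'] = base64.b64encode(raw_bytes)
    return urllib2.urlopen(url, urllib.urlencode(payload)).read()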
class BinarySubmitForm(forms.Form):
procedure_guid = forms.CharField(required=True, max_length=512)
element_id = forms.CharField(required=True)
#data = forms.FileField(required=True)
@enable_logging
def binary_submit(request):
response = ''
form = BinarySubmitForm(request.REQUEST)
data = request.FILES.get('data',None)
if form.is_valid() and data:
logging.info("Received a valid Binary submission form")
element_id = form.cleaned_data['element_id']
procedure_guid = form.cleaned_data['procedure_guid']
register_binary(procedure_guid, element_id, data)
response = json_succeed("Successfully saved the binary")
logging.info("Done processing Binary submission form")
else:
logging.info("Received an invalid Binary submission form")
response = json_fail("Could not parse submission. Missing parts?")
return render_json_response(response)
class OpenMRSQueryForm(forms.Form):
username = forms.CharField(required=True, max_length=256)
password = forms.CharField(required=True, max_length=256)
@enable_logging
def patient_list(request):
logging.info("entering patient list proc")
username = request.REQUEST.get("username", None)
password = request.REQUEST.get("password", None)
omrs = openmrs.OpenMRS(username,password,
settings.OPENMRS_SERVER_URL)
try:
patients_xml = omrs.get_all_patients()
data = patient.parse_patient_list_xml(patients_xml)
logging.info("we finished getting the patient list")
response = json_succeed(data)
except Exception, e:
logging.error("Got exception while fetching patient list: %s" % e)
response = json_fail("Problem while getting patient list: %s" % e)
return render_json_response(response)
@enable_logging
def patient_get(request, id):
logging.info("entering patient get proc")
username = request.REQUEST.get("username", None)
password = request.REQUEST.get("password", None)
omrs = openmrs.OpenMRS(username,password,
settings.OPENMRS_SERVER_URL)
logging.info("About to getPatient")
try:
patient_xml = omrs.get_patient(id)
data = patient.parse_patient_xml(patient_xml)
response = json_succeed(data)
except Exception, e:
logging.error("Got error %s" % e)
response = json_fail("couldn't get patient")
logging.info("finished patient_get")
return render_json_response(response)
@enable_logging
def notification_submit(request):
phoneId = request.REQUEST.get("phoneIdentifier", None)
text = request.REQUEST.get("notificationText", None)
caseIdentifier = request.REQUEST.get("caseIdentifier", None)
patientIdentifier = request.REQUEST.get("patientIdentifier", None)
delivered = False
logging.info("Notification submit received")
for key,value in request.REQUEST.items():
logging.info("Notification submit %s:%s" % (key,value))
response = json_fail('Failed to register notification.')
if phoneId and text and caseIdentifier and patientIdentifier:
n = Notification(client=phoneId,
patient_id=patientIdentifier,
message=text,
procedure_id=caseIdentifier,
delivered=delivered)
n.save()
response = json_succeed('Successfully registered notification.')
try:
sms.send_sms_notification(n)
except Exception, e:
logging.error("Got error while trying to send notification: %s" % e)
return render_json_response(response)
def email_notification_submit(request):
addresses = request.REQUEST.get("emailAddresses",None)
caseIdentifier = request.REQUEST.get("caseIdentifier", None)
patientId = request.REQUEST.get("patientIdentifier", None)
subject = request.REQUEST.get("subject", "")
message = request.REQUEST.get("notificationText", "")
logging.info("Email notification submit received")
for key,value in request.REQUEST.items():
logging.info("Notification submit %s:%s" % (key,value))
response = json_fail('Failed to register email notification.')
try:
emailAddresses = json.loads(addresses)
except Exception, e:
logging.error('Could not parse email addresses: %s' % e)
response = json_fail('Got error when trying to parse email addresses.')
return render_json_response(response)
if addresses and caseIdentifier and patientId:
try:
send_mail(subject, message, 'moca-specialist@mit.edu',
emailAddresses, fail_silently=False)
response = json_succeed('Successfully registered email notification')
except Exception, e:
logging.error('Email could not be sent: %s' % e)
return render_json_response(response)
| bsd-3-clause |
danmergens/mi-instrument | mi/dataset/driver/nutnr_b/dcl_full/nutnr_b_dcl_full_recovered_driver.py | 7 | 1342 | #!/usr/local/bin/python2.7
##
# OOIPLACEHOLDER
#
# Copyright 2014 Raytheon Co.
##
__author__ = 'mworden'
import os
from mi.logging import config
from mi.core.log import get_logger
from mi.dataset.dataset_parser import DataSetDriverConfigKeys
from mi.dataset.dataset_driver import DataSetDriver
from mi.dataset.parser.nutnr_b_dcl_full import NutnrBDclFullRecoveredParser
from mi.core.versioning import version
log = get_logger()
@version("15.7.1")
def parse(unused, source_file_path, particle_data_handler):
parser_config = {
DataSetDriverConfigKeys.PARTICLE_MODULE: 'mi.dataset.parser.nutnr_b_particles',
DataSetDriverConfigKeys.PARTICLE_CLASS: None
}
def exception_callback(exception):
log.debug("ERROR: %r", exception)
particle_data_handler.setParticleDataCaptureFailure()
with open(source_file_path, 'r') as stream_handle:
parser = NutnrBDclFullRecoveredParser(parser_config,
stream_handle,
lambda state, ingested: None,
lambda data: None,
exception_callback)
driver = DataSetDriver(parser, particle_data_handler)
driver.processFileStream()
return particle_data_handler
| bsd-2-clause |
gangliao/Paddle | demo/quick_start/trainer_config.resnet-lstm.py | 13 | 3628 | # edit-mode: -*- python -*-
# Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
This configuration is a demonstration of how to implement the stacked LSTM
with residual connections, i.e. an LSTM layer takes the sum of the hidden states
and inputs of the previous LSTM layer instead of only the hidden states.
This architecture is from:
Yonghui Wu, Mike Schuster, Zhifeng Chen, Quoc V. Le, Mohammad Norouzi,
Wolfgang Macherey, Maxim Krikun, Yuan Cao, Qin Gao, Klaus Macherey,
Jeff Klingner, Apurva Shah, Melvin Johnson, Xiaobing Liu, Lukasz Kaiser,
Stephan Gouws, Yoshikiyo Kato, Taku Kudo, Hideto Kazawa, Keith Stevens,
George Kurian, Nishant Patil, Wei Wang, Cliff Young, Jason Smith, Jason Riesa,
Alex Rudnick, Oriol Vinyals, Greg Corrado, Macduff Hughes, Jeffrey Dean. 2016.
Google's Neural Machine Translation System: Bridging the Gap between Human and
Machine Translation. In arXiv https://arxiv.org/pdf/1609.08144v2.pdf
Different from the architecture described in the paper, we use a stack of
single-direction LSTM layers as the first layer instead of a bi-directional LSTM. Also,
since this is a demo code, to reduce computation time, we stacked 4 layers
instead of 8 layers.
"""
from paddle.trainer_config_helpers import *
dict_file = "./data/dict.txt"
word_dict = dict()
with open(dict_file, 'r') as f:
for i, line in enumerate(f):
w = line.strip().split()[0]
word_dict[w] = i
is_predict = get_config_arg('is_predict', bool, False)
trn = 'data/train.list' if not is_predict else None
tst = 'data/test.list' if not is_predict else 'data/pred.list'
process = 'process' if not is_predict else 'process_predict'
define_py_data_sources2(
train_list=trn,
test_list=tst,
module="dataprovider_emb",
obj=process,
args={"dictionary": word_dict})
batch_size = 128 if not is_predict else 1
settings(
batch_size=batch_size,
learning_rate=2e-3,
learning_method=AdamOptimizer(),
regularization=L2Regularization(8e-4),
gradient_clipping_threshold=25)
bias_attr = ParamAttr(initial_std=0., l2_rate=0.)
data = data_layer(name="word", size=len(word_dict))
emb = embedding_layer(input=data, size=128)
lstm = simple_lstm(input=emb, size=128, lstm_cell_attr=ExtraAttr(drop_rate=0.1))
previous_input, previous_hidden_state = emb, lstm
for i in range(3):
# The input to the current layer is the sum of the hidden state
# and input of the previous layer.
current_input = addto_layer(input=[previous_input, previous_hidden_state])
hidden_state = simple_lstm(
input=current_input, size=128, lstm_cell_attr=ExtraAttr(drop_rate=0.1))
previous_input, previous_hidden_state = current_input, hidden_state
lstm = previous_hidden_state
lstm_last = pooling_layer(input=lstm, pooling_type=MaxPooling())
output = fc_layer(
input=lstm_last, size=2, bias_attr=bias_attr, act=SoftmaxActivation())
if is_predict:
maxid = maxid_layer(output)
outputs([maxid, output])
else:
label = data_layer(name="label", size=2)
cls = classification_cost(input=output, label=label)
outputs(cls)
| apache-2.0 |
andris210296/andris-projeto | backend/venv/lib/python2.7/site-packages/unidecode/x0d3.py | 253 | 4705 | data = (
'tim', # 0x00
'tib', # 0x01
'tibs', # 0x02
'tis', # 0x03
'tiss', # 0x04
'ting', # 0x05
'tij', # 0x06
'tic', # 0x07
'tik', # 0x08
'tit', # 0x09
'tip', # 0x0a
'tih', # 0x0b
'pa', # 0x0c
'pag', # 0x0d
'pagg', # 0x0e
'pags', # 0x0f
'pan', # 0x10
'panj', # 0x11
'panh', # 0x12
'pad', # 0x13
'pal', # 0x14
'palg', # 0x15
'palm', # 0x16
'palb', # 0x17
'pals', # 0x18
'palt', # 0x19
'palp', # 0x1a
'palh', # 0x1b
'pam', # 0x1c
'pab', # 0x1d
'pabs', # 0x1e
'pas', # 0x1f
'pass', # 0x20
'pang', # 0x21
'paj', # 0x22
'pac', # 0x23
'pak', # 0x24
'pat', # 0x25
'pap', # 0x26
'pah', # 0x27
'pae', # 0x28
'paeg', # 0x29
'paegg', # 0x2a
'paegs', # 0x2b
'paen', # 0x2c
'paenj', # 0x2d
'paenh', # 0x2e
'paed', # 0x2f
'pael', # 0x30
'paelg', # 0x31
'paelm', # 0x32
'paelb', # 0x33
'paels', # 0x34
'paelt', # 0x35
'paelp', # 0x36
'paelh', # 0x37
'paem', # 0x38
'paeb', # 0x39
'paebs', # 0x3a
'paes', # 0x3b
'paess', # 0x3c
'paeng', # 0x3d
'paej', # 0x3e
'paec', # 0x3f
'paek', # 0x40
'paet', # 0x41
'paep', # 0x42
'paeh', # 0x43
'pya', # 0x44
'pyag', # 0x45
'pyagg', # 0x46
'pyags', # 0x47
'pyan', # 0x48
'pyanj', # 0x49
'pyanh', # 0x4a
'pyad', # 0x4b
'pyal', # 0x4c
'pyalg', # 0x4d
'pyalm', # 0x4e
'pyalb', # 0x4f
'pyals', # 0x50
'pyalt', # 0x51
'pyalp', # 0x52
'pyalh', # 0x53
'pyam', # 0x54
'pyab', # 0x55
'pyabs', # 0x56
'pyas', # 0x57
'pyass', # 0x58
'pyang', # 0x59
'pyaj', # 0x5a
'pyac', # 0x5b
'pyak', # 0x5c
'pyat', # 0x5d
'pyap', # 0x5e
'pyah', # 0x5f
'pyae', # 0x60
'pyaeg', # 0x61
'pyaegg', # 0x62
'pyaegs', # 0x63
'pyaen', # 0x64
'pyaenj', # 0x65
'pyaenh', # 0x66
'pyaed', # 0x67
'pyael', # 0x68
'pyaelg', # 0x69
'pyaelm', # 0x6a
'pyaelb', # 0x6b
'pyaels', # 0x6c
'pyaelt', # 0x6d
'pyaelp', # 0x6e
'pyaelh', # 0x6f
'pyaem', # 0x70
'pyaeb', # 0x71
'pyaebs', # 0x72
'pyaes', # 0x73
'pyaess', # 0x74
'pyaeng', # 0x75
'pyaej', # 0x76
'pyaec', # 0x77
'pyaek', # 0x78
'pyaet', # 0x79
'pyaep', # 0x7a
'pyaeh', # 0x7b
'peo', # 0x7c
'peog', # 0x7d
'peogg', # 0x7e
'peogs', # 0x7f
'peon', # 0x80
'peonj', # 0x81
'peonh', # 0x82
'peod', # 0x83
'peol', # 0x84
'peolg', # 0x85
'peolm', # 0x86
'peolb', # 0x87
'peols', # 0x88
'peolt', # 0x89
'peolp', # 0x8a
'peolh', # 0x8b
'peom', # 0x8c
'peob', # 0x8d
'peobs', # 0x8e
'peos', # 0x8f
'peoss', # 0x90
'peong', # 0x91
'peoj', # 0x92
'peoc', # 0x93
'peok', # 0x94
'peot', # 0x95
'peop', # 0x96
'peoh', # 0x97
'pe', # 0x98
'peg', # 0x99
'pegg', # 0x9a
'pegs', # 0x9b
'pen', # 0x9c
'penj', # 0x9d
'penh', # 0x9e
'ped', # 0x9f
'pel', # 0xa0
'pelg', # 0xa1
'pelm', # 0xa2
'pelb', # 0xa3
'pels', # 0xa4
'pelt', # 0xa5
'pelp', # 0xa6
'pelh', # 0xa7
'pem', # 0xa8
'peb', # 0xa9
'pebs', # 0xaa
'pes', # 0xab
'pess', # 0xac
'peng', # 0xad
'pej', # 0xae
'pec', # 0xaf
'pek', # 0xb0
'pet', # 0xb1
'pep', # 0xb2
'peh', # 0xb3
'pyeo', # 0xb4
'pyeog', # 0xb5
'pyeogg', # 0xb6
'pyeogs', # 0xb7
'pyeon', # 0xb8
'pyeonj', # 0xb9
'pyeonh', # 0xba
'pyeod', # 0xbb
'pyeol', # 0xbc
'pyeolg', # 0xbd
'pyeolm', # 0xbe
'pyeolb', # 0xbf
'pyeols', # 0xc0
'pyeolt', # 0xc1
'pyeolp', # 0xc2
'pyeolh', # 0xc3
'pyeom', # 0xc4
'pyeob', # 0xc5
'pyeobs', # 0xc6
'pyeos', # 0xc7
'pyeoss', # 0xc8
'pyeong', # 0xc9
'pyeoj', # 0xca
'pyeoc', # 0xcb
'pyeok', # 0xcc
'pyeot', # 0xcd
'pyeop', # 0xce
'pyeoh', # 0xcf
'pye', # 0xd0
'pyeg', # 0xd1
'pyegg', # 0xd2
'pyegs', # 0xd3
'pyen', # 0xd4
'pyenj', # 0xd5
'pyenh', # 0xd6
'pyed', # 0xd7
'pyel', # 0xd8
'pyelg', # 0xd9
'pyelm', # 0xda
'pyelb', # 0xdb
'pyels', # 0xdc
'pyelt', # 0xdd
'pyelp', # 0xde
'pyelh', # 0xdf
'pyem', # 0xe0
'pyeb', # 0xe1
'pyebs', # 0xe2
'pyes', # 0xe3
'pyess', # 0xe4
'pyeng', # 0xe5
'pyej', # 0xe6
'pyec', # 0xe7
'pyek', # 0xe8
'pyet', # 0xe9
'pyep', # 0xea
'pyeh', # 0xeb
'po', # 0xec
'pog', # 0xed
'pogg', # 0xee
'pogs', # 0xef
'pon', # 0xf0
'ponj', # 0xf1
'ponh', # 0xf2
'pod', # 0xf3
'pol', # 0xf4
'polg', # 0xf5
'polm', # 0xf6
'polb', # 0xf7
'pols', # 0xf8
'polt', # 0xf9
'polp', # 0xfa
'polh', # 0xfb
'pom', # 0xfc
'pob', # 0xfd
'pobs', # 0xfe
'pos', # 0xff
)
| mit |
1905410/Misago | misago/users/views/admin/ranks.py | 1 | 3952 | from django.contrib import messages
from django.core.urlresolvers import reverse
from django.shortcuts import redirect
from django.utils.translation import ugettext_lazy as _
from misago.admin.views import generic
from ...forms.admin import RankForm
from ...models import Rank
class RankAdmin(generic.AdminBaseMixin):
root_link = 'misago:admin:users:ranks:index'
Model = Rank
Form = RankForm
templates_dir = 'misago/admin/ranks'
message_404 = _("Requested rank does not exist.")
def update_roles(self, target, roles):
target.roles.clear()
if roles:
target.roles.add(*roles)
def handle_form(self, form, request, target):
super(RankAdmin, self).handle_form(form, request, target)
self.update_roles(target, form.cleaned_data['roles'])
class RanksList(RankAdmin, generic.ListView):
ordering = (('order', None),)
class NewRank(RankAdmin, generic.ModelFormView):
message_submit = _('New rank "%(name)s" has been saved.')
class EditRank(RankAdmin, generic.ModelFormView):
message_submit = _('Rank "%(name)s" has been edited.')
class DeleteRank(RankAdmin, generic.ButtonView):
def check_permissions(self, request, target):
message_format = {'name': target.name}
if target.is_default:
message = _('Rank "%(name)s" is default rank and can\'t be deleted.')
return message % message_format
if target.user_set.exists():
message = _('Rank "%(name)s" is assigned to users and can\'t be deleted.')
return message % message_format
def button_action(self, request, target):
target.delete()
message = _('Rank "%(name)s" has been deleted.')
messages.success(request, message % {'name': target.name})
class MoveDownRank(RankAdmin, generic.ButtonView):
def button_action(self, request, target):
try:
other_target = Rank.objects.filter(order__gt=target.order)
other_target = other_target.earliest('order')
except Rank.DoesNotExist:
other_target = None
if other_target:
other_target.order, target.order = target.order, other_target.order
other_target.save(update_fields=['order'])
target.save(update_fields=['order'])
message = _('Rank "%(name)s" has been moved below "%(other)s".')
targets_names = {'name': target.name, 'other': other_target.name}
messages.success(request, message % targets_names)
class MoveUpRank(RankAdmin, generic.ButtonView):
def button_action(self, request, target):
try:
other_target = Rank.objects.filter(order__lt=target.order)
other_target = other_target.latest('order')
except Rank.DoesNotExist:
other_target = None
if other_target:
other_target.order, target.order = target.order, other_target.order
other_target.save(update_fields=['order'])
target.save(update_fields=['order'])
message = _('Rank "%(name)s" has been moved above "%(other)s".')
targets_names = {'name': target.name, 'other': other_target.name}
messages.success(request, message % targets_names)
class RankUsers(RankAdmin, generic.TargetedView):
def real_dispatch(self, request, target):
redirect_url = reverse('misago:admin:users:accounts:index')
return redirect('%s?rank=%s' % (redirect_url, target.pk))
class DefaultRank(RankAdmin, generic.ButtonView):
def check_permissions(self, request, target):
if target.is_default:
message = _('Rank "%(name)s" is already default.')
return message % {'name': target.name}
def button_action(self, request, target):
Rank.objects.make_rank_default(target)
message = _('Rank "%(name)s" has been made default.')
messages.success(request, message % {'name': target.name})
| gpl-2.0 |
wenderen/servo | tests/wpt/harness/wptrunner/wptrunner.py | 59 | 9731 | # This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
from __future__ import unicode_literals
import json
import os
import sys
import environment as env
import products
import testloader
import wptcommandline
import wptlogging
import wpttest
from testrunner import ManagerGroup
here = os.path.split(__file__)[0]
logger = None
"""Runner for web-platform-tests
The runner has several design goals:
* Tests should run with no modification from upstream.
* Tests should be regarded as "untrusted" so that errors, timeouts and even
crashes in the tests can be handled without failing the entire test run.
* For performance, tests can be run in multiple browsers in parallel.
The upstream repository has the facility for creating a test manifest in JSON
format. This manifest is used directly to determine which tests exist. Local
metadata files are used to store the expected test results.
"""
def setup_logging(*args, **kwargs):
global logger
logger = wptlogging.setup(*args, **kwargs)
def get_loader(test_paths, product, ssl_env, debug=None, run_info_extras=None, **kwargs):
if run_info_extras is None:
run_info_extras = {}
run_info = wpttest.get_run_info(kwargs["run_info"], product, debug=debug,
extras=run_info_extras)
test_manifests = testloader.ManifestLoader(test_paths, force_manifest_update=kwargs["manifest_update"]).load()
manifest_filters = []
meta_filters = []
if kwargs["include"] or kwargs["exclude"] or kwargs["include_manifest"]:
manifest_filters.append(testloader.TestFilter(include=kwargs["include"],
exclude=kwargs["exclude"],
manifest_path=kwargs["include_manifest"],
test_manifests=test_manifests))
if kwargs["tags"]:
meta_filters.append(testloader.TagFilter(tags=kwargs["tags"]))
test_loader = testloader.TestLoader(test_manifests,
kwargs["test_types"],
run_info,
manifest_filters=manifest_filters,
meta_filters=meta_filters,
chunk_type=kwargs["chunk_type"],
total_chunks=kwargs["total_chunks"],
chunk_number=kwargs["this_chunk"],
include_https=ssl_env.ssl_enabled)
return run_info, test_loader
def list_test_groups(test_paths, product, **kwargs):
env.do_delayed_imports(logger, test_paths)
ssl_env = env.ssl_env(logger, **kwargs)
run_info, test_loader = get_loader(test_paths, product, ssl_env,
**kwargs)
for item in sorted(test_loader.groups(kwargs["test_types"])):
print item
def list_disabled(test_paths, product, **kwargs):
env.do_delayed_imports(logger, test_paths)
rv = []
ssl_env = env.ssl_env(logger, **kwargs)
run_info, test_loader = get_loader(test_paths, product, ssl_env,
**kwargs)
for test_type, tests in test_loader.disabled_tests.iteritems():
for test in tests:
rv.append({"test": test.id, "reason": test.disabled()})
print json.dumps(rv, indent=2)
def get_pause_after_test(test_loader, **kwargs):
total_tests = sum(len(item) for item in test_loader.tests.itervalues())
if kwargs["pause_after_test"] is None:
if kwargs["repeat_until_unexpected"]:
return False
if kwargs["repeat"] == 1 and total_tests == 1:
return True
return False
return kwargs["pause_after_test"]
def run_tests(config, test_paths, product, **kwargs):
with wptlogging.CaptureIO(logger, not kwargs["no_capture_stdio"]):
env.do_delayed_imports(logger, test_paths)
(check_args,
browser_cls, get_browser_kwargs,
executor_classes, get_executor_kwargs,
env_options, run_info_extras) = products.load_product(config, product)
ssl_env = env.ssl_env(logger, **kwargs)
check_args(**kwargs)
if "test_loader" in kwargs:
run_info = wpttest.get_run_info(kwargs["run_info"], product, debug=None,
extras=run_info_extras(**kwargs))
test_loader = kwargs["test_loader"]
else:
run_info, test_loader = get_loader(test_paths,
product,
ssl_env,
run_info_extras=run_info_extras(**kwargs),
**kwargs)
if kwargs["run_by_dir"] is False:
test_source_cls = testloader.SingleTestSource
test_source_kwargs = {}
else:
# A value of None indicates infinite depth
test_source_cls = testloader.PathGroupedSource
test_source_kwargs = {"depth": kwargs["run_by_dir"]}
logger.info("Using %i client processes" % kwargs["processes"])
unexpected_total = 0
kwargs["pause_after_test"] = get_pause_after_test(test_loader, **kwargs)
with env.TestEnvironment(test_paths,
ssl_env,
kwargs["pause_after_test"],
kwargs["debug_info"],
env_options) as test_environment:
try:
test_environment.ensure_started()
except env.TestEnvironmentError as e:
logger.critical("Error starting test environment: %s" % e.message)
raise
browser_kwargs = get_browser_kwargs(ssl_env=ssl_env, **kwargs)
repeat = kwargs["repeat"]
repeat_count = 0
repeat_until_unexpected = kwargs["repeat_until_unexpected"]
while repeat_count < repeat or repeat_until_unexpected:
repeat_count += 1
if repeat_until_unexpected:
logger.info("Repetition %i" % (repeat_count))
elif repeat > 1:
logger.info("Repetition %i / %i" % (repeat_count, repeat))
unexpected_count = 0
logger.suite_start(test_loader.test_ids, run_info)
for test_type in kwargs["test_types"]:
logger.info("Running %s tests" % test_type)
for test in test_loader.disabled_tests[test_type]:
logger.test_start(test.id)
logger.test_end(test.id, status="SKIP")
executor_cls = executor_classes.get(test_type)
executor_kwargs = get_executor_kwargs(test_type,
test_environment.external_config,
test_environment.cache_manager,
run_info,
**kwargs)
if executor_cls is None:
logger.error("Unsupported test type %s for product %s" %
(test_type, product))
continue
with ManagerGroup("web-platform-tests",
kwargs["processes"],
test_source_cls,
test_source_kwargs,
browser_cls,
browser_kwargs,
executor_cls,
executor_kwargs,
kwargs["pause_after_test"],
kwargs["pause_on_unexpected"],
kwargs["restart_on_unexpected"],
kwargs["debug_info"]) as manager_group:
try:
manager_group.run(test_type, test_loader.tests)
except KeyboardInterrupt:
logger.critical("Main thread got signal")
manager_group.stop()
raise
unexpected_count += manager_group.unexpected_count()
unexpected_total += unexpected_count
logger.info("Got %i unexpected results" % unexpected_count)
if repeat_until_unexpected and unexpected_total > 0:
break
logger.suite_end()
return unexpected_total == 0
def main():
"""Main entry point when calling from the command line"""
kwargs = wptcommandline.parse_args()
try:
if kwargs["prefs_root"] is None:
kwargs["prefs_root"] = os.path.abspath(os.path.join(here, "prefs"))
setup_logging(kwargs, {"raw": sys.stdout})
if kwargs["list_test_groups"]:
list_test_groups(**kwargs)
elif kwargs["list_disabled"]:
list_disabled(**kwargs)
else:
return not run_tests(**kwargs)
except Exception:
if kwargs["pdb"]:
import pdb, traceback
print traceback.format_exc()
pdb.post_mortem()
else:
raise
| mpl-2.0 |
Exploit-install/Veil-Pillage | modules/management/enable_uac.py | 4 | 1889 | """
Module to enable UAC on a host or host list.
Module built by @harmj0y
"""
from lib import command_methods
class Module:
def __init__(self, targets=None, creds=None, args=None):
self.name = "Enable UAC"
self.description = "Enables User Access Control (UAC) on a host or host list."
# internal list() that holds one or more targets
self.targets = targets
# internal list() that holds one or more cred tuples
# [ (username, pw), (username2, pw2), ...]
self.creds = creds
# a state output file that will be written out by pillage.py
# ex- if you're querying domain users
self.output = ""
# options requiring user interaction - format is {Option : [Value, Description]}
self.required_options = {"trigger_method" : ["wmis", "[wmis], [winexe], or [smbexec] for triggering"]}
def run(self):
# assume single set of credentials
username, password = self.creds[0]
triggerMethod = self.required_options["trigger_method"][0]
for target in self.targets:
# reg.exe command to enable UAC
command = "reg ADD HKLM\\SOFTWARE\\Microsoft\\Windows\\CurrentVersion\\Policies\\System /v EnableLUA /t REG_DWORD /d 1 /f"
result = command_methods.executeResult(target, username, password, command, triggerMethod)
if result == "":
self.output += "[!] No result file, UAC enable failed using creds '"+username+":"+password+"' on : " + target + "\n"
elif "The operation completed successfully" in result:
self.output += "[*] UAC successfully enabled using creds '"+username+":"+password+"' on : " + target + "\n"
else:
self.output += "[!] Error in enabling UAC using creds '"+username+":"+password+"' on : " + target + "\n"
| gpl-3.0 |
anisyonk/pilot | movers/base.py | 1 | 31956 | """
Base class of site movers
:author: Alexey Anisenkov
"""
import hashlib
import os, re
import time
from subprocess import Popen, PIPE, STDOUT
from pUtil import tolog #
from PilotErrors import PilotErrors, PilotException
from Node import Node
class BaseSiteMover(object):
"""
File movers move files between a storage element (of different kinds) and a local directory
get_data: SE->local
put_data: local->SE
check_space: available space in SE
mkdirWperm -- recursively create dirs, setting appropriate permissions
getLocalFileInfo -- get size and checksum of a local file
"""
schemes = ['srm'] # list of supported schemes for transfers
name = "" # unique ID of the Mover implementation, if not set copy_command will be used
copy_command = None
timeout = 3000 #
checksum_type = "adler32" # algorithm name of checksum calculation
checksum_command = "adler32" # command to be executed to get checksum, e.g. md5sum (adler32 is internal default implementation)
ddmconf = {} # DDMEndpoints configuration from AGIS
require_replicas = True ## quick hack to avoid querying Rucio to resolve input replicas
def __init__(self, setup_path='', **kwargs):
self.copysetup = setup_path
self.timeout = kwargs.get('timeout', self.timeout)
self.ddmconf = kwargs.get('ddmconf', self.ddmconf)
self.workDir = kwargs.get('workDir', '')
#self.setup_command = self.getSetup()
self.trace_report = {}
@classmethod
def log(self, value): # quick stub
#print value
tolog(value)
@property
def copysetup(self):
return self._setup
@copysetup.setter
def copysetup(self, value):
value = os.path.expandvars(value.strip())
if value and not os.access(value, os.R_OK):
self.log("WARNING: copysetup=%s is invalid: file is not readdable" % value)
raise PilotException("Failed to set copysetup: passed invalid file name=%s" % value, code=PilotErrors.ERR_NOSUCHFILE, state="RFCP_FAIL")
self._setup = value
@classmethod
def getID(self):
"""
return the ID/NAME string of the Mover class, used to resolve the Mover class
name attribute helps to define various movers with the same copy command
"""
return self.name or self.copy_command
@classmethod
def getRucioPath(self, scope, lfn, prefix='rucio'):
"""
Construct a partial Rucio PFN using the scope and the LFN
"""
# <prefix=rucio>/<scope>/md5(<scope>:<lfn>)[0:2]/md5(<scope:lfn>)[2:4]/<lfn>
hash_hex = hashlib.md5('%s:%s' % (scope, lfn)).hexdigest()
paths = [prefix] + scope.split('.') + [hash_hex[0:2], hash_hex[2:4], lfn]
paths = filter(None, paths) # remove empty parts to avoid double /-chars
return '/'.join(paths)
#scope = os.path.join(*scope.split('.')) # correct scope
#return os.path.join(prefix, scope, hash_hex[0:2], hash_hex[2:4], lfn)
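# Worked example (illustrative only, the scope/LFN values are hypothetical):
#   getRucioPath('user.jdoe', 'test.file.root')
# hashes the string 'user.jdoe:test.file.root' with md5 and returns
#   'rucio/user/jdoe/<hex[0:2]>/<hex[2:4]>/test.file.root'
# i.e. the scope is split on '.' into path components and the first four hex digits
# of the hash spread files across two directory levels.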
def isDeterministic(self, ddmendpoint):
return self.ddmconf.get(ddmendpoint, {}).get('is_deterministic', None)
def getSURLRucio(self, se, se_path, scope, lfn, job=None):
"""
Get final destination SURL of file to be moved
"""
# ANALY/PROD job specific processing ??
prefix = 'rucio'
if se_path.rstrip('/').endswith('/' + prefix): # avoid double prefix
prefix = ''
surl = se + os.path.join(se_path, self.getRucioPath(scope, lfn, prefix=prefix))
return surl
def getSURL(self, se, se_path, scope, lfn, job=None, pathConvention=None, ddmEndpoint=None):
"""
Get final destination SURL of file to be moved
job instance is passed here for possible JOB specific processing ?? FIX ME LATER
"""
# consider only deterministic sites (output destination)
# do proper extract is_determetistic flag from DDMEndpoint: TODO
# quick hack for now
if (ddmEndpoint and self.isDeterministic(ddmEndpoint)) or (se_path and se_path.rstrip('/').endswith('/rucio')):
return self.getSURLRucio(se, se_path, scope, lfn)
raise Exception("getSURL(): NOT IMPLEMENTED error: processing of non Rucio transfers is not implemented yet, se_path=%s" % se_path)
def getSetup(self):
"""
return full setup command to be executed
Can be customized by different site mover
"""
if not self.copysetup:
return ''
return 'source %s' % self.copysetup
def setup(self):
"""
Prepare site specific setup initializations
Should be implemented by different site mover
"""
# TODO: verify setup??
# raise in case of errors
return True # rcode=0, output=''
def shouldVerifyStageIn(self):
"""
Should the get operation perform any file size/checksum verifications?
can be customized for specific movers
"""
return True
def check_availablespace(self, maxinputsize, files):
"""
Verify that enough local space is available to stage in and run the job
:raise: PilotException in case of not enough space
"""
if not self.shouldVerifyStageIn():
return
totalsize = reduce(lambda x, y: x + y.filesize, files, 0)
# verify total filesize
if maxinputsize and totalsize > maxinputsize:
error = "Too many/too large input files (%s). Total file size=%s B > maxinputsize=%s B" % (len(files), totalsize, maxinputsize)
raise PilotException(error, code=PilotErrors.ERR_SIZETOOLARGE)
self.log("Total input file size=%s B within allowed limit=%s B (zero value means unlimited)" % (totalsize, maxinputsize))
# get available space
wn = Node()
wn.collectWNInfo(self.workDir)
available_space = int(wn.disk)*1024**2 # convert from MB to B
self.log("Locally available space: %d B" % available_space)
# are we within the limit?
if totalsize > available_space:
error = "Not enough local space for staging input files and run the job (need %d B, but only have %d B)" % (totalsize, available_space)
raise PilotException(error, code=PilotErrors.ERR_NOLOCALSPACE)
def getRemoteFileChecksum(self, filename):
"""
get checksum of remote file
Should be implemented by different site mover
:return: (checksum, checksum_type)
:raise: an exception in case of errors
"""
return None, None
def getRemoteFileSize(self, filename):
"""
get size of remote file
Should be implemented by different site mover
:return: length of file
:raise: an exception in case of errors
"""
return None
def resolve_replica(self, fspec, protocol, ddm=None):
"""
:fspec: FileSpec object
:protocol: dict('se':'', 'scheme':'str' or list)
Resolve input replica either by protocol scheme
or manually construct the pfn according to the protocol.se value (the full local path is matched with respect to the ddm_se default protocol)
:return: input file replica details: {'surl':'', 'ddmendpoint':'', 'pfn':''}
:raise: PilotException in case of controlled error
"""
# resolve proper surl (main srm replica) and find related replica
if protocol.get('se'):
scheme = str(protocol.get('se')).split(':', 1)[0]
else:
scheme = protocol.get('scheme')
if not scheme:
raise Exception('Failed to resolve copytool scheme to be used, se field is corrupted?: protocol=%s' % protocol)
if isinstance(scheme, str):
scheme = [scheme]
replica = None # find first matched to protocol spec replica
surl = None
if protocol.get('se'): # custom settings: match Rucio replica by default protocol se (quick stub until Rucio protocols are properly populated)
for ddmendpoint, replicas, ddm_se, ddm_path in fspec.replicas:
if not replicas: # ignore ddms with no replicas
continue
surl = replicas[0] # assume srm protocol is first entry
self.log("[stage-in] surl (srm replica) from Rucio: pfn=%s, ddmendpoint=%s, ddm.se=%s, ddm.se_path=%s" % (surl, ddmendpoint, ddm_se, ddm_path))
for r in replicas:
if ddm_se and r.startswith(ddm_se): # manually form pfn based on protocol.se
r_filename = r.replace(ddm_se, '', 1).replace(ddm_path, '', 1) # resolve replica filename
# quick hack: if hosted replica ddmendpoint and input protocol ddmendpoint mismatched => consider replica ddmendpoint.path
r_path = protocol.get('path')
if ddmendpoint != protocol.get('ddm'):
self.log("[stage-in] ignore protocol.path=%s since protocol.ddm=%s differs from found replica.ddm=%s ... will use ddm.path=%s to form TURL" % (protocol.get('path'), protocol.get('ddm'), ddmendpoint, ddm_path))
r_path = ddm_path
replica = protocol.get('se') + r_path
if replica and r_filename and '/' not in (replica[-1] + r_filename[0]):
replica += '/'
replica += r_filename
self.log("[stage-in] ignore_rucio_replicas since protocol.se is explicitly passed, protocol.se=%s, protocol.path=%s: found replica=%s matched ddm.se=%s, ddm.path=%s .. will use TURL=%s" % (protocol.get('se'), protocol.get('path'), surl, ddm_se, ddm_path, replica))
break
if replica:
break
def get_preferred_replica(replicas, allowed_schemas):
for r in replicas:
for sval in allowed_schemas:
if r and r.startswith('%s://' % sval):
return r
return None
if not replica: # resolve replica from Rucio: use exact pfn from Rucio replicas
for ddmendpoint, replicas, ddm_se, ddm_path in fspec.replicas:
if not replicas: # ignore ddms with no replicas
continue
replica = get_preferred_replica(replicas, scheme)
if replica:
surl = get_preferred_replica(replicas, ['srm']) or replicas[0] # prefer SRM protocol for surl -- to be verified
self.log("[stage-in] surl (srm replica) from Rucio: pfn=%s, ddmendpoint=%s, ddm.se=%s, ddm.se_path=%s" % (surl, ddmendpoint, ddm_se, ddm_path))
break
if not replica: # replica not found
error = 'Failed to find replica for input file, protocol=%s, fspec=%s, allowed schemas=%s' % (protocol, fspec, scheme)
self.log("resolve_replica: %s" % error)
raise PilotException(error, code=PilotErrors.ERR_REPNOTFOUND)
return {'surl':surl, 'ddmendpoint':ddmendpoint, 'pfn':replica}
def is_stagein_allowed(self, fspec, job):
"""
check if stage-in operation is allowed for the mover
apply additional job specific checks here if need
Should be overwritten by custom sitemover
:return: True in case stage-in transfer is allowed
:raise: PilotException in case of controlled error
"""
return True
def get_data(self, fspec):
"""
fspec is FileSpec object
:return: file details: {'checksum': '', 'checksum_type':'', 'filesize':''}
:raise: PilotException in case of controlled error
"""
# resolve proper surl and find related replica
dst = os.path.join(self.workDir, fspec.lfn)
return self.stageIn(fspec.turl, dst, fspec)
def stageIn(self, source, destination, fspec):
"""
Stage in the source file: do stagein file + verify local file
:return: file details: {'checksum': '', 'checksum_type':'', 'filesize':''}
:raise: PilotException in case of controlled error
"""
self.trace_report.update(relativeStart=time.time(), transferStart=time.time())
dst_checksum, dst_checksum_type = self.stageInFile(source, destination, fspec)
src_fsize = fspec.filesize
if not self.shouldVerifyStageIn():
self.log("skipped stage-in verification for lfn=%" % fspec.lfn)
return {'checksum': dst_checksum, 'checksum_type':dst_checksum_type, 'filesize':src_fsize}
src_checksum, src_checksum_type = fspec.get_checksum()
dst_fsize = os.path.getsize(destination)
# verify stagein by checksum
self.trace_report.update(validateStart=time.time())
try:
if not dst_checksum:
dst_checksum, dst_checksum_type = self.calc_file_checksum(destination)
except Exception, e:
self.log("verify StageIn: caught exception while getting local file=%s checksum: %s .. skipped" % (destination, e))
try:
if not src_checksum:
src_checksum, src_checksum_type = self.getRemoteFileChecksum(source)
except Exception, e:
self.log("verify StageIn: caught exception while getting remote file=%s checksum: %s .. skipped" % (source, e))
try:
if dst_checksum and dst_checksum_type: # verify against source
is_verified = src_checksum and src_checksum_type and dst_checksum == src_checksum and dst_checksum_type == src_checksum_type
self.log("Remote checksum [%s]: %s (%s)" % (src_checksum_type, src_checksum, source))
self.log("Local checksum [%s]: %s (%s)" % (dst_checksum_type, dst_checksum, destination))
self.log("checksum is_verified = %s" % is_verified)
if type(dst_checksum) is str and type(src_checksum) is str:
if len(src_checksum) != len(dst_checksum):
self.log("Local and remote checksums have different lengths (%s vs %s), will lstrip them" % (dst_checksum, src_checksum))
src_checksum = src_checksum.lstrip('0')
dst_checksum = dst_checksum.lstrip('0')
is_verified = src_checksum and src_checksum_type and dst_checksum == src_checksum and dst_checksum_type == src_checksum_type
self.log("Remote checksum [%s]: %s (%s)" % (src_checksum_type, src_checksum, source))
self.log("Local checksum [%s]: %s (%s)" % (dst_checksum_type, dst_checksum, destination))
self.log("checksum is_verified = %s" % is_verified)
if not is_verified:
error = "Remote and local checksums (of type %s) do not match for %s (%s != %s)" % \
(src_checksum_type, os.path.basename(destination), dst_checksum, src_checksum)
if src_checksum_type == 'adler32':
state = 'AD_MISMATCH'
rcode = PilotErrors.ERR_GETADMISMATCH
else:
state = 'MD5_MISMATCH'
rcode = PilotErrors.ERR_GETMD5MISMATCH
raise PilotException(error, code=rcode, state=state)
self.log("verifying stagein done. [by checksum] [%s]" % source)
self.trace_report.update(clientState="DONE")
return {'checksum': dst_checksum, 'checksum_type':dst_checksum_type, 'filesize':dst_fsize}
except PilotException:
raise
except Exception, e:
self.log("verify StageIn: caught exception while doing file checksum verification: %s .. skipped" % e)
# verify stage-in by filesize
try:
if not src_fsize:
src_fsize = self.getRemoteFileSize(source)
is_verified = src_fsize and src_fsize == dst_fsize
self.log("Remote filesize [%s]: %s" % (os.path.dirname(destination), src_fsize))
self.log("Local filesize [%s]: %s" % (os.path.dirname(destination), dst_fsize))
self.log("filesize is_verified = %s" % is_verified)
if not is_verified:
error = "Remote and local file sizes do not match for %s (%s != %s)" % (os.path.basename(destination), dst_fsize, src_fsize)
self.log(error)
raise PilotException(error, code=PilotErrors.ERR_GETWRONGSIZE, state='FS_MISMATCH')
self.log("verifying stagein done. [by filesize] [%s]" % source)
self.trace_report.update(clientState="DONE")
return {'checksum': dst_checksum, 'checksum_type':dst_checksum_type, 'filesize':dst_fsize}
except PilotException:
raise
except Exception, e:
self.log("verify StageIn: caught exception while doing file size verification: %s .. skipped" % e)
raise PilotException("Neither checksum nor file size could be verified (failing job)", code=PilotErrors.ERR_NOFILEVERIFICATION, state='NOFILEVERIFICATION')
def stageInFile(self, source, destination, fspec=None):
"""
Stage in the file.
Should be implemented by different site mover
:return: destination file details (checksum, checksum_type) in case of success, throw exception in case of failure
:raise: PilotException in case of controlled error
"""
raise Exception('NOT IMPLEMENTED')
def put_data(self, fspec):
"""
fspec is FileSpec object
:return: remote file details: {'checksum': '', 'checksum_type':'', 'filesize':'', 'surl':''}
stageout workflow could be overwritten by specific Mover
:raise: PilotException in case of controlled error
"""
src = os.path.join(self.workDir, fspec.lfn)
return self.stageOut(src, fspec.turl, fspec)
def stageOut(self, source, destination, fspec):
"""
Stage out the source file: do stageout file + verify remote file output
:return: remote file details: {'checksum': '', 'checksum_type':'', 'filesize':''}
:raise: PilotException in case of controlled error
"""
src_checksum, src_checksum_type = None, None
src_fsize = fspec and fspec.filesize or os.path.getsize(source)
if fspec:
src_checksum, src_checksum_type = fspec.get_checksum()
# do stageOutFile
self.trace_report.update(relativeStart=time.time(), transferStart=time.time())
file_exist_error, dst_checksum, dst_checksum_type = None, None, None
try:
dst_checksum, dst_checksum_type = self.stageOutFile(source, destination, fspec)
except PilotException, e:
# do clean up
if e.code == PilotErrors.ERR_FILEEXIST: ## continue execution with further verification of the newer file against the one already existing at storage
self.log("INFO: StageOutFile() failed with FILEEXIST error: skipped .. will try to verify if newer produced file is the same as from storage")
file_exist_error = e
else:
self.remote_cleanup(destination, fspec)
raise
# verify stageout by checksum
self.trace_report.update(validateStart=time.time())
try:
if not dst_checksum:
dst_checksum, dst_checksum_type = self.getRemoteFileChecksum(destination)
except Exception, e:
self.log("verify StageOut: caught exception while getting remote file checksum.. skipped, error=%s" % e)
import traceback
self.log(traceback.format_exc())
# Ignore in the case of lsm mover
if self.name == 'lsm':
self.log("Ignoring lsm error")
if file_exist_error: ## no way to verify the newer file against the one already existing at storage: fail the transfer with FILEEXIST error
raise file_exist_error
return {'checksum': None, 'checksum_type':None, 'filesize':src_fsize}
else:
self.log("Used %s mover" % (self.name))
try:
if dst_checksum and dst_checksum_type: # verify against source
if not src_checksum: # fspec has no checksum data defined try to calculate from the source
src_checksum, src_checksum_type = self.calc_file_checksum(source)
is_verified = src_checksum and src_checksum_type and dst_checksum == src_checksum and dst_checksum_type == src_checksum_type
self.log("Local checksum [%s]: %s" % (src_checksum_type, src_checksum))
self.log("Remote checksum [%s]: %s" % (dst_checksum_type, dst_checksum))
self.log("checksum is_verified = %s" % is_verified)
if type(dst_checksum) is str and type(src_checksum) is str:
if len(dst_checksum) < len(src_checksum):
self.log("Local and remote checksums have different lengths (%s vs %s)" % (src_checksum, dst_checksum))
if src_checksum[0] == '0':
self.log("Stripping initial 0:s from local checksum")
src_checksum = src_checksum.lstrip('0')
is_verified = src_checksum and src_checksum_type and dst_checksum == src_checksum and dst_checksum_type == src_checksum_type
self.log("Local checksum [%s]: %s" % (src_checksum_type, src_checksum))
self.log("Remote checksum [%s]: %s" % (dst_checksum_type, dst_checksum))
self.log("checksum is_verified = %s" % is_verified)
if not is_verified and file_exist_error: ## newer file differs from the one at storage: raise initial FILEEXIST error
raise file_exist_error
if not is_verified:
error = "Remote and local checksums (of type %s) do not match for %s (%s != %s)" % \
(src_checksum_type, os.path.basename(destination), dst_checksum, src_checksum)
if src_checksum_type == 'adler32':
state = 'AD_MISMATCH'
rcode = PilotErrors.ERR_PUTADMISMATCH
else:
state = 'MD5_MISMATCH'
rcode = PilotErrors.ERR_PUTMD5MISMATCH
raise PilotException(error, code=rcode, state=state)
self.log("verifying stageout done. [by checksum]")
self.trace_report.update(clientState="DONE")
return {'checksum': dst_checksum, 'checksum_type':dst_checksum_type, 'filesize':src_fsize}
except PilotException, e:
if not e.code == PilotErrors.ERR_FILEEXIST:
self.remote_cleanup(destination, fspec)
raise
except Exception, e:
self.log("verify StageOut: caught exception while doing file checksum verification: %s .. skipped" % e)
# verify stageout by filesize
try:
dst_fsize = self.getRemoteFileSize(destination)
is_verified = src_fsize and src_fsize == dst_fsize
self.log("Local filesize [%s]: %s" % (os.path.dirname(destination), src_fsize))
self.log("Remote filesize [%s]: %s" % (os.path.dirname(destination), dst_fsize))
self.log("filesize is_verified = %s" % is_verified)
if not is_verified and file_exist_error: ## newer file differs from the one at storage: raise initial FILEEXIST error
raise file_exist_error
if not is_verified:
error = "Remote and local file sizes do not match for %s (%s != %s)" % (os.path.basename(destination), dst_fsize, src_fsize)
self.log(error)
raise PilotException(error, code=PilotErrors.ERR_PUTWRONGSIZE, state='FS_MISMATCH')
self.log("verifying stageout done. [by filesize]")
self.trace_report.update(clientState="DONE")
return {'checksum': dst_checksum, 'checksum_type':dst_checksum_type, 'filesize':src_fsize}
except PilotException, e:
if not e.code == PilotErrors.ERR_FILEEXIST:
self.remote_cleanup(destination, fspec)
raise
except Exception, e:
self.log("verify StageOut: caught exception while doing file size verification: %s .. skipped" % e)
if file_exist_error:
raise file_exist_error
else:
self.remote_cleanup(destination, fspec)
raise PilotException("Neither checksum nor file size could be verified (failing job)", code=PilotErrors.ERR_NOFILEVERIFICATION, state='NOFILEVERIFICATION')
def stageOutFile(self, source, destination, fspec):
"""
Stage out the file.
Should be implemented by different site mover
:return: destination file details (checksum, checksum_type) in case of success, throw exception in case of failure
:raise: PilotException in case of controlled error
"""
raise Exception('NOT IMPLEMENTED')
def remote_cleanup(self, destination, fspec):
"""
Apply remote clean up: e.g. remove incomplete remote file
Should be customized by different site mover
"""
return True
def resolveStageErrorFromOutput(self, output, filename=None, is_stagein=False):
"""
resolve error code, client state and defined error mesage from the output
:return: dict {'rcode', 'state, 'error'}
"""
ret = {'rcode': PilotErrors.ERR_STAGEINFAILED if is_stagein else PilotErrors.ERR_STAGEOUTFAILED, 'state': 'COPY_ERROR', 'error': 'Copy operation failed [is_stagein=%s]: %s' % (is_stagein, output)}
if "Could not establish context" in output:
ret['rcode'] = PilotErrors.ERR_NOPROXY
ret['state'] = 'CONTEXT_FAIL'
ret['error'] = "Could not establish context: Proxy / VO extension of proxy has probably expired: %s" % output
elif "File exists" in output or 'SRM_FILE_BUSY' in output or 'file already exists' in output:
ret['rcode'] = PilotErrors.ERR_FILEEXIST
ret['state'] = 'FILE_EXIST'
ret['error'] = "File already exists in the destination: %s" % output
elif "No space left on device" in output:
ret['rcode'] = PilotErrors.ERR_NOLOCALSPACE
ret['state'] = 'NO_SPACE'
ret['error'] = "No available space left on local disk: %s" % output
elif "globus_xio:" in output:
ret['rcode'] = PilotErrors.ERR_GETGLOBUSSYSERR
ret['state'] = 'GLOBUS_FAIL'
ret['error'] = "Globus system error: %s" % output
elif "No such file or directory" and "DBRelease" in filename: ## is it a stageout error??
ret['rcode'] = PilotErrors.ERR_MISSDBREL
ret['state'] = 'NO_DBREL'
ret['error'] = output
elif "No such file or directory" in output:
ret['rcode'] = PilotErrors.ERR_NOSUCHFILE
ret['state'] = 'NO_FILE'
ret['error'] = output
elif "query chksum is not supported" in output or "Unable to checksum" in output:
ret['rcode'] = PilotErrors.ERR_CHKSUMNOTSUP
ret['state'] = 'CHKSUM_NOTSUP'
ret['error'] = output
elif "does not match the checksum" in output:
if 'adler32' in output:
state = 'AD_MISMATCH'
rcode = PilotErrors.ERR_GETADMISMATCH
else:
state = 'MD5_MISMATCH'
rcode = PilotErrors.ERR_GETMD5MISMATCH
ret['rcode'] = rcode
ret['state'] = state
return ret
def getTimeOut(self, filesize):
""" Get a proper time-out limit based on the file size """
timeout_max = 5 + 3*3600 # 3 hours ::: FIX ME LATER ::
timeout_min = self.timeout
timeout = timeout_min + int(filesize/0.5e6) # approx < 0.5 Mb/sec
return min(timeout, timeout_max)
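# Worked example (illustrative): with the class default timeout of 3000 s,
# a 1 GB file gives 3000 + int(1e9 / 0.5e6) = 3000 + 2000 = 5000 s,
# which is still below the hard cap of 5 + 3*3600 = 10805 s.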
def calc_file_checksum(self, filename):
"""
calculate SiteMover specific checksum for a file
:return: (checksum, checksum_type)
raise an exception if the input filename does not exist or is not readable
"""
if not self.checksum_command or not self.checksum_type:
raise Exception("Failed to get file checksum: incomplete checksum_command declaration: type=%s, command=%s" % (self.checksum_type, self.checksum_command))
fn = getattr(self, "calc_%s_checksum" % self.checksum_type, None)
checksum = fn(filename) if callable(fn) else self.calc_checksum(filename, self.checksum_command)
return checksum, self.checksum_type
@classmethod
def calc_adler32_checksum(self, filename):
"""
calculate the adler32 checksum for a file
raise an exception if the input filename does not exist or is not readable
"""
from zlib import adler32
asum = 1 # default adler32 starting value
BLOCKSIZE = 64*1024*1024 # read buffer, 64 Mb
with open(filename, 'rb') as f:
while True:
data = f.read(BLOCKSIZE)
if not data:
break
asum = adler32(data, asum)
if asum < 0:
asum += 2**32
return "%08x" % asum # convert to hex
@classmethod
def calc_checksum(self, filename, command='md5sum', setup=None, pattern=None, cmd=None):
"""
:cmd: quick hack: fix me later
calculate file checksum value
raise an exception if the input filename does not exist or is not readable
"""
if not cmd:
cmd = "%s %s" % (command, filename)
if setup:
cmd = "%s 1>/dev/null 2>/dev/null; %s" % (setup, cmd)
self.log("Execute command (%s) to calc checksum of file" % cmd)
c = Popen(cmd, stdout=PIPE, stderr=PIPE, shell=True)
output, error = c.communicate()
if error:
self.log("INFO: calc_checksum: error=%s" % error)
if c.returncode:
self.log('FAILED to calc_checksum for file=%s, cmd=%s, rcode=%s, output=%s' % (filename, cmd, c.returncode, output))
raise Exception(output)
self.log("calc_checksum: output=%s" % output)
value = ''
if pattern:
self.log("INFO: calc_checksum: try to extract checksum value by pattern=%s" % pattern)
m = re.match(pattern, output)
if m:
value = m.groupdict().get('checksum') or ''
else:
value = output.split()[0]
return value # return final checksum
@classmethod
def removeLocal(self, filename):
"""
Remove the local file in case of failure to prevent problem with get retry attempt
:return: True in case of physical file removal
"""
if not os.path.exists(filename): # nothing to remove
return False
try:
os.remove(filename)
self.log("Successfully removed local file=%s" % filename)
is_removed = True
except Exception, e:
self.log("Could not remove the local file=%s .. skipped, error=%s" % (filename, e))
is_removed = False
return is_removed
| apache-2.0 |
omasanori/gyp | PRESUBMIT.py | 496 | 3373 | # Copyright (c) 2012 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Top-level presubmit script for GYP.
See http://dev.chromium.org/developers/how-tos/depottools/presubmit-scripts
for more details about the presubmit API built into gcl.
"""
PYLINT_BLACKLIST = [
# TODO: fix me.
# From SCons, not done in google style.
'test/lib/TestCmd.py',
'test/lib/TestCommon.py',
'test/lib/TestGyp.py',
# Needs style fix.
'pylib/gyp/generator/xcode.py',
]
PYLINT_DISABLED_WARNINGS = [
# TODO: fix me.
# Many tests include modules they don't use.
'W0611',
# Include order doesn't properly include local files?
'F0401',
# Some use of built-in names.
'W0622',
# Some unused variables.
'W0612',
# Operator not preceded/followed by space.
'C0323',
'C0322',
# Unnecessary semicolon.
'W0301',
# Unused argument.
'W0613',
# String has no effect (docstring in wrong place).
'W0105',
# Comma not followed by space.
'C0324',
# Access to a protected member.
'W0212',
# Bad indent.
'W0311',
# Line too long.
'C0301',
# Undefined variable.
'E0602',
# Not exception type specified.
'W0702',
# No member of that name.
'E1101',
# Dangerous default {}.
'W0102',
# Others, too many to sort.
'W0201', 'W0232', 'E1103', 'W0621', 'W0108', 'W0223', 'W0231',
'R0201', 'E0101', 'C0321',
# ************* Module copy
# W0104:427,12:_test.odict.__setitem__: Statement seems to have no effect
'W0104',
]
def CheckChangeOnUpload(input_api, output_api):
report = []
report.extend(input_api.canned_checks.PanProjectChecks(
input_api, output_api))
return report
def CheckChangeOnCommit(input_api, output_api):
report = []
# Accept any year number from 2009 to the current year.
current_year = int(input_api.time.strftime('%Y'))
allowed_years = (str(s) for s in reversed(xrange(2009, current_year + 1)))
years_re = '(' + '|'.join(allowed_years) + ')'
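# For example, when current_year is 2012 this evaluates to '(2012|2011|2010|2009)'.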
# The (c) is deprecated, but tolerate it until it's removed from all files.
license = (
r'.*? Copyright (\(c\) )?%(year)s Google Inc\. All rights reserved\.\n'
r'.*? Use of this source code is governed by a BSD-style license that '
r'can be\n'
r'.*? found in the LICENSE file\.\n'
) % {
'year': years_re,
}
report.extend(input_api.canned_checks.PanProjectChecks(
input_api, output_api, license_header=license))
report.extend(input_api.canned_checks.CheckTreeIsOpen(
input_api, output_api,
'http://gyp-status.appspot.com/status',
'http://gyp-status.appspot.com/current'))
import os
import sys
old_sys_path = sys.path
try:
sys.path = ['pylib', 'test/lib'] + sys.path
blacklist = PYLINT_BLACKLIST
if sys.platform == 'win32':
blacklist = [os.path.normpath(x).replace('\\', '\\\\')
for x in PYLINT_BLACKLIST]
report.extend(input_api.canned_checks.RunPylint(
input_api,
output_api,
black_list=blacklist,
disabled_warnings=PYLINT_DISABLED_WARNINGS))
finally:
sys.path = old_sys_path
return report
def GetPreferredTrySlaves():
return ['gyp-win32', 'gyp-win64', 'gyp-linux', 'gyp-mac', 'gyp-android']
| bsd-3-clause |
pauloschilling/sentry | src/sentry/migrations/0195_auto__chg_field_organization_owner.py | 18 | 38511 | # -*- coding: utf-8 -*-
from south.utils import datetime_utils as datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Changing field 'Organization.owner'
db.alter_column('sentry_organization', 'owner_id', self.gf('sentry.db.models.fields.foreignkey.FlexibleForeignKey')(to=orm['sentry.User'], null=True))
def backwards(self, orm):
# User chose to not deal with backwards NULL issues for 'Organization.owner'
raise RuntimeError("Cannot reverse this migration. 'Organization.owner' and its values cannot be restored.")
# The following code is provided here to aid in writing a correct migration
# Changing field 'Organization.owner'
db.alter_column('sentry_organization', 'owner_id', self.gf('sentry.db.models.fields.foreignkey.FlexibleForeignKey')(to=orm['sentry.User']))
models = {
'sentry.accessgroup': {
'Meta': {'unique_together': "(('team', 'name'),)", 'object_name': 'AccessGroup'},
'data': ('sentry.db.models.fields.gzippeddict.GzippedDictField', [], {'null': 'True', 'blank': 'True'}),
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'managed': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'members': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['sentry.User']", 'symmetrical': 'False'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'projects': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['sentry.Project']", 'symmetrical': 'False'}),
'team': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Team']"}),
'type': ('sentry.db.models.fields.bounded.BoundedIntegerField', [], {'default': '50'})
},
'sentry.activity': {
'Meta': {'object_name': 'Activity'},
'data': ('sentry.db.models.fields.gzippeddict.GzippedDictField', [], {'null': 'True'}),
'datetime': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'event': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Event']", 'null': 'True'}),
'group': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Group']", 'null': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'ident': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']"}),
'type': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {}),
'user': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.User']", 'null': 'True'})
},
'sentry.apikey': {
'Meta': {'object_name': 'ApiKey'},
'allowed_origins': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '32'}),
'label': ('django.db.models.fields.CharField', [], {'default': "'Default'", 'max_length': '64', 'blank': 'True'}),
'organization': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'related_name': "'key_set'", 'to': "orm['sentry.Organization']"}),
'scopes': ('django.db.models.fields.BigIntegerField', [], {'default': 'None'}),
'status': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '0', 'db_index': 'True'})
},
'sentry.auditlogentry': {
'Meta': {'object_name': 'AuditLogEntry'},
'actor': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'blank': 'True', 'related_name': "'audit_actors'", 'null': 'True', 'to': "orm['sentry.User']"}),
'actor_key': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.ApiKey']", 'null': 'True', 'blank': 'True'}),
'actor_label': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True', 'blank': 'True'}),
'data': ('sentry.db.models.fields.gzippeddict.GzippedDictField', [], {}),
'datetime': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'event': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'ip_address': ('django.db.models.fields.GenericIPAddressField', [], {'max_length': '39', 'null': 'True'}),
'organization': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Organization']"}),
'target_object': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'null': 'True'}),
'target_user': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'blank': 'True', 'related_name': "'audit_targets'", 'null': 'True', 'to': "orm['sentry.User']"})
},
'sentry.authidentity': {
'Meta': {'unique_together': "(('auth_provider', 'ident'), ('auth_provider', 'user'))", 'object_name': 'AuthIdentity'},
'auth_provider': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.AuthProvider']"}),
'data': ('jsonfield.fields.JSONField', [], {'default': '{}'}),
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'ident': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'last_synced': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_verified': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'user': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.User']"})
},
'sentry.authprovider': {
'Meta': {'object_name': 'AuthProvider'},
'config': ('jsonfield.fields.JSONField', [], {'default': '{}'}),
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'default_global_access': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'default_role': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '50'}),
'default_teams': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['sentry.Team']", 'symmetrical': 'False', 'blank': 'True'}),
'flags': ('django.db.models.fields.BigIntegerField', [], {'default': '0'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'last_sync': ('django.db.models.fields.DateTimeField', [], {'null': 'True'}),
'organization': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Organization']", 'unique': 'True'}),
'provider': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'sync_time': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'null': 'True'})
},
'sentry.broadcast': {
'Meta': {'object_name': 'Broadcast'},
'badge': ('django.db.models.fields.CharField', [], {'max_length': '32', 'null': 'True', 'blank': 'True'}),
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True', 'db_index': 'True'}),
'link': ('django.db.models.fields.URLField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
'message': ('django.db.models.fields.CharField', [], {'max_length': '256'})
},
'sentry.event': {
'Meta': {'unique_together': "(('project', 'event_id'),)", 'object_name': 'Event', 'db_table': "'sentry_message'", 'index_together': "(('group', 'datetime'),)"},
'data': ('sentry.db.models.fields.node.NodeField', [], {'null': 'True', 'blank': 'True'}),
'datetime': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'db_index': 'True'}),
'event_id': ('django.db.models.fields.CharField', [], {'max_length': '32', 'null': 'True', 'db_column': "'message_id'"}),
'group': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'blank': 'True', 'related_name': "'event_set'", 'null': 'True', 'to': "orm['sentry.Group']"}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'message': ('django.db.models.fields.TextField', [], {}),
'num_comments': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '0', 'null': 'True'}),
'platform': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']", 'null': 'True'}),
'time_spent': ('sentry.db.models.fields.bounded.BoundedIntegerField', [], {'null': 'True'})
},
'sentry.eventmapping': {
'Meta': {'unique_together': "(('project', 'event_id'),)", 'object_name': 'EventMapping'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'event_id': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'group': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Group']"}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']"})
},
'sentry.file': {
'Meta': {'object_name': 'File'},
'checksum': ('django.db.models.fields.CharField', [], {'max_length': '40', 'null': 'True'}),
'headers': ('jsonfield.fields.JSONField', [], {'default': '{}'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'path': ('django.db.models.fields.TextField', [], {'null': 'True'}),
'size': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'null': 'True'}),
'storage': ('django.db.models.fields.CharField', [], {'max_length': '128', 'null': 'True'}),
'storage_options': ('jsonfield.fields.JSONField', [], {'default': '{}'}),
'timestamp': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'db_index': 'True'}),
'type': ('django.db.models.fields.CharField', [], {'max_length': '64'})
},
'sentry.group': {
'Meta': {'object_name': 'Group', 'db_table': "'sentry_groupedmessage'", 'index_together': "(('project', 'first_release'),)"},
'active_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'db_index': 'True'}),
'culprit': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True', 'db_column': "'view'", 'blank': 'True'}),
'data': ('sentry.db.models.fields.gzippeddict.GzippedDictField', [], {'null': 'True', 'blank': 'True'}),
'first_release': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Release']", 'null': 'True'}),
'first_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'db_index': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'is_public': ('django.db.models.fields.NullBooleanField', [], {'default': 'False', 'null': 'True', 'blank': 'True'}),
'last_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'db_index': 'True'}),
'level': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '40', 'db_index': 'True', 'blank': 'True'}),
'logger': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '64', 'db_index': 'True', 'blank': 'True'}),
'message': ('django.db.models.fields.TextField', [], {}),
'num_comments': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '0', 'null': 'True'}),
'platform': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']", 'null': 'True'}),
'resolved_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'db_index': 'True'}),
'score': ('sentry.db.models.fields.bounded.BoundedIntegerField', [], {'default': '0'}),
'status': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '0', 'db_index': 'True'}),
'time_spent_count': ('sentry.db.models.fields.bounded.BoundedIntegerField', [], {'default': '0'}),
'time_spent_total': ('sentry.db.models.fields.bounded.BoundedIntegerField', [], {'default': '0'}),
'times_seen': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '1', 'db_index': 'True'})
},
'sentry.groupassignee': {
'Meta': {'object_name': 'GroupAssignee', 'db_table': "'sentry_groupasignee'"},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'group': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'related_name': "'assignee_set'", 'unique': 'True', 'to': "orm['sentry.Group']"}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'related_name': "'assignee_set'", 'to': "orm['sentry.Project']"}),
'user': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'related_name': "'sentry_assignee_set'", 'to': "orm['sentry.User']"})
},
'sentry.groupbookmark': {
'Meta': {'unique_together': "(('project', 'user', 'group'),)", 'object_name': 'GroupBookmark'},
'group': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'related_name': "'bookmark_set'", 'to': "orm['sentry.Group']"}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'related_name': "'bookmark_set'", 'to': "orm['sentry.Project']"}),
'user': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'related_name': "'sentry_bookmark_set'", 'to': "orm['sentry.User']"})
},
'sentry.groupemailthread': {
'Meta': {'unique_together': "(('email', 'group'), ('email', 'msgid'))", 'object_name': 'GroupEmailThread'},
'date': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'db_index': 'True'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75'}),
'group': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'related_name': "'groupemail_set'", 'to': "orm['sentry.Group']"}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'msgid': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'related_name': "'groupemail_set'", 'to': "orm['sentry.Project']"})
},
'sentry.grouphash': {
'Meta': {'unique_together': "(('project', 'hash'),)", 'object_name': 'GroupHash'},
'group': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Group']", 'null': 'True'}),
'hash': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']", 'null': 'True'})
},
'sentry.groupmeta': {
'Meta': {'unique_together': "(('group', 'key'),)", 'object_name': 'GroupMeta'},
'group': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Group']"}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'value': ('django.db.models.fields.TextField', [], {})
},
'sentry.grouprulestatus': {
'Meta': {'unique_together': "(('rule', 'group'),)", 'object_name': 'GroupRuleStatus'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'group': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Group']"}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'last_active': ('django.db.models.fields.DateTimeField', [], {'null': 'True'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']"}),
'rule': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Rule']"}),
'status': ('django.db.models.fields.PositiveSmallIntegerField', [], {'default': '0'})
},
'sentry.groupseen': {
'Meta': {'unique_together': "(('user', 'group'),)", 'object_name': 'GroupSeen'},
'group': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Group']"}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'last_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']"}),
'user': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.User']", 'db_index': 'False'})
},
'sentry.grouptagkey': {
'Meta': {'unique_together': "(('project', 'group', 'key'),)", 'object_name': 'GroupTagKey'},
'group': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Group']"}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']", 'null': 'True'}),
'values_seen': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '0'})
},
'sentry.grouptagvalue': {
'Meta': {'unique_together': "(('project', 'key', 'value', 'group'),)", 'object_name': 'GroupTagValue', 'db_table': "'sentry_messagefiltervalue'"},
'first_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'null': 'True', 'db_index': 'True'}),
'group': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'related_name': "'grouptag'", 'to': "orm['sentry.Group']"}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'last_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'null': 'True', 'db_index': 'True'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'related_name': "'grouptag'", 'null': 'True', 'to': "orm['sentry.Project']"}),
'times_seen': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '0'}),
'value': ('django.db.models.fields.CharField', [], {'max_length': '200'})
},
'sentry.helppage': {
'Meta': {'object_name': 'HelpPage'},
'content': ('django.db.models.fields.TextField', [], {}),
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'is_visible': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'max_length': '64', 'unique': 'True', 'null': 'True'}),
'priority': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '50'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '64'})
},
'sentry.lostpasswordhash': {
'Meta': {'object_name': 'LostPasswordHash'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'hash': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'user': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.User']", 'unique': 'True'})
},
'sentry.option': {
'Meta': {'object_name': 'Option'},
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '64'}),
'last_updated': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'value': ('sentry.db.models.fields.pickle.UnicodePickledObjectField', [], {})
},
'sentry.organization': {
'Meta': {'object_name': 'Organization'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'flags': ('django.db.models.fields.BigIntegerField', [], {'default': '0'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'members': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'org_memberships'", 'symmetrical': 'False', 'through': "orm['sentry.OrganizationMember']", 'to': "orm['sentry.User']"}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'owner': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.User']", 'null': 'True'}),
'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '50'}),
'status': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '0'})
},
'sentry.organizationaccessrequest': {
'Meta': {'unique_together': "(('team', 'member'),)", 'object_name': 'OrganizationAccessRequest'},
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'member': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.OrganizationMember']"}),
'team': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Team']"})
},
'sentry.organizationmember': {
'Meta': {'unique_together': "(('organization', 'user'), ('organization', 'email'))", 'object_name': 'OrganizationMember'},
'counter': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'null': 'True', 'blank': 'True'}),
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'null': 'True', 'blank': 'True'}),
'flags': ('django.db.models.fields.BigIntegerField', [], {'default': '0'}),
'has_global_access': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'organization': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'related_name': "'member_set'", 'to': "orm['sentry.Organization']"}),
'teams': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['sentry.Team']", 'symmetrical': 'False', 'through': "orm['sentry.OrganizationMemberTeam']", 'blank': 'True'}),
'type': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '50'}),
'user': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'blank': 'True', 'related_name': "'sentry_orgmember_set'", 'null': 'True', 'to': "orm['sentry.User']"})
},
'sentry.organizationmemberteam': {
'Meta': {'unique_together': "(('team', 'organizationmember'),)", 'object_name': 'OrganizationMemberTeam', 'db_table': "'sentry_organizationmember_teams'"},
'id': ('sentry.db.models.fields.bounded.BoundedAutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'organizationmember': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.OrganizationMember']"}),
'team': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Team']"})
},
'sentry.project': {
'Meta': {'unique_together': "(('team', 'slug'), ('organization', 'slug'))", 'object_name': 'Project'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'organization': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Organization']"}),
'public': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'slug': ('django.db.models.fields.SlugField', [], {'max_length': '50', 'null': 'True'}),
'status': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '0', 'db_index': 'True'}),
'team': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Team']"})
},
'sentry.projectkey': {
'Meta': {'object_name': 'ProjectKey'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'null': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'label': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True', 'blank': 'True'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'related_name': "'key_set'", 'to': "orm['sentry.Project']"}),
'public_key': ('django.db.models.fields.CharField', [], {'max_length': '32', 'unique': 'True', 'null': 'True'}),
'roles': ('django.db.models.fields.BigIntegerField', [], {'default': '1'}),
'secret_key': ('django.db.models.fields.CharField', [], {'max_length': '32', 'unique': 'True', 'null': 'True'}),
'status': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '0', 'db_index': 'True'})
},
'sentry.projectoption': {
'Meta': {'unique_together': "(('project', 'key'),)", 'object_name': 'ProjectOption', 'db_table': "'sentry_projectoptions'"},
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']"}),
'value': ('sentry.db.models.fields.pickle.UnicodePickledObjectField', [], {})
},
'sentry.release': {
'Meta': {'unique_together': "(('project', 'version'),)", 'object_name': 'Release'},
'data': ('jsonfield.fields.JSONField', [], {'default': '{}'}),
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'date_released': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'date_started': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'new_groups': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '0'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']"}),
'ref': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True', 'blank': 'True'}),
'url': ('django.db.models.fields.URLField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
'version': ('django.db.models.fields.CharField', [], {'max_length': '64'})
},
'sentry.releasefile': {
'Meta': {'unique_together': "(('release', 'ident'),)", 'object_name': 'ReleaseFile'},
'file': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.File']"}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'ident': ('django.db.models.fields.CharField', [], {'max_length': '40'}),
'name': ('django.db.models.fields.TextField', [], {}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']"}),
'release': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Release']"})
},
'sentry.rule': {
'Meta': {'object_name': 'Rule'},
'data': ('sentry.db.models.fields.gzippeddict.GzippedDictField', [], {}),
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'label': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']"})
},
'sentry.savedsearch': {
'Meta': {'unique_together': "(('project', 'name'),)", 'object_name': 'SavedSearch'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']"}),
'query': ('django.db.models.fields.TextField', [], {})
},
'sentry.tagkey': {
'Meta': {'unique_together': "(('project', 'key'),)", 'object_name': 'TagKey', 'db_table': "'sentry_filterkey'"},
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'label': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']"}),
'status': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '0'}),
'values_seen': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '0'})
},
'sentry.tagvalue': {
'Meta': {'unique_together': "(('project', 'key', 'value'),)", 'object_name': 'TagValue', 'db_table': "'sentry_filtervalue'"},
'data': ('sentry.db.models.fields.gzippeddict.GzippedDictField', [], {'null': 'True', 'blank': 'True'}),
'first_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'null': 'True', 'db_index': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'last_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'null': 'True', 'db_index': 'True'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']", 'null': 'True'}),
'times_seen': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '0'}),
'value': ('django.db.models.fields.CharField', [], {'max_length': '200'})
},
'sentry.team': {
'Meta': {'unique_together': "(('organization', 'slug'),)", 'object_name': 'Team'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'null': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'organization': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Organization']"}),
'slug': ('django.db.models.fields.SlugField', [], {'max_length': '50'}),
'status': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '0'})
},
'sentry.user': {
'Meta': {'object_name': 'User', 'db_table': "'auth_user'"},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedAutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_managed': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '128'})
},
'sentry.useroption': {
'Meta': {'unique_together': "(('user', 'project', 'key'),)", 'object_name': 'UserOption'},
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']", 'null': 'True'}),
'user': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.User']"}),
'value': ('sentry.db.models.fields.pickle.UnicodePickledObjectField', [], {})
},
'sentry.userreport': {
'Meta': {'object_name': 'UserReport', 'index_together': "(('project', 'event_id'),)"},
'comments': ('django.db.models.fields.TextField', [], {}),
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75'}),
'event_id': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'group': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Group']", 'null': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']"})
}
}
complete_apps = ['sentry'] | bsd-3-clause |
paule32/keyBase | tools/classes/shapedetector.py | 2 | 1062 | # import the necessary packages
import cv2
class ShapeDetector:
def __init__(self):
pass
def detect(self, c):
# initialize the shape name and approximate the contour
shape = "unidentified"
peri = cv2.arcLength(c, True)
approx = cv2.approxPolyDP(c, 0.04 * peri, True)
# if the shape is a triangle, it will have 3 vertices
if len(approx) == 3:
shape = "triangle"
# if the shape has 4 vertices, it is either a square or
# a rectangle
elif len(approx) == 4:
# compute the bounding box of the contour and use the
# bounding box to compute the aspect ratio
(x, y, w, h) = cv2.boundingRect(approx)
ar = w / float(h)
# a square will have an aspect ratio that is approximately
# equal to one, otherwise, the shape is a rectangle
shape = "square" if ar >= 0.95 and ar <= 1.05 else "rectangle"
# if the shape is a pentagon, it will have 5 vertices
elif len(approx) == 5:
shape = "pentagon"
# otherwise, we assume the shape is a circle
else:
shape = "circle"
# return the name of the shape
return shape | mit |
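# Hedged usage sketch (added for illustration, not part of the original repo): one plausible way
# to drive ShapeDetector on a binarized image. The file name "shapes.png" and the import path
# "shapedetector" are assumptions; the cv2 calls follow the standard OpenCV API.
def _example_detect_shapes(path="shapes.png"):
    import cv2
    from shapedetector import ShapeDetector  # assumes the module above is importable under this name
    image = cv2.imread(path)
    gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
    blurred = cv2.GaussianBlur(gray, (5, 5), 0)
    thresh = cv2.threshold(blurred, 60, 255, cv2.THRESH_BINARY)[1]
    # findContours returns (contours, hierarchy) on OpenCV 2/4 and (image, contours, hierarchy)
    # on OpenCV 3; indexing with [-2] picks the contour list in either case
    contours = cv2.findContours(thresh.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)[-2]
    sd = ShapeDetector()
    return [sd.detect(c) for c in contours]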
vsajip/django | django/core/serializers/xml_serializer.py | 1 | 11688 | """
XML serializer.
"""
from __future__ import unicode_literals
from django.conf import settings
from django.core.serializers import base
from django.db import models, DEFAULT_DB_ALIAS
from django.utils.xmlutils import SimplerXMLGenerator
from django.utils.encoding import smart_unicode
from xml.dom import pulldom
class Serializer(base.Serializer):
"""
Serializes a QuerySet to XML.
"""
def indent(self, level):
if self.options.get('indent', None) is not None:
self.xml.ignorableWhitespace('\n' + ' ' * self.options.get('indent', None) * level)
def start_serialization(self):
"""
Start serialization -- open the XML document and the root element.
"""
self.xml = SimplerXMLGenerator(self.stream, self.options.get("encoding", settings.DEFAULT_CHARSET))
self.xml.startDocument()
self.xml.startElement("django-objects", {"version" : "1.0"})
def end_serialization(self):
"""
End serialization -- end the document.
"""
self.indent(0)
self.xml.endElement("django-objects")
self.xml.endDocument()
def start_object(self, obj):
"""
Called as each object is handled.
"""
if not hasattr(obj, "_meta"):
raise base.SerializationError("Non-model object (%s) encountered during serialization" % type(obj))
self.indent(1)
obj_pk = obj._get_pk_val()
if obj_pk is None:
attrs = {"model": smart_unicode(obj._meta),}
else:
attrs = {
"pk": smart_unicode(obj._get_pk_val()),
"model": smart_unicode(obj._meta),
}
self.xml.startElement("object", attrs)
def end_object(self, obj):
"""
Called after handling all fields for an object.
"""
self.indent(1)
self.xml.endElement("object")
def handle_field(self, obj, field):
"""
Called to handle each field on an object (except for ForeignKeys and
ManyToManyFields)
"""
self.indent(2)
self.xml.startElement("field", {
"name" : field.name,
"type" : field.get_internal_type()
})
# Get a "string version" of the object's data.
if getattr(obj, field.name) is not None:
self.xml.characters(field.value_to_string(obj))
else:
self.xml.addQuickElement("None")
self.xml.endElement("field")
def handle_fk_field(self, obj, field):
"""
Called to handle a ForeignKey (we need to treat them slightly
differently from regular fields).
"""
self._start_relational_field(field)
related_att = getattr(obj, field.get_attname())
if related_att is not None:
if self.use_natural_keys and hasattr(field.rel.to, 'natural_key'):
related = getattr(obj, field.name)
# If related object has a natural key, use it
related = related.natural_key()
# Iterable natural keys are rolled out as subelements
for key_value in related:
self.xml.startElement("natural", {})
self.xml.characters(smart_unicode(key_value))
self.xml.endElement("natural")
else:
self.xml.characters(smart_unicode(related_att))
else:
self.xml.addQuickElement("None")
self.xml.endElement("field")
def handle_m2m_field(self, obj, field):
"""
Called to handle a ManyToManyField. Related objects are only
serialized as references to the object's PK (i.e. the related *data*
is not dumped, just the relation).
"""
if field.rel.through._meta.auto_created:
self._start_relational_field(field)
if self.use_natural_keys and hasattr(field.rel.to, 'natural_key'):
# If the objects in the m2m have a natural key, use it
def handle_m2m(value):
natural = value.natural_key()
# Iterable natural keys are rolled out as subelements
self.xml.startElement("object", {})
for key_value in natural:
self.xml.startElement("natural", {})
self.xml.characters(smart_unicode(key_value))
self.xml.endElement("natural")
self.xml.endElement("object")
else:
def handle_m2m(value):
self.xml.addQuickElement("object", attrs={
'pk' : smart_unicode(value._get_pk_val())
})
for relobj in getattr(obj, field.name).iterator():
handle_m2m(relobj)
self.xml.endElement("field")
def _start_relational_field(self, field):
"""
Helper to output the <field> element for relational fields
"""
self.indent(2)
self.xml.startElement("field", {
"name" : field.name,
"rel" : field.rel.__class__.__name__,
"to" : smart_unicode(field.rel.to._meta),
})
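# Hedged usage sketch (added for illustration, not part of the original module): the Serializer
# above is normally reached through django.core.serializers.serialize(); "myapp.models.Author"
# is a hypothetical model used only to make the call concrete.
def _example_serialize_to_xml():
    from django.core import serializers
    from myapp.models import Author  # hypothetical model
    xml = serializers.serialize("xml", Author.objects.all(), indent=2, use_natural_keys=True)
    return xml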
class Deserializer(base.Deserializer):
"""
Deserialize XML.
"""
def __init__(self, stream_or_string, **options):
super(Deserializer, self).__init__(stream_or_string, **options)
self.event_stream = pulldom.parse(self.stream)
self.db = options.pop('using', DEFAULT_DB_ALIAS)
def next(self):
for event, node in self.event_stream:
if event == "START_ELEMENT" and node.nodeName == "object":
self.event_stream.expandNode(node)
return self._handle_object(node)
raise StopIteration
__next__ = next
def _handle_object(self, node):
"""
Convert an <object> node to a DeserializedObject.
"""
# Look up the model using the model loading mechanism. If this fails,
# bail.
Model = self._get_model_from_node(node, "model")
# Start building a data dictionary from the object.
# If the node is missing the pk set it to None
if node.hasAttribute("pk"):
pk = node.getAttribute("pk")
else:
pk = None
data = {Model._meta.pk.attname : Model._meta.pk.to_python(pk)}
# Also start building a dict of m2m data (this is saved as
# {m2m_accessor_attribute : [list_of_related_objects]})
m2m_data = {}
        # Deserialize each field.
for field_node in node.getElementsByTagName("field"):
# If the field is missing the name attribute, bail (are you
# sensing a pattern here?)
field_name = field_node.getAttribute("name")
if not field_name:
raise base.DeserializationError("<field> node is missing the 'name' attribute")
# Get the field from the Model. This will raise a
# FieldDoesNotExist if, well, the field doesn't exist, which will
# be propagated correctly.
field = Model._meta.get_field(field_name)
# As is usually the case, relation fields get the special treatment.
if field.rel and isinstance(field.rel, models.ManyToManyRel):
m2m_data[field.name] = self._handle_m2m_field_node(field_node, field)
elif field.rel and isinstance(field.rel, models.ManyToOneRel):
data[field.attname] = self._handle_fk_field_node(field_node, field)
else:
if field_node.getElementsByTagName('None'):
value = None
else:
value = field.to_python(getInnerText(field_node).strip())
data[field.name] = value
# Return a DeserializedObject so that the m2m data has a place to live.
return base.DeserializedObject(Model(**data), m2m_data)
def _handle_fk_field_node(self, node, field):
"""
Handle a <field> node for a ForeignKey
"""
# Check if there is a child node named 'None', returning None if so.
if node.getElementsByTagName('None'):
return None
else:
if hasattr(field.rel.to._default_manager, 'get_by_natural_key'):
keys = node.getElementsByTagName('natural')
if keys:
# If there are 'natural' subelements, it must be a natural key
field_value = [getInnerText(k).strip() for k in keys]
obj = field.rel.to._default_manager.db_manager(self.db).get_by_natural_key(*field_value)
obj_pk = getattr(obj, field.rel.field_name)
# If this is a natural foreign key to an object that
# has a FK/O2O as the foreign key, use the FK value
if field.rel.to._meta.pk.rel:
obj_pk = obj_pk.pk
else:
# Otherwise, treat like a normal PK
field_value = getInnerText(node).strip()
obj_pk = field.rel.to._meta.get_field(field.rel.field_name).to_python(field_value)
return obj_pk
else:
field_value = getInnerText(node).strip()
return field.rel.to._meta.get_field(field.rel.field_name).to_python(field_value)
def _handle_m2m_field_node(self, node, field):
"""
Handle a <field> node for a ManyToManyField.
"""
if hasattr(field.rel.to._default_manager, 'get_by_natural_key'):
def m2m_convert(n):
keys = n.getElementsByTagName('natural')
if keys:
# If there are 'natural' subelements, it must be a natural key
field_value = [getInnerText(k).strip() for k in keys]
obj_pk = field.rel.to._default_manager.db_manager(self.db).get_by_natural_key(*field_value).pk
else:
# Otherwise, treat like a normal PK value.
obj_pk = field.rel.to._meta.pk.to_python(n.getAttribute('pk'))
return obj_pk
else:
m2m_convert = lambda n: field.rel.to._meta.pk.to_python(n.getAttribute('pk'))
return [m2m_convert(c) for c in node.getElementsByTagName("object")]
def _get_model_from_node(self, node, attr):
"""
Helper to look up a model from a <object model=...> or a <field
rel=... to=...> node.
"""
model_identifier = node.getAttribute(attr)
if not model_identifier:
raise base.DeserializationError(
"<%s> node is missing the required '%s' attribute" \
% (node.nodeName, attr))
try:
Model = models.get_model(*model_identifier.split("."))
except TypeError:
Model = None
if Model is None:
raise base.DeserializationError(
"<%s> node has invalid model identifier: '%s'" % \
(node.nodeName, model_identifier))
return Model
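# Hedged usage sketch (added for illustration, not part of the original module): deserialization
# is normally driven through django.core.serializers.deserialize(); xml_data is expected to be a
# string or stream shaped like the output of the Serializer above.
def _example_deserialize_from_xml(xml_data):
    from django.core import serializers
    for deserialized in serializers.deserialize("xml", xml_data):
        # each item is a DeserializedObject; .save() writes the wrapped model instance
        deserialized.save()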
def getInnerText(node):
"""
Get all the inner text of a DOM node (recursively).
"""
# inspired by http://mail.python.org/pipermail/xml-sig/2005-March/011022.html
inner_text = []
for child in node.childNodes:
if child.nodeType == child.TEXT_NODE or child.nodeType == child.CDATA_SECTION_NODE:
inner_text.append(child.data)
elif child.nodeType == child.ELEMENT_NODE:
inner_text.extend(getInnerText(child))
else:
pass
return "".join(inner_text)
| bsd-3-clause |
Endika/odoomrp-utils | l10n_eu_product_adr_report/__openerp__.py | 12 | 1603 | # -*- encoding: utf-8 -*-
##############################################################################
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see http://www.gnu.org/licenses/.
#
##############################################################################
{
"name": "ADR Product report",
"version": "1.0",
"depends": [
"product",
],
"author": "OdooMRP team, "
"AvanzOSC, "
"Serv. Tecnol. Avanzados - Pedro M. Baeza",
"contributors": [
"Juan Ignacio Úbeda <juanignacioubeda@avanzosc.es>",
"Oihane Crucelaegui <oihanecrucelaegi@avanzosc.es>",
"Pedro M. Baeza <pedro.baeza@serviciosbaeza.com>",
"Ana Juaristi <anajuaristi@avanzosc.es>"
],
"category": "Product Management",
"website": "http://www.odoomrp.com",
"summary": "",
"data": [
"views/product_view.xml",
"report/product_adr_report.xml",
"security/ir.model.access.csv",
],
"installable": True,
}
| agpl-3.0 |
chaluemwut/fbserver | venv/lib/python2.7/site-packages/scipy/stats/morestats.py | 7 | 78330 | # Author: Travis Oliphant, 2002
#
# Further updates and enhancements by many SciPy developers.
#
from __future__ import division, print_function, absolute_import
import math
import warnings
import numpy as np
from numpy import (isscalar, r_, log, sum, around, unique, asarray,
zeros, arange, sort, amin, amax, any, atleast_1d, sqrt, ceil,
floor, array, poly1d, compress, not_equal, pi, exp, ravel, angle)
from numpy.testing.decorators import setastest
from scipy.lib.six import string_types
from scipy.lib._numpy_compat import count_nonzero
from scipy import optimize
from scipy import special
from . import statlib
from . import stats
from .stats import find_repeats
from .contingency import chi2_contingency
from . import distributions
from ._distn_infrastructure import rv_generic
__all__ = ['mvsdist',
'bayes_mvs', 'kstat', 'kstatvar', 'probplot', 'ppcc_max', 'ppcc_plot',
'boxcox_llf', 'boxcox', 'boxcox_normmax', 'boxcox_normplot',
'shapiro', 'anderson', 'ansari', 'bartlett', 'levene', 'binom_test',
'fligner', 'mood', 'wilcoxon', 'median_test',
'pdf_fromgamma', 'circmean', 'circvar', 'circstd', 'anderson_ksamp'
]
def bayes_mvs(data, alpha=0.90):
"""
Bayesian confidence intervals for the mean, var, and std.
Parameters
----------
data : array_like
Input data, if multi-dimensional it is flattened to 1-D by `bayes_mvs`.
Requires 2 or more data points.
alpha : float, optional
Probability that the returned confidence interval contains
the true parameter.
Returns
-------
mean_cntr, var_cntr, std_cntr : tuple
The three results are for the mean, variance and standard deviation,
respectively. Each result is a tuple of the form::
(center, (lower, upper))
with `center` the mean of the conditional pdf of the value given the
data, and `(lower, upper)` a confidence interval, centered on the
median, containing the estimate to a probability `alpha`.
Notes
-----
Each tuple of mean, variance, and standard deviation estimates represent
the (center, (lower, upper)) with center the mean of the conditional pdf
of the value given the data and (lower, upper) is a confidence interval
centered on the median, containing the estimate to a probability
`alpha`.
Converts data to 1-D and assumes all data has the same mean and variance.
Uses Jeffrey's prior for variance and std.
Equivalent to tuple((x.mean(), x.interval(alpha)) for x in mvsdist(dat))
References
----------
T.E. Oliphant, "A Bayesian perspective on estimating mean, variance, and
standard-deviation from data", http://hdl.handle.net/1877/438, 2006.
"""
res = mvsdist(data)
if alpha >= 1 or alpha <= 0:
raise ValueError("0 < alpha < 1 is required, but alpha=%s was given." % alpha)
return tuple((x.mean(), x.interval(alpha)) for x in res)
def mvsdist(data):
"""
'Frozen' distributions for mean, variance, and standard deviation of data.
Parameters
----------
data : array_like
Input array. Converted to 1-D using ravel.
Requires 2 or more data-points.
Returns
-------
mdist : "frozen" distribution object
Distribution object representing the mean of the data
vdist : "frozen" distribution object
Distribution object representing the variance of the data
sdist : "frozen" distribution object
Distribution object representing the standard deviation of the data
Notes
-----
The return values from bayes_mvs(data) is equivalent to
``tuple((x.mean(), x.interval(0.90)) for x in mvsdist(data))``.
In other words, calling ``<dist>.mean()`` and ``<dist>.interval(0.90)``
on the three distribution objects returned from this function will give
the same results that are returned from `bayes_mvs`.
Examples
--------
>>> from scipy.stats import mvsdist
>>> data = [6, 9, 12, 7, 8, 8, 13]
>>> mean, var, std = mvsdist(data)
We now have frozen distribution objects "mean", "var" and "std" that we can
examine:
>>> mean.mean()
9.0
>>> mean.interval(0.95)
(6.6120585482655692, 11.387941451734431)
>>> mean.std()
1.1952286093343936
"""
x = ravel(data)
n = len(x)
if (n < 2):
raise ValueError("Need at least 2 data-points.")
xbar = x.mean()
C = x.var()
if (n > 1000): # gaussian approximations for large n
mdist = distributions.norm(loc=xbar, scale=math.sqrt(C/n))
sdist = distributions.norm(loc=math.sqrt(C), scale=math.sqrt(C/(2.*n)))
vdist = distributions.norm(loc=C, scale=math.sqrt(2.0/n)*C)
else:
nm1 = n-1
fac = n*C/2.
val = nm1/2.
mdist = distributions.t(nm1,loc=xbar,scale=math.sqrt(C/nm1))
sdist = distributions.gengamma(val,-2,scale=math.sqrt(fac))
vdist = distributions.invgamma(val,scale=fac)
return mdist, vdist, sdist
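# Hedged demonstration (added for illustration, not part of the original module): the docstrings
# above state that bayes_mvs(data, alpha) is equivalent to taking (x.mean(), x.interval(alpha))
# for each frozen distribution returned by mvsdist(data); this sketch checks that claim on the
# small sample used in the mvsdist example.
def _example_bayes_mvs_equivalence():
    data = [6, 9, 12, 7, 8, 8, 13]
    via_bayes = bayes_mvs(data, alpha=0.9)
    via_mvsdist = tuple((x.mean(), x.interval(0.9)) for x in mvsdist(data))
    return via_bayes, via_mvsdist  # the two tuples should agree to floating-point precision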
def kstat(data,n=2):
"""
Return the nth k-statistic (1<=n<=4 so far).
The nth k-statistic is the unique symmetric unbiased estimator of the nth
cumulant kappa_n.
Parameters
----------
data : array_like
Input array.
n : int, {1, 2, 3, 4}, optional
Default is equal to 2.
Returns
-------
kstat : float
The nth k-statistic.
See Also
--------
kstatvar: Returns an unbiased estimator of the variance of the k-statistic.
Notes
-----
The cumulants are related to central moments but are specifically defined
using a power series expansion of the logarithm of the characteristic
function (which is the Fourier transform of the PDF).
In particular let phi(t) be the characteristic function, then::
        ln phi(t) = sum_n kappa_n (it)^n / n!    (sum from n=0 to inf)
The first few cumulants (kappa_n) in terms of central moments (mu_n) are::
kappa_1 = mu_1
kappa_2 = mu_2
kappa_3 = mu_3
kappa_4 = mu_4 - 3*mu_2**2
kappa_5 = mu_5 - 10*mu_2 * mu_3
References
----------
http://mathworld.wolfram.com/k-Statistic.html
http://mathworld.wolfram.com/Cumulant.html
"""
if n > 4 or n < 1:
raise ValueError("k-statistics only supported for 1<=n<=4")
n = int(n)
S = zeros(n+1,'d')
data = ravel(data)
N = len(data)
for k in range(1,n+1):
S[k] = sum(data**k,axis=0)
if n == 1:
return S[1]*1.0/N
elif n == 2:
return (N*S[2]-S[1]**2.0)/(N*(N-1.0))
elif n == 3:
return (2*S[1]**3 - 3*N*S[1]*S[2]+N*N*S[3]) / (N*(N-1.0)*(N-2.0))
elif n == 4:
return (-6*S[1]**4 + 12*N*S[1]**2 * S[2] - 3*N*(N-1.0)*S[2]**2 -
4*N*(N+1)*S[1]*S[3] + N*N*(N+1)*S[4]) / \
(N*(N-1.0)*(N-2.0)*(N-3.0))
else:
raise ValueError("Should not be here.")
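# Hedged demonstration (added for illustration, not part of the original module): a quick check,
# assuming numpy is available as np (imported at the top of this module), of two well-known
# identities: the first k-statistic is the sample mean and the second is the unbiased sample
# variance, i.e. kstat(x, 1) == x.mean() and kstat(x, 2) == x.var(ddof=1).
def _example_kstat_identities():
    rng = np.random.RandomState(12345)
    x = rng.normal(size=200)
    return (np.allclose(kstat(x, 1), x.mean()),
            np.allclose(kstat(x, 2), x.var(ddof=1)))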
def kstatvar(data,n=2):
"""
Returns an unbiased estimator of the variance of the k-statistic.
See `kstat` for more details of the k-statistic.
Parameters
----------
data : array_like
Input array.
n : int, {1, 2}, optional
Default is equal to 2.
Returns
-------
kstatvar : float
The nth k-statistic variance.
See Also
--------
kstat
"""
data = ravel(data)
N = len(data)
if n == 1:
return kstat(data,n=2)*1.0/N
elif n == 2:
k2 = kstat(data,n=2)
k4 = kstat(data,n=4)
return (2*k2*k2*N + (N-1)*k4)/(N*(N+1))
else:
raise ValueError("Only n=1 or n=2 supported.")
def _calc_uniform_order_statistic_medians(x):
"""See Notes section of `probplot` for details."""
N = len(x)
osm_uniform = np.zeros(N, dtype=np.float64)
osm_uniform[-1] = 0.5**(1.0 / N)
osm_uniform[0] = 1 - osm_uniform[-1]
i = np.arange(2, N)
osm_uniform[1:-1] = (i - 0.3175) / (N + 0.365)
return osm_uniform
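# Hedged demonstration (added for illustration, not part of the original module): the helper above
# implements Filliben's estimate of the uniform order statistic medians, which probplot's Notes
# section spells out; for n = 5 the endpoints are 1 - 0.5**(1/5) and 0.5**(1/5), and the interior
# values are (i - 0.3175) / (n + 0.365) for i = 2, 3, 4.
def _example_filliben_medians():
    n = 5
    medians = _calc_uniform_order_statistic_medians(np.zeros(n))
    expected = [1 - 0.5 ** (1.0 / n),
                (2 - 0.3175) / (n + 0.365),
                (3 - 0.3175) / (n + 0.365),
                (4 - 0.3175) / (n + 0.365),
                0.5 ** (1.0 / n)]
    return np.allclose(medians, expected)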
def _parse_dist_kw(dist, enforce_subclass=True):
"""Parse `dist` keyword.
Parameters
----------
dist : str or stats.distributions instance.
Several functions take `dist` as a keyword, hence this utility
function.
enforce_subclass : bool, optional
If True (default), `dist` needs to be a
`_distn_infrastructure.rv_generic` instance.
It can sometimes be useful to set this keyword to False, if a function
wants to accept objects that just look somewhat like such an instance
(for example, they have a ``ppf`` method).
"""
if isinstance(dist, rv_generic):
pass
elif isinstance(dist, string_types):
try:
dist = getattr(distributions, dist)
except AttributeError:
raise ValueError("%s is not a valid distribution name" % dist)
elif enforce_subclass:
msg = ("`dist` should be a stats.distributions instance or a string "
"with the name of such a distribution.")
raise ValueError(msg)
return dist
def probplot(x, sparams=(), dist='norm', fit=True, plot=None):
"""
Calculate quantiles for a probability plot, and optionally show the plot.
Generates a probability plot of sample data against the quantiles of a
specified theoretical distribution (the normal distribution by default).
`probplot` optionally calculates a best-fit line for the data and plots the
results using Matplotlib or a given plot function.
Parameters
----------
x : array_like
Sample/response data from which `probplot` creates the plot.
sparams : tuple, optional
Distribution-specific shape parameters (shape parameters plus location
and scale).
dist : str or stats.distributions instance, optional
Distribution or distribution function name. The default is 'norm' for a
normal probability plot. Objects that look enough like a
stats.distributions instance (i.e. they have a ``ppf`` method) are also
accepted.
fit : bool, optional
Fit a least-squares regression (best-fit) line to the sample data if
True (default).
plot : object, optional
If given, plots the quantiles and least squares fit.
`plot` is an object that has to have methods "plot" and "text".
The `matplotlib.pyplot` module or a Matplotlib Axes object can be used,
or a custom object with the same methods.
Default is None, which means that no plot is created.
Returns
-------
(osm, osr) : tuple of ndarrays
Tuple of theoretical quantiles (osm, or order statistic medians) and
ordered responses (osr). `osr` is simply sorted input `x`.
For details on how `osm` is calculated see the Notes section.
(slope, intercept, r) : tuple of floats, optional
Tuple containing the result of the least-squares fit, if that is
performed by `probplot`. `r` is the square root of the coefficient of
determination. If ``fit=False`` and ``plot=None``, this tuple is not
returned.
Notes
-----
Even if `plot` is given, the figure is not shown or saved by `probplot`;
``plt.show()`` or ``plt.savefig('figname.png')`` should be used after
calling `probplot`.
`probplot` generates a probability plot, which should not be confused with
a Q-Q or a P-P plot. Statsmodels has more extensive functionality of this
type, see ``statsmodels.api.ProbPlot``.
The formula used for the theoretical quantiles (horizontal axis of the
probability plot) is Filliben's estimate::
        quantiles = dist.ppf(val), where
            val = 0.5**(1/n),                  for i = n
            val = (i - 0.3175) / (n + 0.365),  for i = 2, ..., n-1
            val = 1 - 0.5**(1/n),              for i = 1
where ``i`` indicates the i-th ordered value and ``n`` is the total number
of values.
Examples
--------
>>> from scipy import stats
>>> import matplotlib.pyplot as plt
>>> nsample = 100
>>> np.random.seed(7654321)
A t distribution with small degrees of freedom:
>>> ax1 = plt.subplot(221)
>>> x = stats.t.rvs(3, size=nsample)
>>> res = stats.probplot(x, plot=plt)
A t distribution with larger degrees of freedom:
>>> ax2 = plt.subplot(222)
>>> x = stats.t.rvs(25, size=nsample)
>>> res = stats.probplot(x, plot=plt)
A mixture of two normal distributions with broadcasting:
>>> ax3 = plt.subplot(223)
>>> x = stats.norm.rvs(loc=[0,5], scale=[1,1.5],
... size=(nsample/2.,2)).ravel()
>>> res = stats.probplot(x, plot=plt)
A standard normal distribution:
>>> ax4 = plt.subplot(224)
>>> x = stats.norm.rvs(loc=0, scale=1, size=nsample)
>>> res = stats.probplot(x, plot=plt)
Produce a new figure with a loggamma distribution, using the ``dist`` and
``sparams`` keywords:
>>> fig = plt.figure()
>>> ax = fig.add_subplot(111)
>>> x = stats.loggamma.rvs(c=2.5, size=500)
>>> stats.probplot(x, dist=stats.loggamma, sparams=(2.5,), plot=ax)
>>> ax.set_title("Probplot for loggamma dist with shape parameter 2.5")
Show the results with Matplotlib:
>>> plt.show()
"""
x = np.asarray(x)
osm_uniform = _calc_uniform_order_statistic_medians(x)
dist = _parse_dist_kw(dist, enforce_subclass=False)
if sparams is None:
sparams = ()
if isscalar(sparams):
sparams = (sparams,)
if not isinstance(sparams, tuple):
sparams = tuple(sparams)
osm = dist.ppf(osm_uniform, *sparams)
osr = sort(x)
if fit or (plot is not None):
# perform a linear fit.
slope, intercept, r, prob, sterrest = stats.linregress(osm, osr)
if plot is not None:
plot.plot(osm, osr, 'bo', osm, slope*osm + intercept, 'r-')
try:
if hasattr(plot, 'set_title'):
# Matplotlib Axes instance or something that looks like it
plot.set_title('Probability Plot')
plot.set_xlabel('Quantiles')
plot.set_ylabel('Ordered Values')
else:
# matplotlib.pyplot module
plot.title('Probability Plot')
plot.xlabel('Quantiles')
plot.ylabel('Ordered Values')
        except Exception:
# Not an MPL object or something that looks (enough) like it.
# Don't crash on adding labels or title
pass
# Add R^2 value to the plot as text
xmin = amin(osm)
xmax = amax(osm)
ymin = amin(x)
ymax = amax(x)
posx = xmin + 0.70 * (xmax - xmin)
posy = ymin + 0.01 * (ymax - ymin)
plot.text(posx, posy, "$R^2=%1.4f$" % r**2)
if fit:
return (osm, osr), (slope, intercept, r)
else:
return osm, osr
def ppcc_max(x, brack=(0.0,1.0), dist='tukeylambda'):
"""Returns the shape parameter that maximizes the probability plot
correlation coefficient for the given data to a one-parameter
family of distributions.
See also ppcc_plot
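    Examples
    --------
    A minimal sketch (illustrative only; the recovered shape should be close
    to the true Tukey-lambda shape used to draw the sample, but the exact
    value depends on the random sample):
    >>> from scipy import stats
    >>> np.random.seed(1234567)
    >>> x = stats.tukeylambda.rvs(-0.7, loc=2, scale=0.5, size=1000)
    >>> shape = stats.ppcc_max(x)  # expected to be near -0.7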
"""
dist = _parse_dist_kw(dist)
osm_uniform = _calc_uniform_order_statistic_medians(x)
osr = sort(x)
# this function computes the x-axis values of the probability plot
# and computes a linear regression (including the correlation)
# and returns 1-r so that a minimization function maximizes the
# correlation
def tempfunc(shape, mi, yvals, func):
xvals = func(mi, shape)
r, prob = stats.pearsonr(xvals, yvals)
return 1-r
return optimize.brent(tempfunc, brack=brack, args=(osm_uniform, osr, dist.ppf))
def ppcc_plot(x,a,b,dist='tukeylambda', plot=None, N=80):
"""Returns (shape, ppcc), and optionally plots shape vs. ppcc
(probability plot correlation coefficient) as a function of shape
parameter for a one-parameter family of distributions from shape
value a to b.
See also ppcc_max
"""
svals = r_[a:b:complex(N)]
ppcc = svals*0.0
k = 0
for sval in svals:
r1,r2 = probplot(x,sval,dist=dist,fit=1)
ppcc[k] = r2[-1]
k += 1
if plot is not None:
plot.plot(svals, ppcc, 'x')
plot.title('(%s) PPCC Plot' % dist)
        plot.xlabel('Shape Values')
        plot.ylabel('Prob Plot Corr. Coef.')
return svals, ppcc
def boxcox_llf(lmb, data):
r"""The boxcox log-likelihood function.
Parameters
----------
lmb : scalar
Parameter for Box-Cox transformation. See `boxcox` for details.
data : array_like
Data to calculate Box-Cox log-likelihood for. If `data` is
multi-dimensional, the log-likelihood is calculated along the first
axis.
Returns
-------
llf : float or ndarray
Box-Cox log-likelihood of `data` given `lmb`. A float for 1-D `data`,
an array otherwise.
See Also
--------
boxcox, probplot, boxcox_normplot, boxcox_normmax
Notes
-----
The Box-Cox log-likelihood function is defined here as
.. math::
        llf = (\lambda - 1) \sum_i(\log(x_i)) -
              N/2 \log(\sum_i (y_i - \bar{y})^2 / N),
where ``y`` is the Box-Cox transformed input data ``x``.
Examples
--------
>>> from scipy import stats
>>> import matplotlib.pyplot as plt
>>> from mpl_toolkits.axes_grid1.inset_locator import inset_axes
>>> np.random.seed(1245)
Generate some random variates and calculate Box-Cox log-likelihood values
for them for a range of ``lmbda`` values:
>>> x = stats.loggamma.rvs(5, loc=10, size=1000)
>>> lmbdas = np.linspace(-2, 10)
    >>> llf = np.zeros(lmbdas.shape, dtype=float)
>>> for ii, lmbda in enumerate(lmbdas):
... llf[ii] = stats.boxcox_llf(lmbda, x)
Also find the optimal lmbda value with `boxcox`:
>>> x_most_normal, lmbda_optimal = stats.boxcox(x)
Plot the log-likelihood as function of lmbda. Add the optimal lmbda as a
horizontal line to check that that's really the optimum:
>>> fig = plt.figure()
>>> ax = fig.add_subplot(111)
>>> ax.plot(lmbdas, llf, 'b.-')
>>> ax.axhline(stats.boxcox_llf(lmbda_optimal, x), color='r')
>>> ax.set_xlabel('lmbda parameter')
>>> ax.set_ylabel('Box-Cox log-likelihood')
Now add some probability plots to show that where the log-likelihood is
maximized the data transformed with `boxcox` looks closest to normal:
>>> locs = [3, 10, 4] # 'lower left', 'center', 'lower right'
>>> for lmbda, loc in zip([-1, lmbda_optimal, 9], locs):
... xt = stats.boxcox(x, lmbda=lmbda)
... (osm, osr), (slope, intercept, r_sq) = stats.probplot(xt)
... ax_inset = inset_axes(ax, width="20%", height="20%", loc=loc)
... ax_inset.plot(osm, osr, 'c.', osm, slope*osm + intercept, 'k-')
... ax_inset.set_xticklabels([])
... ax_inset.set_yticklabels([])
... ax_inset.set_title('$\lambda=%1.2f$' % lmbda)
>>> plt.show()
"""
data = np.asarray(data)
N = data.shape[0]
if N == 0:
return np.nan
y = boxcox(data, lmb)
y_mean = np.mean(y, axis=0)
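    # llf = (lmb - 1)*sum(log(x)) - N/2 * log(sum((y - y_mean)**2) / N),
    # matching the expression given in the Notes section above.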
llf = (lmb - 1) * np.sum(np.log(data), axis=0)
llf -= N / 2.0 * np.log(np.sum((y - y_mean)**2. / N, axis=0))
return llf
def _boxcox_conf_interval(x, lmax, alpha):
# Need to find the lambda for which
# f(x,lmbda) >= f(x,lmax) - 0.5*chi^2_alpha;1
fac = 0.5 * distributions.chi2.ppf(1 - alpha, 1)
target = boxcox_llf(lmax, x) - fac
def rootfunc(lmbda, data, target):
return boxcox_llf(lmbda, data) - target
# Find positive endpoint of interval in which answer is to be found
newlm = lmax + 0.5
N = 0
while (rootfunc(newlm, x, target) > 0.0) and (N < 500):
newlm += 0.1
N += 1
if N == 500:
raise RuntimeError("Could not find endpoint.")
lmplus = optimize.brentq(rootfunc, lmax, newlm, args=(x, target))
# Now find negative interval in the same way
newlm = lmax - 0.5
N = 0
while (rootfunc(newlm, x, target) > 0.0) and (N < 500):
newlm -= 0.1
N += 1
if N == 500:
raise RuntimeError("Could not find endpoint.")
lmminus = optimize.brentq(rootfunc, newlm, lmax, args=(x, target))
return lmminus, lmplus
def boxcox(x, lmbda=None, alpha=None):
r"""
Return a positive dataset transformed by a Box-Cox power transformation.
Parameters
----------
x : ndarray
Input array. Should be 1-dimensional.
lmbda : {None, scalar}, optional
If `lmbda` is not None, do the transformation for that value.
If `lmbda` is None, find the lambda that maximizes the log-likelihood
function and return it as the second output argument.
alpha : {None, float}, optional
If `alpha` is not None, return the ``100 * (1-alpha)%`` confidence
interval for `lmbda` as the third output argument.
Must be between 0.0 and 1.0.
Returns
-------
boxcox : ndarray
Box-Cox power transformed array.
maxlog : float, optional
If the `lmbda` parameter is None, the second returned argument is
the lambda that maximizes the log-likelihood function.
(min_ci, max_ci) : tuple of float, optional
If `lmbda` parameter is None and `alpha` is not None, this returned
tuple of floats represents the minimum and maximum confidence limits
given `alpha`.
See Also
--------
probplot, boxcox_normplot, boxcox_normmax, boxcox_llf
Notes
-----
    The Box-Cox transform is given by::
        y = (x**lmbda - 1) / lmbda,  for lmbda != 0
            log(x),                  for lmbda = 0
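    An illustrative sketch of this transform (assumed equivalent to
    ``scipy.special.boxcox`` for positive ``x``)::
        import numpy as np
        def boxcox_transform(x, lmbda):
            # piecewise definition from above; x must be positive
            return np.log(x) if lmbda == 0 else (x ** lmbda - 1) / lmbda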
`boxcox` requires the input data to be positive. Sometimes a Box-Cox
transformation provides a shift parameter to achieve this; `boxcox` does
not. Such a shift parameter is equivalent to adding a positive constant to
`x` before calling `boxcox`.
The confidence limits returned when `alpha` is provided give the interval
where:
.. math::
        llf(\hat{\lambda}) - llf(\lambda) < \frac{1}{2}\chi^2(1 - \alpha, 1),
with ``llf`` the log-likelihood function and :math:`\chi^2` the chi-squared
function.
References
----------
G.E.P. Box and D.R. Cox, "An Analysis of Transformations", Journal of the
Royal Statistical Society B, 26, 211-252 (1964).
Examples
--------
>>> from scipy import stats
>>> import matplotlib.pyplot as plt
We generate some random variates from a non-normal distribution and make a
probability plot for it, to show it is non-normal in the tails:
>>> fig = plt.figure()
>>> ax1 = fig.add_subplot(211)
>>> x = stats.loggamma.rvs(5, size=500) + 5
>>> stats.probplot(x, dist=stats.norm, plot=ax1)
>>> ax1.set_xlabel('')
>>> ax1.set_title('Probplot against normal distribution')
We now use `boxcox` to transform the data so it's closest to normal:
>>> ax2 = fig.add_subplot(212)
>>> xt, _ = stats.boxcox(x)
>>> stats.probplot(xt, dist=stats.norm, plot=ax2)
>>> ax2.set_title('Probplot after Box-Cox transformation')
>>> plt.show()
"""
x = np.asarray(x)
if x.size == 0:
return x
if any(x <= 0):
raise ValueError("Data must be positive.")
if lmbda is not None: # single transformation
return special.boxcox(x, lmbda)
# If lmbda=None, find the lmbda that maximizes the log-likelihood function.
lmax = boxcox_normmax(x, method='mle')
y = boxcox(x, lmax)
if alpha is None:
return y, lmax
else:
# Find confidence interval
interval = _boxcox_conf_interval(x, lmax, alpha)
return y, lmax, interval
def boxcox_normmax(x, brack=(-2.0, 2.0), method='pearsonr'):
"""Compute optimal Box-Cox transform parameter for input data.
Parameters
----------
x : array_like
Input array.
brack : 2-tuple, optional
The starting interval for a downhill bracket search with
`optimize.brent`. Note that this is in most cases not critical; the
final result is allowed to be outside this bracket.
method : str, optional
The method to determine the optimal transform parameter (`boxcox`
``lmbda`` parameter). Options are:
'pearsonr' (default)
Maximizes the Pearson correlation coefficient between
``y = boxcox(x)`` and the expected values for ``y`` if `x` would be
normally-distributed.
'mle'
Minimizes the log-likelihood `boxcox_llf`. This is the method used
in `boxcox`.
'all'
Use all optimization methods available, and return all results.
Useful to compare different methods.
Returns
-------
maxlog : float or ndarray
The optimal transform parameter found. An array instead of a scalar
for ``method='all'``.
See Also
--------
boxcox, boxcox_llf, boxcox_normplot
Examples
--------
>>> from scipy import stats
>>> import matplotlib.pyplot as plt
>>> np.random.seed(1234) # make this example reproducible
Generate some data and determine optimal ``lmbda`` in various ways:
>>> x = stats.loggamma.rvs(5, size=30) + 5
>>> y, lmax_mle = stats.boxcox(x)
>>> lmax_pearsonr = stats.boxcox_normmax(x)
>>> lmax_mle
7.177...
>>> lmax_pearsonr
7.916...
>>> stats.boxcox_normmax(x, method='all')
array([ 7.91667384, 7.17718692])
>>> fig = plt.figure()
>>> ax = fig.add_subplot(111)
>>> stats.boxcox_normplot(x, -10, 10, plot=ax)
>>> ax.axvline(lmax_mle, color='r')
>>> ax.axvline(lmax_pearsonr, color='g', ls='--')
>>> plt.show()
"""
def _pearsonr(x, brack):
osm_uniform = _calc_uniform_order_statistic_medians(x)
xvals = distributions.norm.ppf(osm_uniform)
def _eval_pearsonr(lmbda, xvals, samps):
# This function computes the x-axis values of the probability plot
# and computes a linear regression (including the correlation) and
# returns ``1 - r`` so that a minimization function maximizes the
# correlation.
y = boxcox(samps, lmbda)
yvals = np.sort(y)
r, prob = stats.pearsonr(xvals, yvals)
return 1 - r
return optimize.brent(_eval_pearsonr, brack=brack, args=(xvals, x))
def _mle(x, brack):
def _eval_mle(lmb, data):
# function to minimize
return -boxcox_llf(lmb, data)
return optimize.brent(_eval_mle, brack=brack, args=(x,))
def _all(x, brack):
        maxlog = np.zeros(2, dtype=float)
maxlog[0] = _pearsonr(x, brack)
maxlog[1] = _mle(x, brack)
return maxlog
methods = {'pearsonr': _pearsonr,
'mle': _mle,
'all': _all}
if method not in methods.keys():
raise ValueError("Method %s not recognized." % method)
optimfunc = methods[method]
return optimfunc(x, brack)
def boxcox_normplot(x, la, lb, plot=None, N=80):
"""Compute parameters for a Box-Cox normality plot, optionally show it.
A Box-Cox normality plot shows graphically what the best transformation
parameter is to use in `boxcox` to obtain a distribution that is close
to normal.
Parameters
----------
x : array_like
Input array.
la, lb : scalar
The lower and upper bounds for the ``lmbda`` values to pass to `boxcox`
for Box-Cox transformations. These are also the limits of the
horizontal axis of the plot if that is generated.
plot : object, optional
If given, plots the quantiles and least squares fit.
`plot` is an object that has to have methods "plot" and "text".
The `matplotlib.pyplot` module or a Matplotlib Axes object can be used,
or a custom object with the same methods.
Default is None, which means that no plot is created.
N : int, optional
Number of points on the horizontal axis (equally distributed from
`la` to `lb`).
Returns
-------
lmbdas : ndarray
The ``lmbda`` values for which a Box-Cox transform was done.
ppcc : ndarray
        Probability Plot Correlation Coefficient, as obtained from `probplot`
when fitting the Box-Cox transformed input `x` against a normal
distribution.
See Also
--------
probplot, boxcox, boxcox_normmax, boxcox_llf, ppcc_max
Notes
-----
Even if `plot` is given, the figure is not shown or saved by
`boxcox_normplot`; ``plt.show()`` or ``plt.savefig('figname.png')``
    should be used after calling `boxcox_normplot`.
Examples
--------
>>> from scipy import stats
>>> import matplotlib.pyplot as plt
Generate some non-normally distributed data, and create a Box-Cox plot:
>>> x = stats.loggamma.rvs(5, size=500) + 5
>>> fig = plt.figure()
>>> ax = fig.add_subplot(111)
>>> stats.boxcox_normplot(x, -20, 20, plot=ax)
Determine and plot the optimal ``lmbda`` to transform ``x`` and plot it in
the same plot:
>>> _, maxlog = stats.boxcox(x)
>>> ax.axvline(maxlog, color='r')
>>> plt.show()
"""
x = np.asarray(x)
if x.size == 0:
return x
if lb <= la:
raise ValueError("`lb` has to be larger than `la`.")
lmbdas = np.linspace(la, lb, num=N)
ppcc = lmbdas * 0.0
for i, val in enumerate(lmbdas):
# Determine for each lmbda the correlation coefficient of transformed x
z = boxcox(x, lmbda=val)
_, r2 = probplot(z, dist='norm', fit=True)
ppcc[i] = r2[-1]
if plot is not None:
plot.plot(lmbdas, ppcc, 'x')
try:
if hasattr(plot, 'set_title'):
# Matplotlib Axes instance or something that looks like it
plot.set_title('Box-Cox Normality Plot')
plot.set_ylabel('Prob Plot Corr. Coef.')
            plot.set_xlabel(r'$\lambda$')
else:
# matplotlib.pyplot module
plot.title('Box-Cox Normality Plot')
plot.ylabel('Prob Plot Corr. Coef.')
            plot.xlabel(r'$\lambda$')
except Exception:
# Not an MPL object or something that looks (enough) like it.
# Don't crash on adding labels or title
pass
return lmbdas, ppcc
def shapiro(x, a=None, reta=False):
"""
Perform the Shapiro-Wilk test for normality.
The Shapiro-Wilk test tests the null hypothesis that the
data was drawn from a normal distribution.
Parameters
----------
x : array_like
Array of sample data.
a : array_like, optional
Array of internal parameters used in the calculation. If these
are not given, they will be computed internally. If x has length
n, then a must have length n/2.
reta : bool, optional
Whether or not to return the internally computed a values. The
default is False.
Returns
-------
W : float
The test statistic.
p-value : float
The p-value for the hypothesis test.
a : array_like, optional
If `reta` is True, then these are the internally computed "a"
values that may be passed into this function on future calls.
See Also
--------
anderson : The Anderson-Darling test for normality
References
----------
.. [1] http://www.itl.nist.gov/div898/handbook/prc/section2/prc213.htm
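    Examples
    --------
    A minimal sketch (the exact statistic and p-value depend on the random
    sample drawn):
    >>> from scipy import stats
    >>> np.random.seed(12345678)
    >>> x = stats.norm.rvs(loc=5, scale=3, size=100)
    >>> W, p = stats.shapiro(x)
    A large p-value indicates the normality hypothesis cannot be rejected.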
"""
N = len(x)
if N < 3:
raise ValueError("Data must be at least length 3.")
if a is None:
a = zeros(N,'f')
init = 0
else:
if len(a) != N//2:
raise ValueError("len(a) must equal len(x)/2")
init = 1
y = sort(x)
a, w, pw, ifault = statlib.swilk(y, a[:N//2], init)
if ifault not in [0,2]:
warnings.warn(str(ifault))
if N > 5000:
warnings.warn("p-value may not be accurate for N > 5000.")
if reta:
return w, pw, a
else:
return w, pw
# Values from Stephens, M A, "EDF Statistics for Goodness of Fit and
# Some Comparisons", Journal of the American Statistical
# Association, Vol. 69, Issue 347, Sept. 1974, pp 730-737
_Avals_norm = array([0.576, 0.656, 0.787, 0.918, 1.092])
_Avals_expon = array([0.922, 1.078, 1.341, 1.606, 1.957])
# From Stephens, M A, "Goodness of Fit for the Extreme Value Distribution",
# Biometrika, Vol. 64, Issue 3, Dec. 1977, pp 583-588.
_Avals_gumbel = array([0.474, 0.637, 0.757, 0.877, 1.038])
# From Stephens, M A, "Tests of Fit for the Logistic Distribution Based
# on the Empirical Distribution Function.", Biometrika,
# Vol. 66, Issue 3, Dec. 1979, pp 591-595.
_Avals_logistic = array([0.426, 0.563, 0.660, 0.769, 0.906, 1.010])
def anderson(x,dist='norm'):
"""
Anderson-Darling test for data coming from a particular distribution
The Anderson-Darling test is a modification of the Kolmogorov-
Smirnov test `kstest` for the null hypothesis that a sample is
drawn from a population that follows a particular distribution.
For the Anderson-Darling test, the critical values depend on
which distribution is being tested against. This function works
for normal, exponential, logistic, or Gumbel (Extreme Value
Type I) distributions.
Parameters
----------
x : array_like
array of sample data
dist : {'norm','expon','logistic','gumbel','extreme1'}, optional
the type of distribution to test against. The default is 'norm'
and 'extreme1' is a synonym for 'gumbel'
Returns
-------
A2 : float
The Anderson-Darling test statistic
critical : list
The critical values for this distribution
sig : list
The significance levels for the corresponding critical values
in percents. The function returns critical values for a
differing set of significance levels depending on the
distribution that is being tested against.
Notes
-----
Critical values provided are for the following significance levels:
    normal/exponential
15%, 10%, 5%, 2.5%, 1%
logistic
25%, 10%, 5%, 2.5%, 1%, 0.5%
Gumbel
25%, 10%, 5%, 2.5%, 1%
If A2 is larger than these critical values then for the corresponding
significance level, the null hypothesis that the data come from the
chosen distribution can be rejected.
References
----------
.. [1] http://www.itl.nist.gov/div898/handbook/prc/section2/prc213.htm
.. [2] Stephens, M. A. (1974). EDF Statistics for Goodness of Fit and
Some Comparisons, Journal of the American Statistical Association,
Vol. 69, pp. 730-737.
.. [3] Stephens, M. A. (1976). Asymptotic Results for Goodness-of-Fit
Statistics with Unknown Parameters, Annals of Statistics, Vol. 4,
pp. 357-369.
.. [4] Stephens, M. A. (1977). Goodness of Fit for the Extreme Value
Distribution, Biometrika, Vol. 64, pp. 583-588.
.. [5] Stephens, M. A. (1977). Goodness of Fit with Special Reference
to Tests for Exponentiality , Technical Report No. 262,
Department of Statistics, Stanford University, Stanford, CA.
.. [6] Stephens, M. A. (1979). Tests of Fit for the Logistic Distribution
Based on the Empirical Distribution Function, Biometrika, Vol. 66,
pp. 591-595.
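    Examples
    --------
    A minimal sketch (illustrative only; the statistic depends on the random
    sample drawn):
    >>> from scipy import stats
    >>> np.random.seed(1234)
    >>> x = stats.norm.rvs(loc=0, scale=1, size=100)
    >>> A2, crit, sig = stats.anderson(x, dist='norm')
    If ``A2`` exceeds ``crit[i]``, normality can be rejected at the
    significance level ``sig[i]`` (in percent).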
"""
if dist not in ['norm','expon','gumbel','extreme1','logistic']:
raise ValueError("Invalid distribution; dist must be 'norm', "
"'expon', 'gumbel', 'extreme1' or 'logistic'.")
y = sort(x)
xbar = np.mean(x, axis=0)
N = len(y)
if dist == 'norm':
s = np.std(x, ddof=1, axis=0)
w = (y-xbar)/s
z = distributions.norm.cdf(w)
sig = array([15,10,5,2.5,1])
critical = around(_Avals_norm / (1.0 + 4.0/N - 25.0/N/N),3)
elif dist == 'expon':
w = y / xbar
z = distributions.expon.cdf(w)
sig = array([15,10,5,2.5,1])
critical = around(_Avals_expon / (1.0 + 0.6/N),3)
elif dist == 'logistic':
def rootfunc(ab,xj,N):
a,b = ab
tmp = (xj-a)/b
tmp2 = exp(tmp)
val = [sum(1.0/(1+tmp2),axis=0)-0.5*N,
sum(tmp*(1.0-tmp2)/(1+tmp2),axis=0)+N]
return array(val)
sol0 = array([xbar,np.std(x, ddof=1, axis=0)])
sol = optimize.fsolve(rootfunc,sol0,args=(x,N),xtol=1e-5)
w = (y-sol[0])/sol[1]
z = distributions.logistic.cdf(w)
sig = array([25,10,5,2.5,1,0.5])
critical = around(_Avals_logistic / (1.0+0.25/N),3)
else: # (dist == 'gumbel') or (dist == 'extreme1'):
# the following is incorrect, see ticket:1097
#def fixedsolve(th,xj,N):
# val = stats.sum(xj)*1.0/N
# tmp = exp(-xj/th)
# term = sum(xj*tmp,axis=0)
# term /= sum(tmp,axis=0)
# return val - term
#s = optimize.fixed_point(fixedsolve, 1.0, args=(x,N),xtol=1e-5)
#xbar = -s*log(sum(exp(-x/s),axis=0)*1.0/N)
xbar, s = distributions.gumbel_l.fit(x)
w = (y-xbar)/s
z = distributions.gumbel_l.cdf(w)
sig = array([25,10,5,2.5,1])
critical = around(_Avals_gumbel / (1.0 + 0.2/sqrt(N)),3)
i = arange(1,N+1)
S = sum((2*i-1.0)/N*(log(z)+log(1-z[::-1])),axis=0)
A2 = -N-S
return A2, critical, sig
def _anderson_ksamp_midrank(samples, Z, Zstar, k, n, N):
"""
Compute A2akN equation 7 of Scholz and Stephens.
Parameters
----------
samples : sequence of 1-D array_like
Array of sample arrays.
Z : array_like
Sorted array of all observations.
Zstar : array_like
Sorted array of unique observations.
k : int
Number of samples.
n : array_like
Number of observations in each sample.
N : int
Total number of observations.
Returns
-------
A2aKN : float
The A2aKN statistics of Scholz and Stephens 1987.
"""
A2akN = 0.
Z_ssorted_left = Z.searchsorted(Zstar, 'left')
if N == Zstar.size:
lj = 1.
else:
lj = Z.searchsorted(Zstar, 'right') - Z_ssorted_left
Bj = Z_ssorted_left + lj / 2.
for i in arange(0, k):
s = np.sort(samples[i])
s_ssorted_right = s.searchsorted(Zstar, side='right')
        Mij = s_ssorted_right.astype(float)
fij = s_ssorted_right - s.searchsorted(Zstar, 'left')
Mij -= fij / 2.
inner = lj / float(N) * (N * Mij - Bj * n[i])**2 / \
(Bj * (N - Bj) - N * lj / 4.)
A2akN += inner.sum() / n[i]
A2akN *= (N - 1.) / N
return A2akN
def _anderson_ksamp_right(samples, Z, Zstar, k, n, N):
"""
Compute A2akN equation 6 of Scholz & Stephens.
Parameters
----------
samples : sequence of 1-D array_like
Array of sample arrays.
Z : array_like
Sorted array of all observations.
Zstar : array_like
Sorted array of unique observations.
k : int
Number of samples.
n : array_like
Number of observations in each sample.
N : int
Total number of observations.
Returns
-------
A2KN : float
The A2KN statistics of Scholz and Stephens 1987.
"""
A2kN = 0.
lj = Z.searchsorted(Zstar[:-1], 'right') - Z.searchsorted(Zstar[:-1],
'left')
Bj = lj.cumsum()
for i in arange(0, k):
s = np.sort(samples[i])
Mij = s.searchsorted(Zstar[:-1], side='right')
inner = lj / float(N) * (N * Mij - Bj * n[i])**2 / (Bj * (N - Bj))
A2kN += inner.sum() / n[i]
return A2kN
def anderson_ksamp(samples, midrank=True):
"""The Anderson-Darling test for k-samples.
The k-sample Anderson-Darling test is a modification of the
one-sample Anderson-Darling test. It tests the null hypothesis
that k-samples are drawn from the same population without having
to specify the distribution function of that population. The
critical values depend on the number of samples.
Parameters
----------
samples : sequence of 1-D array_like
Array of sample data in arrays.
midrank : bool, optional
Type of Anderson-Darling test which is computed. Default
(True) is the midrank test applicable to continuous and
discrete populations. If False, the right side empirical
distribution is used.
Returns
-------
A2 : float
Normalized k-sample Anderson-Darling test statistic.
critical : array
The critical values for significance levels 25%, 10%, 5%, 2.5%, 1%.
p : float
An approximate significance level at which the null hypothesis for the
provided samples can be rejected.
Raises
------
ValueError
If less than 2 samples are provided, a sample is empty, or no
distinct observations are in the samples.
See Also
--------
ks_2samp : 2 sample Kolmogorov-Smirnov test
anderson : 1 sample Anderson-Darling test
Notes
-----
    [1]_ defines three versions of the k-sample Anderson-Darling test:
one for continuous distributions and two for discrete
distributions, in which ties between samples may occur. The
default of this routine is to compute the version based on the
midrank empirical distribution function. This test is applicable
to continuous and discrete data. If midrank is set to False, the
right side empirical distribution is used for a test for discrete
data. According to [1]_, the two discrete test statistics differ
only slightly if a few collisions due to round-off errors occur in
the test not adjusted for ties between samples.
.. versionadded:: 0.14.0
References
----------
.. [1] Scholz, F. W and Stephens, M. A. (1987), K-Sample
Anderson-Darling Tests, Journal of the American Statistical
Association, Vol. 82, pp. 918-924.
Examples
--------
>>> from scipy import stats
>>> np.random.seed(314159)
The null hypothesis that the two random samples come from the same
distribution can be rejected at the 5% level because the returned
test value is greater than the critical value for 5% (1.961) but
not at the 2.5% level. The interpolation gives an approximate
significance level of 3.1%:
>>> stats.anderson_ksamp([np.random.normal(size=50),
... np.random.normal(loc=0.5, size=30)])
(2.4615796189876105,
array([ 0.325, 1.226, 1.961, 2.718, 3.752]),
0.03134990135800783)
The null hypothesis cannot be rejected for three samples from an
identical distribution. The approximate p-value (87%) has to be
computed by extrapolation and may not be very accurate:
>>> stats.anderson_ksamp([np.random.normal(size=50),
... np.random.normal(size=30), np.random.normal(size=20)])
(-0.73091722665244196,
array([ 0.44925884, 1.3052767 , 1.9434184 , 2.57696569, 3.41634856]),
0.8789283903979661)
"""
k = len(samples)
if (k < 2):
raise ValueError("anderson_ksamp needs at least two samples")
samples = list(map(np.asarray, samples))
Z = np.sort(np.hstack(samples))
N = Z.size
Zstar = np.unique(Z)
if Zstar.size < 2:
raise ValueError("anderson_ksamp needs more than one distinct "
"observation")
n = np.array([sample.size for sample in samples])
if any(n == 0):
raise ValueError("anderson_ksamp encountered sample without "
"observations")
if midrank:
A2kN = _anderson_ksamp_midrank(samples, Z, Zstar, k, n, N)
else:
A2kN = _anderson_ksamp_right(samples, Z, Zstar, k, n, N)
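    # h, H and g below are the auxiliary sums that enter the variance of the
    # k-sample statistic (sigmasq) as given by Scholz and Stephens (1987).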
h = (1. / arange(1, N)).sum()
H = (1. / n).sum()
g = 0
for l in arange(1, N-1):
inner = np.array([1. / ((N - l) * m) for m in arange(l+1, N)])
g += inner.sum()
a = (4*g - 6) * (k - 1) + (10 - 6*g)*H
b = (2*g - 4)*k**2 + 8*h*k + (2*g - 14*h - 4)*H - 8*h + 4*g - 6
c = (6*h + 2*g - 2)*k**2 + (4*h - 4*g + 6)*k + (2*h - 6)*H + 4*h
d = (2*h + 6)*k**2 - 4*h*k
sigmasq = (a*N**3 + b*N**2 + c*N + d) / ((N - 1.) * (N - 2.) * (N - 3.))
m = k - 1
A2 = (A2kN - m) / math.sqrt(sigmasq)
# The b_i values are the interpolation coefficients from Table 2
# of Scholz and Stephens 1987
b0 = np.array([0.675, 1.281, 1.645, 1.96, 2.326])
b1 = np.array([-0.245, 0.25, 0.678, 1.149, 1.822])
b2 = np.array([-0.105, -0.305, -0.362, -0.391, -0.396])
critical = b0 + b1 / math.sqrt(m) + b2 / m
pf = np.polyfit(critical, log(np.array([0.25, 0.1, 0.05, 0.025, 0.01])), 2)
if A2 < critical.min() or A2 > critical.max():
warnings.warn("approximate p-value will be computed by extrapolation")
p = math.exp(np.polyval(pf, A2))
return A2, critical, p
def ansari(x,y):
"""
Perform the Ansari-Bradley test for equal scale parameters
The Ansari-Bradley test is a non-parametric test for the equality
of the scale parameter of the distributions from which two
samples were drawn.
Parameters
----------
x, y : array_like
arrays of sample data
Returns
-------
AB : float
The Ansari-Bradley test statistic
p-value : float
The p-value of the hypothesis test
See Also
--------
fligner : A non-parametric test for the equality of k variances
mood : A non-parametric test for the equality of two scale parameters
Notes
-----
The p-value given is exact when the sample sizes are both less than
55 and there are no ties, otherwise a normal approximation for the
p-value is used.
References
----------
.. [1] Sprent, Peter and N.C. Smeeton. Applied nonparametric statistical
methods. 3rd ed. Chapman and Hall/CRC. 2001. Section 5.8.2.
"""
x,y = asarray(x),asarray(y)
n = len(x)
m = len(y)
if m < 1:
raise ValueError("Not enough other observations.")
if n < 1:
raise ValueError("Not enough test observations.")
N = m+n
xy = r_[x,y] # combine
rank = stats.rankdata(xy)
symrank = amin(array((rank,N-rank+1)),0)
AB = sum(symrank[:n],axis=0)
uxy = unique(xy)
repeats = (len(uxy) != len(xy))
exact = ((m < 55) and (n < 55) and not repeats)
if repeats and ((m < 55) or (n < 55)):
warnings.warn("Ties preclude use of exact statistic.")
if exact:
astart, a1, ifault = statlib.gscale(n,m)
ind = AB-astart
total = sum(a1,axis=0)
if ind < len(a1)/2.0:
cind = int(ceil(ind))
if (ind == cind):
pval = 2.0*sum(a1[:cind+1],axis=0)/total
else:
pval = 2.0*sum(a1[:cind],axis=0)/total
else:
find = int(floor(ind))
if (ind == floor(ind)):
pval = 2.0*sum(a1[find:],axis=0)/total
else:
pval = 2.0*sum(a1[find+1:],axis=0)/total
return AB, min(1.0,pval)
# otherwise compute normal approximation
if N % 2: # N odd
mnAB = n*(N+1.0)**2 / 4.0 / N
varAB = n*m*(N+1.0)*(3+N**2)/(48.0*N**2)
else:
mnAB = n*(N+2.0)/4.0
varAB = m*n*(N+2)*(N-2.0)/48/(N-1.0)
if repeats: # adjust variance estimates
# compute sum(tj * rj**2,axis=0)
fac = sum(symrank**2,axis=0)
if N % 2: # N odd
varAB = m*n*(16*N*fac-(N+1)**4)/(16.0 * N**2 * (N-1))
else: # N even
varAB = m*n*(16*fac-N*(N+2)**2)/(16.0 * N * (N-1))
z = (AB - mnAB)/sqrt(varAB)
pval = distributions.norm.sf(abs(z)) * 2.0
return AB, pval
def bartlett(*args):
"""
Perform Bartlett's test for equal variances
Bartlett's test tests the null hypothesis that all input samples
are from populations with equal variances. For samples
from significantly non-normal populations, Levene's test
`levene` is more robust.
Parameters
----------
sample1, sample2,... : array_like
arrays of sample data. May be different lengths.
Returns
-------
T : float
The test statistic.
p-value : float
The p-value of the test.
References
----------
.. [1] http://www.itl.nist.gov/div898/handbook/eda/section3/eda357.htm
.. [2] Snedecor, George W. and Cochran, William G. (1989), Statistical
Methods, Eighth Edition, Iowa State University Press.
"""
k = len(args)
if k < 2:
raise ValueError("Must enter at least two input sample vectors.")
Ni = zeros(k)
ssq = zeros(k,'d')
for j in range(k):
Ni[j] = len(args[j])
ssq[j] = np.var(args[j], ddof=1)
Ntot = sum(Ni,axis=0)
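    # Pooled within-group variance, then Bartlett's statistic with the usual
    # correction factor in the denominator.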
spsq = sum((Ni-1)*ssq,axis=0)/(1.0*(Ntot-k))
numer = (Ntot*1.0-k)*log(spsq) - sum((Ni-1.0)*log(ssq),axis=0)
denom = 1.0 + (1.0/(3*(k-1)))*((sum(1.0/(Ni-1.0),axis=0))-1.0/(Ntot-k))
T = numer / denom
pval = distributions.chi2.sf(T,k-1) # 1 - cdf
return T, pval
def levene(*args, **kwds):
"""
Perform Levene test for equal variances.
The Levene test tests the null hypothesis that all input samples
are from populations with equal variances. Levene's test is an
alternative to Bartlett's test `bartlett` in the case where
there are significant deviations from normality.
Parameters
----------
sample1, sample2, ... : array_like
The sample data, possibly with different lengths
center : {'mean', 'median', 'trimmed'}, optional
Which function of the data to use in the test. The default
is 'median'.
proportiontocut : float, optional
When `center` is 'trimmed', this gives the proportion of data points
to cut from each end. (See `scipy.stats.trim_mean`.)
Default is 0.05.
Returns
-------
W : float
The test statistic.
p-value : float
The p-value for the test.
Notes
-----
Three variations of Levene's test are possible. The possibilities
and their recommended usages are:
    * 'median' : Recommended for skewed (non-normal) distributions.
* 'mean' : Recommended for symmetric, moderate-tailed distributions.
* 'trimmed' : Recommended for heavy-tailed distributions.
References
----------
.. [1] http://www.itl.nist.gov/div898/handbook/eda/section3/eda35a.htm
.. [2] Levene, H. (1960). In Contributions to Probability and Statistics:
Essays in Honor of Harold Hotelling, I. Olkin et al. eds.,
Stanford University Press, pp. 278-292.
.. [3] Brown, M. B. and Forsythe, A. B. (1974), Journal of the American
Statistical Association, 69, 364-367
"""
# Handle keyword arguments.
center = 'median'
proportiontocut = 0.05
for kw, value in kwds.items():
if kw not in ['center', 'proportiontocut']:
raise TypeError("levene() got an unexpected keyword "
"argument '%s'" % kw)
if kw == 'center':
center = value
else:
proportiontocut = value
k = len(args)
if k < 2:
raise ValueError("Must enter at least two input sample vectors.")
Ni = zeros(k)
Yci = zeros(k, 'd')
if center not in ['mean', 'median', 'trimmed']:
        raise ValueError("Keyword argument <center> must be 'mean', 'median' "
                         "or 'trimmed'.")
if center == 'median':
func = lambda x: np.median(x, axis=0)
elif center == 'mean':
func = lambda x: np.mean(x, axis=0)
else: # center == 'trimmed'
args = tuple(stats.trimboth(np.sort(arg), proportiontocut)
for arg in args)
func = lambda x: np.mean(x, axis=0)
for j in range(k):
Ni[j] = len(args[j])
Yci[j] = func(args[j])
Ntot = sum(Ni, axis=0)
# compute Zij's
Zij = [None]*k
for i in range(k):
Zij[i] = abs(asarray(args[i])-Yci[i])
# compute Zbari
Zbari = zeros(k, 'd')
Zbar = 0.0
for i in range(k):
Zbari[i] = np.mean(Zij[i], axis=0)
Zbar += Zbari[i]*Ni[i]
Zbar /= Ntot
numer = (Ntot-k) * sum(Ni*(Zbari-Zbar)**2, axis=0)
# compute denom_variance
dvar = 0.0
for i in range(k):
dvar += sum((Zij[i]-Zbari[i])**2, axis=0)
denom = (k-1.0)*dvar
W = numer / denom
pval = distributions.f.sf(W, k-1, Ntot-k) # 1 - cdf
return W, pval
@setastest(False)
def binom_test(x, n=None, p=0.5):
"""
Perform a test that the probability of success is p.
This is an exact, two-sided test of the null hypothesis
that the probability of success in a Bernoulli experiment
is `p`.
Parameters
----------
x : integer or array_like
the number of successes, or if x has length 2, it is the
number of successes and the number of failures.
n : integer
the number of trials. This is ignored if x gives both the
number of successes and failures
p : float, optional
The hypothesized probability of success. 0 <= p <= 1. The
default value is p = 0.5
Returns
-------
p-value : float
The p-value of the hypothesis test
References
----------
.. [1] http://en.wikipedia.org/wiki/Binomial_test
"""
x = atleast_1d(x).astype(np.integer)
if len(x) == 2:
n = x[1]+x[0]
x = x[0]
elif len(x) == 1:
x = x[0]
if n is None or n < x:
raise ValueError("n must be >= x")
n = np.int_(n)
else:
raise ValueError("Incorrect length for x.")
if (p > 1.0) or (p < 0.0):
raise ValueError("p must be in range [0,1]")
d = distributions.binom.pmf(x, n, p)
rerr = 1+1e-7
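    # rerr is a small relative tolerance used when comparing tail
    # probabilities against pmf(x), guarding against floating-point round-off.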
if (x == p*n):
# special case as shortcut, would also be handled by `else` below
pval = 1.
elif (x < p*n):
i = np.arange(np.ceil(p*n), n+1)
y = np.sum(distributions.binom.pmf(i, n, p) <= d*rerr, axis=0)
pval = (distributions.binom.cdf(x, n, p) +
distributions.binom.sf(n-y, n, p))
else:
i = np.arange(np.floor(p*n) + 1)
y = np.sum(distributions.binom.pmf(i, n, p) <= d*rerr, axis=0)
pval = (distributions.binom.cdf(y-1, n, p) +
distributions.binom.sf(x-1, n, p))
return min(1.0, pval)
def _apply_func(x, g, func):
# g is list of indices into x
# separating x into different groups
# func should be applied over the groups
g = unique(r_[0, g, len(x)])
output = []
for k in range(len(g)-1):
output.append(func(x[g[k]:g[k+1]]))
return asarray(output)
def fligner(*args, **kwds):
"""
Perform Fligner's test for equal variances.
Fligner's test tests the null hypothesis that all input samples
are from populations with equal variances. Fligner's test is
non-parametric in contrast to Bartlett's test `bartlett` and
Levene's test `levene`.
Parameters
----------
sample1, sample2, ... : array_like
Arrays of sample data. Need not be the same length.
center : {'mean', 'median', 'trimmed'}, optional
Keyword argument controlling which function of the data is used in
computing the test statistic. The default is 'median'.
proportiontocut : float, optional
When `center` is 'trimmed', this gives the proportion of data points
to cut from each end. (See `scipy.stats.trim_mean`.)
Default is 0.05.
Returns
-------
Xsq : float
The test statistic.
p-value : float
The p-value for the hypothesis test.
Notes
-----
As with Levene's test there are three variants of Fligner's test that
differ by the measure of central tendency used in the test. See `levene`
for more information.
References
----------
.. [1] http://www.stat.psu.edu/~bgl/center/tr/TR993.ps
.. [2] Fligner, M.A. and Killeen, T.J. (1976). Distribution-free two-sample
tests for scale. 'Journal of the American Statistical Association.'
71(353), 210-213.
"""
# Handle keyword arguments.
center = 'median'
proportiontocut = 0.05
for kw, value in kwds.items():
if kw not in ['center', 'proportiontocut']:
raise TypeError("fligner() got an unexpected keyword "
"argument '%s'" % kw)
if kw == 'center':
center = value
else:
proportiontocut = value
k = len(args)
if k < 2:
raise ValueError("Must enter at least two input sample vectors.")
if center not in ['mean','median','trimmed']:
        raise ValueError("Keyword argument <center> must be 'mean', 'median' "
                         "or 'trimmed'.")
if center == 'median':
func = lambda x: np.median(x, axis=0)
elif center == 'mean':
func = lambda x: np.mean(x, axis=0)
else: # center == 'trimmed'
args = tuple(stats.trimboth(arg, proportiontocut) for arg in args)
func = lambda x: np.mean(x, axis=0)
Ni = asarray([len(args[j]) for j in range(k)])
Yci = asarray([func(args[j]) for j in range(k)])
Ntot = sum(Ni, axis=0)
# compute Zij's
Zij = [abs(asarray(args[i]) - Yci[i]) for i in range(k)]
allZij = []
g = [0]
for i in range(k):
allZij.extend(list(Zij[i]))
g.append(len(allZij))
ranks = stats.rankdata(allZij)
a = distributions.norm.ppf(ranks/(2*(Ntot + 1.0)) + 0.5)
# compute Aibar
Aibar = _apply_func(a, g, sum) / Ni
anbar = np.mean(a, axis=0)
varsq = np.var(a, axis=0, ddof=1)
Xsq = sum(Ni*(asarray(Aibar) - anbar)**2.0, axis=0)/varsq
pval = distributions.chi2.sf(Xsq, k - 1) # 1 - cdf
return Xsq, pval
def mood(x, y, axis=0):
"""
Perform Mood's test for equal scale parameters.
Mood's two-sample test for scale parameters is a non-parametric
test for the null hypothesis that two samples are drawn from the
same distribution with the same scale parameter.
Parameters
----------
x, y : array_like
Arrays of sample data.
    axis : int, optional
The axis along which the samples are tested. `x` and `y` can be of
different length along `axis`.
If `axis` is None, `x` and `y` are flattened and the test is done on
all values in the flattened arrays.
Returns
-------
z : scalar or ndarray
The z-score for the hypothesis test. For 1-D inputs a scalar is
returned.
    p-value : scalar or ndarray
The p-value for the hypothesis test.
See Also
--------
fligner : A non-parametric test for the equality of k variances
ansari : A non-parametric test for the equality of 2 variances
bartlett : A parametric test for equality of k variances in normal samples
levene : A parametric test for equality of k variances
Notes
-----
The data are assumed to be drawn from probability distributions ``f(x)``
and ``f(x/s) / s`` respectively, for some probability density function f.
The null hypothesis is that ``s == 1``.
For multi-dimensional arrays, if the inputs are of shapes
``(n0, n1, n2, n3)`` and ``(n0, m1, n2, n3)``, then if ``axis=1``, the
resulting z and p values will have shape ``(n0, n2, n3)``. Note that
``n1`` and ``m1`` don't have to be equal, but the other dimensions do.
Examples
--------
>>> from scipy import stats
>>> x2 = np.random.randn(2, 45, 6, 7)
>>> x1 = np.random.randn(2, 30, 6, 7)
>>> z, p = stats.mood(x1, x2, axis=1)
>>> p.shape
(2, 6, 7)
Find the number of points where the difference in scale is not significant:
>>> (p > 0.1).sum()
74
Perform the test with different scales:
>>> x1 = np.random.randn(2, 30)
>>> x2 = np.random.randn(2, 35) * 10.0
>>> stats.mood(x1, x2, axis=1)
(array([-5.84332354, -5.6840814 ]), array([5.11694980e-09, 1.31517628e-08]))
"""
x = np.asarray(x, dtype=float)
y = np.asarray(y, dtype=float)
if axis is None:
x = x.flatten()
y = y.flatten()
axis = 0
# Determine shape of the result arrays
res_shape = tuple([x.shape[ax] for ax in range(len(x.shape)) if ax != axis])
if not (res_shape == tuple([y.shape[ax] for ax in range(len(y.shape)) if
ax != axis])):
raise ValueError("Dimensions of x and y on all axes except `axis` "
"should match")
n = x.shape[axis]
m = y.shape[axis]
N = m + n
if N < 3:
raise ValueError("Not enough observations.")
xy = np.concatenate((x, y), axis=axis)
if axis != 0:
xy = np.rollaxis(xy, axis)
xy = xy.reshape(xy.shape[0], -1)
# Generalized to the n-dimensional case by adding the axis argument, and
# using for loops, since rankdata is not vectorized. For improving
# performance consider vectorizing rankdata function.
all_ranks = np.zeros_like(xy)
for j in range(xy.shape[1]):
all_ranks[:, j] = stats.rankdata(xy[:, j])
Ri = all_ranks[:n]
M = sum((Ri - (N + 1.0) / 2) ** 2, axis=0)
# Approx stat.
mnM = n * (N * N - 1.0) / 12
varM = m * n * (N + 1.0) * (N + 2) * (N - 2) / 180
z = (M - mnM) / sqrt(varM)
# sf for right tail, cdf for left tail. Factor 2 for two-sidedness
z_pos = z > 0
pval = np.zeros_like(z)
pval[z_pos] = 2 * distributions.norm.sf(z[z_pos])
pval[~z_pos] = 2 * distributions.norm.cdf(z[~z_pos])
if res_shape == ():
# Return scalars, not 0-D arrays
z = z[0]
pval = pval[0]
else:
z.shape = res_shape
pval.shape = res_shape
return z, pval
def wilcoxon(x, y=None, zero_method="wilcox", correction=False):
"""
Calculate the Wilcoxon signed-rank test.
The Wilcoxon signed-rank test tests the null hypothesis that two
related paired samples come from the same distribution. In particular,
it tests whether the distribution of the differences x - y is symmetric
about zero. It is a non-parametric version of the paired T-test.
Parameters
----------
x : array_like
The first set of measurements.
y : array_like, optional
The second set of measurements. If `y` is not given, then the `x`
array is considered to be the differences between the two sets of
measurements.
zero_method : string, {"pratt", "wilcox", "zsplit"}, optional
"pratt":
Pratt treatment: includes zero-differences in the ranking process
(more conservative)
"wilcox":
Wilcox treatment: discards all zero-differences
"zsplit":
            Zero rank split: just like Pratt, but splitting the zero rank
between positive and negative ones
correction : bool, optional
If True, apply continuity correction by adjusting the Wilcoxon rank
statistic by 0.5 towards the mean value when computing the
z-statistic. Default is False.
Returns
-------
T : float
The sum of the ranks of the differences above or below zero, whichever
is smaller.
p-value : float
The two-sided p-value for the test.
Notes
-----
Because the normal approximation is used for the calculations, the
samples used should be large. A typical rule is to require that
n > 20.
References
----------
.. [1] http://en.wikipedia.org/wiki/Wilcoxon_signed-rank_test
"""
    if zero_method not in ["wilcox", "pratt", "zsplit"]:
        raise ValueError("Zero method should be either 'wilcox', "
                         "'pratt' or 'zsplit'")
if y is None:
d = x
else:
x, y = map(asarray, (x, y))
if len(x) != len(y):
raise ValueError('Unequal N in wilcoxon. Aborting.')
d = x-y
if zero_method == "wilcox":
d = compress(not_equal(d, 0), d, axis=-1) # Keep all non-zero differences
count = len(d)
if (count < 10):
warnings.warn("Warning: sample size too small for normal approximation.")
r = stats.rankdata(abs(d))
r_plus = sum((d > 0) * r, axis=0)
r_minus = sum((d < 0) * r, axis=0)
if zero_method == "zsplit":
r_zero = sum((d == 0) * r, axis=0)
r_plus += r_zero / 2.
r_minus += r_zero / 2.
T = min(r_plus, r_minus)
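    # Normal approximation: under H0, E[T] = n(n+1)/4 and
    # Var[T] = n(n+1)(2n+1)/24; `se` holds 24*Var until the sqrt below.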
mn = count*(count + 1.) * 0.25
se = count*(count + 1.) * (2. * count + 1.)
if zero_method == "pratt":
r = r[d != 0]
replist, repnum = find_repeats(r)
if repnum.size != 0:
# Correction for repeated elements.
se -= 0.5 * (repnum * (repnum * repnum - 1)).sum()
se = sqrt(se / 24)
correction = 0.5 * int(bool(correction)) * np.sign(T - mn)
z = (T - mn - correction) / se
prob = 2. * distributions.norm.sf(abs(z))
return T, prob
@setastest(False)
def median_test(*args, **kwds):
"""
Mood's median test.
Test that two or more samples come from populations with the same median.
Let ``n = len(args)`` be the number of samples. The "grand median" of
all the data is computed, and a contingency table is formed by
classifying the values in each sample as being above or below the grand
median. The contingency table, along with `correction` and `lambda_`,
are passed to `scipy.stats.chi2_contingency` to compute the test statistic
and p-value.
Parameters
----------
sample1, sample2, ... : array_like
The set of samples. There must be at least two samples.
Each sample must be a one-dimensional sequence containing at least
one value. The samples are not required to have the same length.
ties : str, optional
Determines how values equal to the grand median are classified in
the contingency table. The string must be one of::
"below":
Values equal to the grand median are counted as "below".
"above":
Values equal to the grand median are counted as "above".
"ignore":
Values equal to the grand median are not counted.
The default is "below".
correction : bool, optional
If True, *and* there are just two samples, apply Yates' correction
for continuity when computing the test statistic associated with
the contingency table. Default is True.
lambda_ : float or str, optional.
By default, the statistic computed in this test is Pearson's
chi-squared statistic. `lambda_` allows a statistic from the
Cressie-Read power divergence family to be used instead. See
`power_divergence` for details.
Default is 1 (Pearson's chi-squared statistic).
Returns
-------
stat : float
The test statistic. The statistic that is returned is determined by
`lambda_`. The default is Pearson's chi-squared statistic.
p : float
The p-value of the test.
m : float
The grand median.
table : ndarray
The contingency table. The shape of the table is (2, n), where
n is the number of samples. The first row holds the counts of the
values above the grand median, and the second row holds the counts
of the values below the grand median. The table allows further
analysis with, for example, `scipy.stats.chi2_contingency`, or with
`scipy.stats.fisher_exact` if there are two samples, without having
to recompute the table.
See Also
--------
kruskal : Compute the Kruskal-Wallis H-test for independent samples.
mannwhitneyu : Computes the Mann-Whitney rank test on samples x and y.
Notes
-----
.. versionadded:: 0.15.0
References
----------
.. [1] Mood, A. M., Introduction to the Theory of Statistics. McGraw-Hill
(1950), pp. 394-399.
.. [2] Zar, J. H., Biostatistical Analysis, 5th ed. Prentice Hall (2010).
See Sections 8.12 and 10.15.
Examples
--------
A biologist runs an experiment in which there are three groups of plants.
Group 1 has 16 plants, group 2 has 15 plants, and group 3 has 17 plants.
Each plant produces a number of seeds. The seed counts for each group
are::
Group 1: 10 14 14 18 20 22 24 25 31 31 32 39 43 43 48 49
Group 2: 28 30 31 33 34 35 36 40 44 55 57 61 91 92 99
Group 3: 0 3 9 22 23 25 25 33 34 34 40 45 46 48 62 67 84
The following code applies Mood's median test to these samples.
>>> g1 = [10, 14, 14, 18, 20, 22, 24, 25, 31, 31, 32, 39, 43, 43, 48, 49]
>>> g2 = [28, 30, 31, 33, 34, 35, 36, 40, 44, 55, 57, 61, 91, 92, 99]
>>> g3 = [0, 3, 9, 22, 23, 25, 25, 33, 34, 34, 40, 45, 46, 48, 62, 67, 84]
>>> stat, p, med, tbl = median_test(g1, g2, g3)
The median is
>>> med
34.0
and the contingency table is
>>> tbl
array([[ 5, 10, 7],
[11, 5, 10]])
`p` is too large to conclude that the medians are not the same:
>>> p
0.12609082774093244
The "G-test" can be performed by passing ``lambda_="log-likelihood"`` to
`median_test`.
>>> g, p, med, tbl = median_test(g1, g2, g3, lambda_="log-likelihood")
>>> p
0.12224779737117837
The median occurs several times in the data, so we'll get a different
result if, for example, ``ties="above"`` is used:
>>> stat, p, med, tbl = median_test(g1, g2, g3, ties="above")
>>> p
0.063873276069553273
>>> tbl
array([[ 5, 11, 9],
[11, 4, 8]])
This example demonstrates that if the data set is not large and there
are values equal to the median, the p-value can be sensitive to the
choice of `ties`.
"""
ties = kwds.pop('ties', 'below')
correction = kwds.pop('correction', True)
lambda_ = kwds.pop('lambda_', None)
if len(kwds) > 0:
        bad_kwd = list(kwds.keys())[0]
raise TypeError("median_test() got an unexpected keyword "
"argument %r" % bad_kwd)
if len(args) < 2:
raise ValueError('median_test requires two or more samples.')
ties_options = ['below', 'above', 'ignore']
if ties not in ties_options:
raise ValueError("invalid 'ties' option '%s'; 'ties' must be one "
"of: %s" % (ties, str(ties_options)[1:-1]))
data = [np.asarray(arg) for arg in args]
# Validate the sizes and shapes of the arguments.
for k, d in enumerate(data):
if d.size == 0:
raise ValueError("Sample %d is empty. All samples must "
"contain at least one value." % (k + 1))
if d.ndim != 1:
raise ValueError("Sample %d has %d dimensions. All "
"samples must be one-dimensional sequences." %
(k + 1, d.ndim))
grand_median = np.median(np.concatenate(data))
# Create the contingency table.
table = np.zeros((2, len(data)), dtype=np.int64)
for k, sample in enumerate(data):
nabove = count_nonzero(sample > grand_median)
nbelow = count_nonzero(sample < grand_median)
nequal = sample.size - (nabove + nbelow)
table[0, k] += nabove
table[1, k] += nbelow
if ties == "below":
table[1, k] += nequal
elif ties == "above":
table[0, k] += nequal
# Check that no row or column of the table is all zero.
# Such a table can not be given to chi2_contingency, because it would have
# a zero in the table of expected frequencies.
rowsums = table.sum(axis=1)
if rowsums[0] == 0:
raise ValueError("All values are below the grand median (%r)." %
grand_median)
if rowsums[1] == 0:
raise ValueError("All values are above the grand median (%r)." %
grand_median)
if ties == "ignore":
# We already checked that each sample has at least one value, but it
# is possible that all those values equal the grand median. If `ties`
# is "ignore", that would result in a column of zeros in `table`. We
# check for that case here.
zero_cols = np.where((table == 0).all(axis=0))[0]
if len(zero_cols) > 0:
msg = ("All values in sample %d are equal to the grand "
"median (%r), so they are ignored, resulting in an "
"empty sample." % (zero_cols[0] + 1, grand_median))
raise ValueError(msg)
stat, p, dof, expected = chi2_contingency(table, lambda_=lambda_,
correction=correction)
return stat, p, grand_median, table
def _hermnorm(N):
# return the negatively normalized hermite polynomials up to order N-1
# (inclusive)
# using the recursive relationship
# p_n+1 = p_n(x)' - x*p_n(x)
# and p_0(x) = 1
plist = [None]*N
plist[0] = poly1d(1)
for n in range(1,N):
plist[n] = plist[n-1].deriv() - poly1d([1,0])*plist[n-1]
return plist
def pdf_fromgamma(g1, g2, g3=0.0, g4=None):
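    # Build an approximate pdf from the cumulant-based shape parameters
    # g1..g4 as a Hermite-polynomial series scaled by a Gaussian kernel
    # (an Edgeworth/Gram-Charlier-type expansion).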
if g4 is None:
g4 = 3*g2*g2
sigsq = 1.0/g2
sig = sqrt(sigsq)
mu = g1*sig**3.0
p12 = _hermnorm(13)
for k in range(13):
p12[k] = p12[k]/sig**k
# Add all of the terms to polynomial
totp = p12[0] - (g1/6.0*p12[3]) + \
(g2/24.0*p12[4] + g1*g1/72.0*p12[6]) - \
(g3/120.0*p12[5] + g1*g2/144.0*p12[7] + g1**3.0/1296.0*p12[9]) + \
(g4/720*p12[6] + (g2*g2/1152.0+g1*g3/720)*p12[8] +
g1*g1*g2/1728.0*p12[10] + g1**4.0/31104.0*p12[12])
# Final normalization
totp = totp / sqrt(2*pi)/sig
def thefunc(x):
xn = (x-mu)/sig
return totp(xn)*exp(-xn*xn/2.0)
return thefunc
def _circfuncs_common(samples, high, low):
samples = np.asarray(samples)
if samples.size == 0:
return np.nan, np.nan
ang = (samples - low)*2*pi / (high-low)
return samples, ang
def circmean(samples, high=2*pi, low=0, axis=None):
"""
Compute the circular mean for samples in a range.
Parameters
----------
samples : array_like
Input array.
high : float or int, optional
High boundary for circular mean range. Default is ``2*pi``.
low : float or int, optional
Low boundary for circular mean range. Default is 0.
axis : int, optional
Axis along which means are computed. The default is to compute
the mean of the flattened array.
Returns
-------
circmean : float
Circular mean.
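    Examples
    --------
    A small sketch (both inputs are equivalent to angles of about 0.1 and
    0.2 rad, so the circular mean should be close to 0.15):
    >>> from scipy.stats import circmean
    >>> circmean([0.1, 2*np.pi + 0.2])  # approximately 0.15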
"""
samples, ang = _circfuncs_common(samples, high, low)
res = angle(np.mean(exp(1j*ang), axis=axis))
mask = res < 0
if (mask.ndim > 0):
res[mask] += 2*pi
elif mask:
res = res + 2*pi
return res*(high-low)/2.0/pi + low
def circvar(samples, high=2*pi, low=0, axis=None):
"""
Compute the circular variance for samples assumed to be in a range
Parameters
----------
samples : array_like
Input array.
low : float or int, optional
Low boundary for circular variance range. Default is 0.
high : float or int, optional
High boundary for circular variance range. Default is ``2*pi``.
axis : int, optional
Axis along which variances are computed. The default is to compute
the variance of the flattened array.
Returns
-------
circvar : float
Circular variance.
Notes
-----
This uses a definition of circular variance that in the limit of small
angles returns a number close to the 'linear' variance.
"""
samples, ang = _circfuncs_common(samples, high, low)
res = np.mean(exp(1j*ang), axis=axis)
R = abs(res)
return ((high-low)/2.0/pi)**2 * 2 * log(1/R)
def circstd(samples, high=2*pi, low=0, axis=None):
"""
Compute the circular standard deviation for samples assumed to be in the
range [low to high].
Parameters
----------
samples : array_like
Input array.
low : float or int, optional
Low boundary for circular standard deviation range. Default is 0.
high : float or int, optional
High boundary for circular standard deviation range.
Default is ``2*pi``.
axis : int, optional
Axis along which standard deviations are computed. The default is
to compute the standard deviation of the flattened array.
Returns
-------
circstd : float
Circular standard deviation.
Notes
-----
This uses a definition of circular standard deviation that in the limit of
small angles returns a number close to the 'linear' standard deviation.
"""
samples, ang = _circfuncs_common(samples, high, low)
res = np.mean(exp(1j*ang), axis=axis)
R = abs(res)
return ((high-low)/2.0/pi) * sqrt(-2*log(R))
# Tests to include (from R) -- some of these already in stats.
########
# X Ansari-Bradley
# X Bartlett (and Levene)
# X Binomial
# Y Pearson's Chi-squared (stats.chisquare)
# Y Association Between Paired samples (stats.pearsonr, stats.spearmanr)
# stats.kendalltau) -- these need work though
# Fisher's exact test
# X Fligner-Killeen Test
# Y Friedman Rank Sum (stats.friedmanchisquare?)
# Y Kruskal-Wallis
# Y Kolmogorov-Smirnov
# Cochran-Mantel-Haenszel Chi-Squared for Count
# McNemar's Chi-squared for Count
# X Mood Two-Sample
# X Test For Equal Means in One-Way Layout (see stats.ttest also)
# Pairwise Comparisons of proportions
# Pairwise t tests
# Tabulate p values for pairwise comparisons
# Pairwise Wilcoxon rank sum tests
# Power calculations two sample test of prop.
# Power calculations for one and two sample t tests
# Equal or Given Proportions
# Trend in Proportions
# Quade Test
# Y Student's T Test
# Y F Test to compare two variances
# XY Wilcoxon Rank Sum and Signed Rank Tests
| apache-2.0 |
steveandroulakis/mytardis | tardis/tardis_portal/management/commands/checkhashes.py | 2 | 1777 | """
Management utility to check stored datafile hashes against the actual files.
"""
import hashlib
from contextlib import closing
from urllib2 import urlopen
from django.core.management.base import BaseCommand, CommandError
from tardis.tardis_portal.models import Dataset_File
CHUNK_SIZE = 32*1024
class AlgorithmNotSupported(Exception):
pass
class NoHashAvailable(Exception):
pass
class ValidationFailed(Exception):
pass
class Command(BaseCommand):
help = 'Used to check file hashes against actual files.'
def handle(self, *args, **options):
verbosity = int(options.get('verbosity', 1))
for df in Dataset_File.objects.all():
for algorithm in ('sha512', 'md5'):
try:
validate_digest(algorithm, df)
# Success, so don't try another
break
except AlgorithmNotSupported:
continue
except NoHashAvailable:
continue
except ValidationFailed:
self.stdout.write("%d/%s: FAILED\n" % (df.dataset.id, df))
if verbosity > 1:
self.stdout.write("%d/%s: OK\n" % (df.dataset.id, df))
def validate_digest(algorithm, datafile):
try:
expected = getattr(datafile, "%ssum" % algorithm)
except AttributeError:
raise AlgorithmNotSupported()
if not expected:
raise NoHashAvailable()
url = datafile.get_actual_url()
if not url:
raise ValidationFailed()
digest = getattr(hashlib, algorithm)()
with closing(urlopen(url)) as f:
for chunk in iter(lambda: f.read(CHUNK_SIZE), ''):
digest.update(chunk)
if expected != digest.hexdigest():
raise ValidationFailed
| bsd-3-clause |
opencorato/write-it | nuntium/tests/api/handle_bounces_test.py | 3 | 1453 | # -*- coding: utf-8 -*-
from django.core.management import call_command
from tastypie.test import ResourceTestCase, TestApiClient
from django.contrib.auth.models import User
from ...models import OutboundMessage, OutboundMessageIdentifier
class HandleBounces(ResourceTestCase):
def setUp(self):
super(HandleBounces, self).setUp()
call_command('loaddata', 'example_data', verbosity=0)
self.api_client = TestApiClient()
self.user = User.objects.get(id=1)
self.outbound_message = OutboundMessage.objects.get(id=1)
self.identifier = OutboundMessageIdentifier.objects.get(outbound_message=self.outbound_message)
def get_credentials(self):
credentials = self.create_apikey(username=self.user.username, api_key=self.user.api_key.key)
return credentials
def test_handle_bounces_endpoint(self):
url = '/api/v1/handle_bounce/'
bounce_data = {
'key': self.identifier.key,
}
resp = self.api_client.post(url, data=bounce_data, authentication=self.get_credentials())
self.assertHttpCreated(resp)
def test_handle_bounces_sets_the_contact_to_bounced(self):
url = '/api/v1/handle_bounce/'
bounce_data = {
'key': self.identifier.key,
}
self.api_client.post(url, data=bounce_data, authentication=self.get_credentials())
self.assertTrue(self.outbound_message.contact.is_bounced)
| gpl-3.0 |
vguerci/Deluge.app | deluge/plugins/autoadd/autoadd/webui.py | 6 | 1952 | #
# webui.py
#
# Copyright (C) 2009 GazpachoKing <chase.sterling@gmail.com>
#
# Basic plugin template created by:
# Copyright (C) 2008 Martijn Voncken <mvoncken@gmail.com>
# Copyright (C) 2007-2009 Andrew Resch <andrewresch@gmail.com>
# Copyright (C) 2009 Damien Churchill <damoxc@gmail.com>
#
# Deluge is free software.
#
# You may redistribute it and/or modify it under the terms of the
# GNU General Public License, as published by the Free Software
# Foundation; either version 3 of the License, or (at your option)
# any later version.
#
# deluge is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with deluge. If not, write to:
# The Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor
# Boston, MA 02110-1301, USA.
#
# In addition, as a special exception, the copyright holders give
# permission to link the code of portions of this program with the OpenSSL
# library.
# You must obey the GNU General Public License in all respects for all of
# the code used other than OpenSSL. If you modify file(s) with this
# exception, you may extend this exception to your version of the file(s),
# but you are not obligated to do so. If you do not wish to do so, delete
# this exception statement from your version. If you delete this exception
# statement from all source files in the program, then also delete it here.
#
from deluge.log import LOG as log
from deluge.ui.client import client
from deluge import component
from deluge.plugins.pluginbase import WebPluginBase
from common import get_resource
class WebUI(WebPluginBase):
scripts = [get_resource("autoadd.js")]
def enable(self):
pass
def disable(self):
pass
| gpl-3.0 |
CVML/scikit-learn | sklearn/feature_extraction/tests/test_feature_hasher.py | 258 | 2861 | from __future__ import unicode_literals
import numpy as np
from sklearn.feature_extraction import FeatureHasher
from nose.tools import assert_raises, assert_true
from numpy.testing import assert_array_equal, assert_equal
def test_feature_hasher_dicts():
h = FeatureHasher(n_features=16)
assert_equal("dict", h.input_type)
raw_X = [{"dada": 42, "tzara": 37}, {"gaga": 17}]
X1 = FeatureHasher(n_features=16).transform(raw_X)
gen = (iter(d.items()) for d in raw_X)
X2 = FeatureHasher(n_features=16, input_type="pair").transform(gen)
assert_array_equal(X1.toarray(), X2.toarray())
def test_feature_hasher_strings():
# mix byte and Unicode strings; note that "foo" is a duplicate in row 0
raw_X = [["foo", "bar", "baz", "foo".encode("ascii")],
["bar".encode("ascii"), "baz", "quux"]]
for lg_n_features in (7, 9, 11, 16, 22):
n_features = 2 ** lg_n_features
it = (x for x in raw_X) # iterable
h = FeatureHasher(n_features, non_negative=True, input_type="string")
X = h.transform(it)
assert_equal(X.shape[0], len(raw_X))
assert_equal(X.shape[1], n_features)
assert_true(np.all(X.data > 0))
assert_equal(X[0].sum(), 4)
assert_equal(X[1].sum(), 3)
assert_equal(X.nnz, 6)
def test_feature_hasher_pairs():
raw_X = (iter(d.items()) for d in [{"foo": 1, "bar": 2},
{"baz": 3, "quux": 4, "foo": -1}])
h = FeatureHasher(n_features=16, input_type="pair")
x1, x2 = h.transform(raw_X).toarray()
x1_nz = sorted(np.abs(x1[x1 != 0]))
x2_nz = sorted(np.abs(x2[x2 != 0]))
assert_equal([1, 2], x1_nz)
assert_equal([1, 3, 4], x2_nz)
def test_hash_empty_input():
n_features = 16
raw_X = [[], (), iter(range(0))]
h = FeatureHasher(n_features=n_features, input_type="string")
X = h.transform(raw_X)
assert_array_equal(X.A, np.zeros((len(raw_X), n_features)))
def test_hasher_invalid_input():
assert_raises(ValueError, FeatureHasher, input_type="gobbledygook")
assert_raises(ValueError, FeatureHasher, n_features=-1)
assert_raises(ValueError, FeatureHasher, n_features=0)
assert_raises(TypeError, FeatureHasher, n_features='ham')
h = FeatureHasher(n_features=np.uint16(2 ** 6))
assert_raises(ValueError, h.transform, [])
assert_raises(Exception, h.transform, [[5.5]])
assert_raises(Exception, h.transform, [[None]])
def test_hasher_set_params():
# Test delayed input validation in fit (useful for grid search).
hasher = FeatureHasher()
hasher.set_params(n_features=np.inf)
assert_raises(TypeError, hasher.fit)
def test_hasher_zeros():
# Assert that no zeros are materialized in the output.
X = FeatureHasher().transform([{'foo': 0}])
assert_equal(X.data.shape, (0,))
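# Editor's sketch (illustrative, not one of the upstream tests): the hashing
# trick maps arbitrary feature names onto a fixed-width sparse vector without
# building a vocabulary first, which is what the tests above exercise.
def _feature_hasher_demo():
    h = FeatureHasher(n_features=8, input_type="dict")
    X = h.transform([{"dog": 1, "cat": 2}, {"dog": 2, "run": 5}])
    # X is a 2 x 8 scipy sparse matrix with at most four stored entries
    # (hash collisions can merge or cancel entries).
    return X.shape, X.nnz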
| bsd-3-clause |
sam-tsai/django | tests/i18n/tests.py | 45 | 79916 | # -*- encoding: utf-8 -*-
from __future__ import unicode_literals
import datetime
import decimal
import gettext as gettext_module
import os
import pickle
from contextlib import contextmanager
from importlib import import_module
from threading import local
from unittest import skipUnless
from django import forms
from django.conf import settings
from django.template import Context, Template, TemplateSyntaxError
from django.test import (
RequestFactory, SimpleTestCase, TestCase, override_settings,
)
from django.utils import six, translation
from django.utils._os import upath
from django.utils.formats import (
date_format, get_format, get_format_modules, iter_format_modules, localize,
localize_input, reset_format_cache, sanitize_separators, time_format,
)
from django.utils.numberformat import format as nformat
from django.utils.safestring import SafeBytes, SafeString, SafeText, mark_safe
from django.utils.six import PY3
from django.utils.translation import (
LANGUAGE_SESSION_KEY, activate, check_for_language, deactivate,
get_language, get_language_bidi, get_language_from_request,
get_language_info, gettext, gettext_lazy, ngettext_lazy, npgettext,
npgettext_lazy, pgettext, pgettext_lazy, string_concat, to_locale,
trans_real, ugettext, ugettext_lazy, ungettext, ungettext_lazy,
)
from .forms import CompanyForm, I18nForm, SelectDateForm
from .models import Company, TestModel
here = os.path.dirname(os.path.abspath(upath(__file__)))
extended_locale_paths = settings.LOCALE_PATHS + [
os.path.join(here, 'other', 'locale'),
]
@contextmanager
def patch_formats(lang, **settings):
from django.utils.formats import _format_cache
# Populate _format_cache with temporary values
for key, value in settings.items():
_format_cache[(key, lang)] = value
try:
yield
finally:
reset_format_cache()
class TranslationTests(SimpleTestCase):
@translation.override('fr')
def test_plural(self):
"""
Test plurals with ungettext. French differs from English in that 0 is singular.
"""
self.assertEqual(ungettext("%d year", "%d years", 0) % 0, "0 année")
self.assertEqual(ungettext("%d year", "%d years", 2) % 2, "2 années")
self.assertEqual(ungettext("%(size)d byte", "%(size)d bytes", 0) % {'size': 0}, "0 octet")
self.assertEqual(ungettext("%(size)d byte", "%(size)d bytes", 2) % {'size': 2}, "2 octets")
def test_override(self):
activate('de')
try:
with translation.override('pl'):
self.assertEqual(get_language(), 'pl')
self.assertEqual(get_language(), 'de')
with translation.override(None):
self.assertEqual(get_language(), None)
with translation.override('pl'):
pass
self.assertEqual(get_language(), None)
self.assertEqual(get_language(), 'de')
finally:
deactivate()
def test_override_decorator(self):
@translation.override('pl')
def func_pl():
self.assertEqual(get_language(), 'pl')
@translation.override(None)
def func_none():
self.assertEqual(get_language(), None)
try:
activate('de')
func_pl()
self.assertEqual(get_language(), 'de')
func_none()
self.assertEqual(get_language(), 'de')
finally:
deactivate()
def test_override_exit(self):
"""
Test that the language restored is the one used when the function was
called, not the one used when the decorator was initialized. refs #23381
"""
activate('fr')
@translation.override('pl')
def func_pl():
pass
deactivate()
try:
activate('en')
func_pl()
self.assertEqual(get_language(), 'en')
finally:
deactivate()
def test_lazy_objects(self):
"""
Format string interpolation should work with *_lazy objects.
"""
s = ugettext_lazy('Add %(name)s')
d = {'name': 'Ringo'}
self.assertEqual('Add Ringo', s % d)
with translation.override('de', deactivate=True):
self.assertEqual('Ringo hinzuf\xfcgen', s % d)
with translation.override('pl'):
self.assertEqual('Dodaj Ringo', s % d)
# It should be possible to compare *_lazy objects.
s1 = ugettext_lazy('Add %(name)s')
self.assertEqual(s, s1)
s2 = gettext_lazy('Add %(name)s')
s3 = gettext_lazy('Add %(name)s')
self.assertEqual(s2, s3)
self.assertEqual(s, s2)
s4 = ugettext_lazy('Some other string')
self.assertNotEqual(s, s4)
@skipUnless(six.PY2, "No more bytestring translations on PY3")
def test_lazy_and_bytestrings(self):
# On Python 2, (n)gettext_lazy should not transform a bytestring to unicode
self.assertEqual(gettext_lazy(b"test").upper(), b"TEST")
self.assertEqual((ngettext_lazy(b"%d test", b"%d tests") % 1).upper(), b"1 TEST")
# Other versions of lazy functions always return unicode
self.assertEqual(ugettext_lazy(b"test").upper(), "TEST")
self.assertEqual((ungettext_lazy(b"%d test", b"%d tests") % 1).upper(), "1 TEST")
self.assertEqual(pgettext_lazy(b"context", b"test").upper(), "TEST")
self.assertEqual(
(npgettext_lazy(b"context", b"%d test", b"%d tests") % 1).upper(),
"1 TEST"
)
def test_lazy_pickle(self):
s1 = ugettext_lazy("test")
self.assertEqual(six.text_type(s1), "test")
s2 = pickle.loads(pickle.dumps(s1))
self.assertEqual(six.text_type(s2), "test")
@override_settings(LOCALE_PATHS=extended_locale_paths)
def test_ungettext_lazy(self):
simple_with_format = ungettext_lazy('%d good result', '%d good results')
simple_str_with_format = ngettext_lazy(str('%d good result'), str('%d good results'))
simple_context_with_format = npgettext_lazy('Exclamation', '%d good result', '%d good results')
simple_without_format = ungettext_lazy('good result', 'good results')
with translation.override('de'):
self.assertEqual(simple_with_format % 1, '1 gutes Resultat')
self.assertEqual(simple_with_format % 4, '4 guten Resultate')
self.assertEqual(simple_str_with_format % 1, str('1 gutes Resultat'))
self.assertEqual(simple_str_with_format % 4, str('4 guten Resultate'))
self.assertEqual(simple_context_with_format % 1, '1 gutes Resultat!')
self.assertEqual(simple_context_with_format % 4, '4 guten Resultate!')
self.assertEqual(simple_without_format % 1, 'gutes Resultat')
self.assertEqual(simple_without_format % 4, 'guten Resultate')
complex_nonlazy = ungettext_lazy('Hi %(name)s, %(num)d good result', 'Hi %(name)s, %(num)d good results', 4)
complex_deferred = ungettext_lazy('Hi %(name)s, %(num)d good result', 'Hi %(name)s, %(num)d good results', 'num')
complex_str_nonlazy = ngettext_lazy(str('Hi %(name)s, %(num)d good result'), str('Hi %(name)s, %(num)d good results'), 4)
complex_str_deferred = ngettext_lazy(str('Hi %(name)s, %(num)d good result'), str('Hi %(name)s, %(num)d good results'), 'num')
complex_context_nonlazy = npgettext_lazy('Greeting', 'Hi %(name)s, %(num)d good result', 'Hi %(name)s, %(num)d good results', 4)
complex_context_deferred = npgettext_lazy('Greeting', 'Hi %(name)s, %(num)d good result', 'Hi %(name)s, %(num)d good results', 'num')
with translation.override('de'):
self.assertEqual(complex_nonlazy % {'num': 4, 'name': 'Jim'}, 'Hallo Jim, 4 guten Resultate')
self.assertEqual(complex_deferred % {'name': 'Jim', 'num': 1}, 'Hallo Jim, 1 gutes Resultat')
self.assertEqual(complex_deferred % {'name': 'Jim', 'num': 5}, 'Hallo Jim, 5 guten Resultate')
with six.assertRaisesRegex(self, KeyError, 'Your dictionary lacks key.*'):
complex_deferred % {'name': 'Jim'}
self.assertEqual(complex_str_nonlazy % {'num': 4, 'name': 'Jim'}, str('Hallo Jim, 4 guten Resultate'))
self.assertEqual(complex_str_deferred % {'name': 'Jim', 'num': 1}, str('Hallo Jim, 1 gutes Resultat'))
self.assertEqual(complex_str_deferred % {'name': 'Jim', 'num': 5}, str('Hallo Jim, 5 guten Resultate'))
with six.assertRaisesRegex(self, KeyError, 'Your dictionary lacks key.*'):
complex_str_deferred % {'name': 'Jim'}
self.assertEqual(complex_context_nonlazy % {'num': 4, 'name': 'Jim'}, 'Willkommen Jim, 4 guten Resultate')
self.assertEqual(complex_context_deferred % {'name': 'Jim', 'num': 1}, 'Willkommen Jim, 1 gutes Resultat')
self.assertEqual(complex_context_deferred % {'name': 'Jim', 'num': 5}, 'Willkommen Jim, 5 guten Resultate')
with six.assertRaisesRegex(self, KeyError, 'Your dictionary lacks key.*'):
complex_context_deferred % {'name': 'Jim'}
@skipUnless(six.PY2, "PY3 doesn't have distinct int and long types")
def test_ungettext_lazy_long(self):
"""
Regression test for #22820: int and long should be treated alike in ungettext_lazy.
"""
result = ungettext_lazy('%(name)s has %(num)d good result', '%(name)s has %(num)d good results', 4)
self.assertEqual(result % {'name': 'Joe', 'num': 4}, "Joe has 4 good results")
# Now with a long
result = ungettext_lazy(
'%(name)s has %(num)d good result', '%(name)s has %(num)d good results',
long(4) # NOQA: long undefined on PY3
)
self.assertEqual(result % {'name': 'Joe', 'num': 4}, "Joe has 4 good results")
@override_settings(LOCALE_PATHS=extended_locale_paths)
def test_pgettext(self):
trans_real._active = local()
trans_real._translations = {}
with translation.override('de'):
self.assertEqual(pgettext("unexisting", "May"), "May")
self.assertEqual(pgettext("month name", "May"), "Mai")
self.assertEqual(pgettext("verb", "May"), "Kann")
self.assertEqual(npgettext("search", "%d result", "%d results", 4) % 4, "4 Resultate")
@override_settings(LOCALE_PATHS=extended_locale_paths)
def test_template_tags_pgettext(self):
"""
        Ensure that message contexts are taken into account by the {% trans %}
        and {% blocktrans %} template tags.
Refs #14806.
"""
trans_real._active = local()
trans_real._translations = {}
with translation.override('de'):
# {% trans %} -----------------------------------
# Inexisting context...
t = Template('{% load i18n %}{% trans "May" context "unexisting" %}')
rendered = t.render(Context())
self.assertEqual(rendered, 'May')
# Existing context...
# Using a literal
t = Template('{% load i18n %}{% trans "May" context "month name" %}')
rendered = t.render(Context())
self.assertEqual(rendered, 'Mai')
t = Template('{% load i18n %}{% trans "May" context "verb" %}')
rendered = t.render(Context())
self.assertEqual(rendered, 'Kann')
# Using a variable
t = Template('{% load i18n %}{% trans "May" context message_context %}')
rendered = t.render(Context({'message_context': 'month name'}))
self.assertEqual(rendered, 'Mai')
t = Template('{% load i18n %}{% trans "May" context message_context %}')
rendered = t.render(Context({'message_context': 'verb'}))
self.assertEqual(rendered, 'Kann')
# Using a filter
t = Template('{% load i18n %}{% trans "May" context message_context|lower %}')
rendered = t.render(Context({'message_context': 'MONTH NAME'}))
self.assertEqual(rendered, 'Mai')
t = Template('{% load i18n %}{% trans "May" context message_context|lower %}')
rendered = t.render(Context({'message_context': 'VERB'}))
self.assertEqual(rendered, 'Kann')
# Using 'as'
t = Template('{% load i18n %}{% trans "May" context "month name" as var %}Value: {{ var }}')
rendered = t.render(Context())
self.assertEqual(rendered, 'Value: Mai')
t = Template('{% load i18n %}{% trans "May" as var context "verb" %}Value: {{ var }}')
rendered = t.render(Context())
self.assertEqual(rendered, 'Value: Kann')
# {% blocktrans %} ------------------------------
# Inexisting context...
t = Template('{% load i18n %}{% blocktrans context "unexisting" %}May{% endblocktrans %}')
rendered = t.render(Context())
self.assertEqual(rendered, 'May')
# Existing context...
# Using a literal
t = Template('{% load i18n %}{% blocktrans context "month name" %}May{% endblocktrans %}')
rendered = t.render(Context())
self.assertEqual(rendered, 'Mai')
t = Template('{% load i18n %}{% blocktrans context "verb" %}May{% endblocktrans %}')
rendered = t.render(Context())
self.assertEqual(rendered, 'Kann')
# Using a variable
t = Template('{% load i18n %}{% blocktrans context message_context %}May{% endblocktrans %}')
rendered = t.render(Context({'message_context': 'month name'}))
self.assertEqual(rendered, 'Mai')
t = Template('{% load i18n %}{% blocktrans context message_context %}May{% endblocktrans %}')
rendered = t.render(Context({'message_context': 'verb'}))
self.assertEqual(rendered, 'Kann')
# Using a filter
t = Template('{% load i18n %}{% blocktrans context message_context|lower %}May{% endblocktrans %}')
rendered = t.render(Context({'message_context': 'MONTH NAME'}))
self.assertEqual(rendered, 'Mai')
t = Template('{% load i18n %}{% blocktrans context message_context|lower %}May{% endblocktrans %}')
rendered = t.render(Context({'message_context': 'VERB'}))
self.assertEqual(rendered, 'Kann')
# Using 'count'
t = Template('{% load i18n %}{% blocktrans count number=1 context "super search" %}{{ number }} super result{% plural %}{{ number }} super results{% endblocktrans %}')
rendered = t.render(Context())
self.assertEqual(rendered, '1 Super-Ergebnis')
t = Template('{% load i18n %}{% blocktrans count number=2 context "super search" %}{{ number }} super result{% plural %}{{ number }} super results{% endblocktrans %}')
rendered = t.render(Context())
self.assertEqual(rendered, '2 Super-Ergebnisse')
t = Template('{% load i18n %}{% blocktrans context "other super search" count number=1 %}{{ number }} super result{% plural %}{{ number }} super results{% endblocktrans %}')
rendered = t.render(Context())
self.assertEqual(rendered, '1 anderen Super-Ergebnis')
t = Template('{% load i18n %}{% blocktrans context "other super search" count number=2 %}{{ number }} super result{% plural %}{{ number }} super results{% endblocktrans %}')
rendered = t.render(Context())
self.assertEqual(rendered, '2 andere Super-Ergebnisse')
# Using 'with'
t = Template('{% load i18n %}{% blocktrans with num_comments=5 context "comment count" %}There are {{ num_comments }} comments{% endblocktrans %}')
rendered = t.render(Context())
self.assertEqual(rendered, 'Es gibt 5 Kommentare')
t = Template('{% load i18n %}{% blocktrans with num_comments=5 context "other comment count" %}There are {{ num_comments }} comments{% endblocktrans %}')
rendered = t.render(Context())
self.assertEqual(rendered, 'Andere: Es gibt 5 Kommentare')
# Using trimmed
t = Template('{% load i18n %}{% blocktrans trimmed %}\n\nThere\n\t are 5 \n\n comments\n{% endblocktrans %}')
rendered = t.render(Context())
self.assertEqual(rendered, 'There are 5 comments')
t = Template('{% load i18n %}{% blocktrans with num_comments=5 context "comment count" trimmed %}\n\nThere are \t\n \t {{ num_comments }} comments\n\n{% endblocktrans %}')
rendered = t.render(Context())
self.assertEqual(rendered, 'Es gibt 5 Kommentare')
t = Template('{% load i18n %}{% blocktrans context "other super search" count number=2 trimmed %}\n{{ number }} super \n result{% plural %}{{ number }} super results{% endblocktrans %}')
rendered = t.render(Context())
self.assertEqual(rendered, '2 andere Super-Ergebnisse')
# Mis-uses
self.assertRaises(TemplateSyntaxError, Template, '{% load i18n %}{% blocktrans context with month="May" %}{{ month }}{% endblocktrans %}')
self.assertRaises(TemplateSyntaxError, Template, '{% load i18n %}{% blocktrans context %}{% endblocktrans %}')
self.assertRaises(TemplateSyntaxError, Template, '{% load i18n %}{% blocktrans count number=2 context %}{{ number }} super result{% plural %}{{ number }} super results{% endblocktrans %}')
def test_string_concat(self):
"""
six.text_type(string_concat(...)) should not raise a TypeError - #4796
"""
self.assertEqual('django', six.text_type(string_concat("dja", "ngo")))
def test_empty_value(self):
"""
Empty value must stay empty after being translated (#23196).
"""
with translation.override('de'):
self.assertEqual("", ugettext(""))
self.assertEqual(str(""), gettext(str("")))
s = mark_safe("")
self.assertEqual(s, ugettext(s))
def test_safe_status(self):
"""
Translating a string requiring no auto-escaping shouldn't change the "safe" status.
"""
s = mark_safe(str('Password'))
self.assertEqual(SafeString, type(s))
with translation.override('de', deactivate=True):
self.assertEqual(SafeText, type(ugettext(s)))
self.assertEqual('aPassword', SafeText('a') + s)
self.assertEqual('Passworda', s + SafeText('a'))
self.assertEqual('Passworda', s + mark_safe('a'))
self.assertEqual('aPassword', mark_safe('a') + s)
self.assertEqual('as', mark_safe('a') + mark_safe('s'))
def test_maclines(self):
"""
        Translations in files with Mac or DOS line endings are converted to
        Unix line endings in .po catalogs, and they have to match when retrieved.
"""
ca_translation = trans_real.translation('ca')
ca_translation._catalog['Mac\nEOF\n'] = 'Catalan Mac\nEOF\n'
ca_translation._catalog['Win\nEOF\n'] = 'Catalan Win\nEOF\n'
with translation.override('ca', deactivate=True):
self.assertEqual('Catalan Mac\nEOF\n', ugettext('Mac\rEOF\r'))
self.assertEqual('Catalan Win\nEOF\n', ugettext('Win\r\nEOF\r\n'))
def test_to_locale(self):
"""
Tests the to_locale function and the special case of Serbian Latin
(refs #12230 and r11299)
"""
self.assertEqual(to_locale('en-us'), 'en_US')
self.assertEqual(to_locale('sr-lat'), 'sr_Lat')
def test_to_language(self):
"""
Test the to_language function
"""
self.assertEqual(trans_real.to_language('en_US'), 'en-us')
self.assertEqual(trans_real.to_language('sr_Lat'), 'sr-lat')
def test_language_bidi(self):
self.assertEqual(get_language_bidi(), False)
with translation.override(None):
self.assertEqual(get_language_bidi(), False)
@override_settings(LOCALE_PATHS=[os.path.join(here, 'other', 'locale')])
def test_bad_placeholder_1(self):
"""
Error in translation file should not crash template rendering
(%(person)s is translated as %(personne)s in fr.po)
Refs #16516.
"""
with translation.override('fr'):
t = Template('{% load i18n %}{% blocktrans %}My name is {{ person }}.{% endblocktrans %}')
rendered = t.render(Context({'person': 'James'}))
self.assertEqual(rendered, 'My name is James.')
@override_settings(LOCALE_PATHS=[os.path.join(here, 'other', 'locale')])
def test_bad_placeholder_2(self):
"""
Error in translation file should not crash template rendering
(%(person) misses a 's' in fr.po, causing the string formatting to fail)
Refs #18393.
"""
with translation.override('fr'):
t = Template('{% load i18n %}{% blocktrans %}My other name is {{ person }}.{% endblocktrans %}')
rendered = t.render(Context({'person': 'James'}))
self.assertEqual(rendered, 'My other name is James.')
class TranslationThreadSafetyTests(SimpleTestCase):
def setUp(self):
self._old_language = get_language()
self._translations = trans_real._translations
# here we rely on .split() being called inside the _fetch()
# in trans_real.translation()
class sideeffect_str(str):
def split(self, *args, **kwargs):
res = str.split(self, *args, **kwargs)
trans_real._translations['en-YY'] = None
return res
trans_real._translations = {sideeffect_str('en-XX'): None}
def tearDown(self):
trans_real._translations = self._translations
activate(self._old_language)
def test_bug14894_translation_activate_thread_safety(self):
translation_count = len(trans_real._translations)
try:
translation.activate('pl')
except RuntimeError:
self.fail('translation.activate() is not thread-safe')
# make sure sideeffect_str actually added a new translation
self.assertLess(translation_count, len(trans_real._translations))
@override_settings(USE_L10N=True)
class FormattingTests(SimpleTestCase):
def setUp(self):
super(FormattingTests, self).setUp()
self.n = decimal.Decimal('66666.666')
self.f = 99999.999
self.d = datetime.date(2009, 12, 31)
self.dt = datetime.datetime(2009, 12, 31, 20, 50)
self.t = datetime.time(10, 15, 48)
self.l = 10000 if PY3 else long(10000) # NOQA: long undefined on PY3
self.ctxt = Context({
'n': self.n,
't': self.t,
'd': self.d,
'dt': self.dt,
'f': self.f,
'l': self.l,
})
def test_locale_independent(self):
"""
Localization of numbers
"""
with self.settings(USE_THOUSAND_SEPARATOR=False):
self.assertEqual('66666.66', nformat(self.n, decimal_sep='.', decimal_pos=2, grouping=3, thousand_sep=','))
self.assertEqual('66666A6', nformat(self.n, decimal_sep='A', decimal_pos=1, grouping=1, thousand_sep='B'))
self.assertEqual('66666', nformat(self.n, decimal_sep='X', decimal_pos=0, grouping=1, thousand_sep='Y'))
with self.settings(USE_THOUSAND_SEPARATOR=True):
self.assertEqual('66,666.66', nformat(self.n, decimal_sep='.', decimal_pos=2, grouping=3, thousand_sep=','))
self.assertEqual('6B6B6B6B6A6', nformat(self.n, decimal_sep='A', decimal_pos=1, grouping=1, thousand_sep='B'))
self.assertEqual('-66666.6', nformat(-66666.666, decimal_sep='.', decimal_pos=1))
self.assertEqual('-66666.0', nformat(int('-66666'), decimal_sep='.', decimal_pos=1))
self.assertEqual('10000.0', nformat(self.l, decimal_sep='.', decimal_pos=1))
# This unusual grouping/force_grouping combination may be triggered by the intcomma filter (#17414)
self.assertEqual('10000', nformat(self.l, decimal_sep='.', decimal_pos=0, grouping=0, force_grouping=True))
# date filter
self.assertEqual('31.12.2009 в 20:50', Template('{{ dt|date:"d.m.Y в H:i" }}').render(self.ctxt))
self.assertEqual('⌚ 10:15', Template('{{ t|time:"⌚ H:i" }}').render(self.ctxt))
@override_settings(USE_L10N=False)
def test_l10n_disabled(self):
"""
        With localization of formats disabled, Catalan translations will be
        used, but Catalan formats will not.
"""
with translation.override('ca', deactivate=True):
self.maxDiff = 3000
self.assertEqual('N j, Y', get_format('DATE_FORMAT'))
self.assertEqual(0, get_format('FIRST_DAY_OF_WEEK'))
self.assertEqual('.', get_format('DECIMAL_SEPARATOR'))
self.assertEqual('10:15 a.m.', time_format(self.t))
self.assertEqual('des. 31, 2009', date_format(self.d))
self.assertEqual('desembre 2009', date_format(self.d, 'YEAR_MONTH_FORMAT'))
self.assertEqual('12/31/2009 8:50 p.m.', date_format(self.dt, 'SHORT_DATETIME_FORMAT'))
self.assertEqual('No localizable', localize('No localizable'))
self.assertEqual('66666.666', localize(self.n))
self.assertEqual('99999.999', localize(self.f))
self.assertEqual('10000', localize(self.l))
self.assertEqual('des. 31, 2009', localize(self.d))
self.assertEqual('des. 31, 2009, 8:50 p.m.', localize(self.dt))
self.assertEqual('66666.666', Template('{{ n }}').render(self.ctxt))
self.assertEqual('99999.999', Template('{{ f }}').render(self.ctxt))
self.assertEqual('des. 31, 2009', Template('{{ d }}').render(self.ctxt))
self.assertEqual('des. 31, 2009, 8:50 p.m.', Template('{{ dt }}').render(self.ctxt))
self.assertEqual('66666.67', Template('{{ n|floatformat:2 }}').render(self.ctxt))
self.assertEqual('100000.0', Template('{{ f|floatformat }}').render(self.ctxt))
self.assertEqual('10:15 a.m.', Template('{{ t|time:"TIME_FORMAT" }}').render(self.ctxt))
self.assertEqual('12/31/2009', Template('{{ d|date:"SHORT_DATE_FORMAT" }}').render(self.ctxt))
self.assertEqual('12/31/2009 8:50 p.m.', Template('{{ dt|date:"SHORT_DATETIME_FORMAT" }}').render(self.ctxt))
form = I18nForm({
'decimal_field': '66666,666',
'float_field': '99999,999',
'date_field': '31/12/2009',
'datetime_field': '31/12/2009 20:50',
'time_field': '20:50',
'integer_field': '1.234',
})
self.assertFalse(form.is_valid())
self.assertEqual(['Introdu\xefu un n\xfamero.'], form.errors['float_field'])
self.assertEqual(['Introdu\xefu un n\xfamero.'], form.errors['decimal_field'])
self.assertEqual(['Introdu\xefu una data v\xe0lida.'], form.errors['date_field'])
self.assertEqual(['Introdu\xefu una data/hora v\xe0lides.'], form.errors['datetime_field'])
self.assertEqual(['Introdu\xefu un n\xfamero sencer.'], form.errors['integer_field'])
form2 = SelectDateForm({
'date_field_month': '12',
'date_field_day': '31',
'date_field_year': '2009'
})
self.assertTrue(form2.is_valid())
self.assertEqual(datetime.date(2009, 12, 31), form2.cleaned_data['date_field'])
self.assertHTMLEqual(
'<select name="mydate_month" id="id_mydate_month">\n<option value="0">---</option>\n<option value="1">gener</option>\n<option value="2">febrer</option>\n<option value="3">mar\xe7</option>\n<option value="4">abril</option>\n<option value="5">maig</option>\n<option value="6">juny</option>\n<option value="7">juliol</option>\n<option value="8">agost</option>\n<option value="9">setembre</option>\n<option value="10">octubre</option>\n<option value="11">novembre</option>\n<option value="12" selected="selected">desembre</option>\n</select>\n<select name="mydate_day" id="id_mydate_day">\n<option value="0">---</option>\n<option value="1">1</option>\n<option value="2">2</option>\n<option value="3">3</option>\n<option value="4">4</option>\n<option value="5">5</option>\n<option value="6">6</option>\n<option value="7">7</option>\n<option value="8">8</option>\n<option value="9">9</option>\n<option value="10">10</option>\n<option value="11">11</option>\n<option value="12">12</option>\n<option value="13">13</option>\n<option value="14">14</option>\n<option value="15">15</option>\n<option value="16">16</option>\n<option value="17">17</option>\n<option value="18">18</option>\n<option value="19">19</option>\n<option value="20">20</option>\n<option value="21">21</option>\n<option value="22">22</option>\n<option value="23">23</option>\n<option value="24">24</option>\n<option value="25">25</option>\n<option value="26">26</option>\n<option value="27">27</option>\n<option value="28">28</option>\n<option value="29">29</option>\n<option value="30">30</option>\n<option value="31" selected="selected">31</option>\n</select>\n<select name="mydate_year" id="id_mydate_year">\n<option value="0">---</option>\n<option value="2009" selected="selected">2009</option>\n<option value="2010">2010</option>\n<option value="2011">2011</option>\n<option value="2012">2012</option>\n<option value="2013">2013</option>\n<option value="2014">2014</option>\n<option value="2015">2015</option>\n<option value="2016">2016</option>\n<option value="2017">2017</option>\n<option value="2018">2018</option>\n</select>',
forms.SelectDateWidget(years=range(2009, 2019)).render('mydate', datetime.date(2009, 12, 31))
)
# We shouldn't change the behavior of the floatformat filter re:
# thousand separator and grouping when USE_L10N is False even
# if the USE_THOUSAND_SEPARATOR, NUMBER_GROUPING and
# THOUSAND_SEPARATOR settings are specified
with self.settings(USE_THOUSAND_SEPARATOR=True,
NUMBER_GROUPING=1, THOUSAND_SEPARATOR='!'):
self.assertEqual('66666.67', Template('{{ n|floatformat:2 }}').render(self.ctxt))
self.assertEqual('100000.0', Template('{{ f|floatformat }}').render(self.ctxt))
def test_false_like_locale_formats(self):
"""
Ensure that the active locale's formats take precedence over the
default settings even if they would be interpreted as False in a
conditional test (e.g. 0 or empty string).
Refs #16938.
"""
with patch_formats('fr', THOUSAND_SEPARATOR='', FIRST_DAY_OF_WEEK=0):
with translation.override('fr'):
with self.settings(USE_THOUSAND_SEPARATOR=True, THOUSAND_SEPARATOR='!'):
self.assertEqual('', get_format('THOUSAND_SEPARATOR'))
# Even a second time (after the format has been cached)...
self.assertEqual('', get_format('THOUSAND_SEPARATOR'))
with self.settings(FIRST_DAY_OF_WEEK=1):
self.assertEqual(0, get_format('FIRST_DAY_OF_WEEK'))
# Even a second time (after the format has been cached)...
self.assertEqual(0, get_format('FIRST_DAY_OF_WEEK'))
def test_l10n_enabled(self):
self.maxDiff = 3000
# Catalan locale
with translation.override('ca', deactivate=True):
self.assertEqual('j \d\e F \d\e Y', get_format('DATE_FORMAT'))
self.assertEqual(1, get_format('FIRST_DAY_OF_WEEK'))
self.assertEqual(',', get_format('DECIMAL_SEPARATOR'))
self.assertEqual('10:15', time_format(self.t))
self.assertEqual('31 de desembre de 2009', date_format(self.d))
self.assertEqual('desembre del 2009', date_format(self.d, 'YEAR_MONTH_FORMAT'))
self.assertEqual('31/12/2009 20:50', date_format(self.dt, 'SHORT_DATETIME_FORMAT'))
self.assertEqual('No localizable', localize('No localizable'))
with self.settings(USE_THOUSAND_SEPARATOR=True):
self.assertEqual('66.666,666', localize(self.n))
self.assertEqual('99.999,999', localize(self.f))
self.assertEqual('10.000', localize(self.l))
self.assertEqual('True', localize(True))
with self.settings(USE_THOUSAND_SEPARATOR=False):
self.assertEqual('66666,666', localize(self.n))
self.assertEqual('99999,999', localize(self.f))
self.assertEqual('10000', localize(self.l))
self.assertEqual('31 de desembre de 2009', localize(self.d))
self.assertEqual('31 de desembre de 2009 a les 20:50', localize(self.dt))
with self.settings(USE_THOUSAND_SEPARATOR=True):
self.assertEqual('66.666,666', Template('{{ n }}').render(self.ctxt))
self.assertEqual('99.999,999', Template('{{ f }}').render(self.ctxt))
self.assertEqual('10.000', Template('{{ l }}').render(self.ctxt))
with self.settings(USE_THOUSAND_SEPARATOR=True):
form3 = I18nForm({
'decimal_field': '66.666,666',
'float_field': '99.999,999',
'date_field': '31/12/2009',
'datetime_field': '31/12/2009 20:50',
'time_field': '20:50',
'integer_field': '1.234',
})
self.assertTrue(form3.is_valid())
self.assertEqual(decimal.Decimal('66666.666'), form3.cleaned_data['decimal_field'])
self.assertEqual(99999.999, form3.cleaned_data['float_field'])
self.assertEqual(datetime.date(2009, 12, 31), form3.cleaned_data['date_field'])
self.assertEqual(datetime.datetime(2009, 12, 31, 20, 50), form3.cleaned_data['datetime_field'])
self.assertEqual(datetime.time(20, 50), form3.cleaned_data['time_field'])
self.assertEqual(1234, form3.cleaned_data['integer_field'])
with self.settings(USE_THOUSAND_SEPARATOR=False):
self.assertEqual('66666,666', Template('{{ n }}').render(self.ctxt))
self.assertEqual('99999,999', Template('{{ f }}').render(self.ctxt))
self.assertEqual('31 de desembre de 2009', Template('{{ d }}').render(self.ctxt))
self.assertEqual('31 de desembre de 2009 a les 20:50', Template('{{ dt }}').render(self.ctxt))
self.assertEqual('66666,67', Template('{{ n|floatformat:2 }}').render(self.ctxt))
self.assertEqual('100000,0', Template('{{ f|floatformat }}').render(self.ctxt))
self.assertEqual('10:15', Template('{{ t|time:"TIME_FORMAT" }}').render(self.ctxt))
self.assertEqual('31/12/2009', Template('{{ d|date:"SHORT_DATE_FORMAT" }}').render(self.ctxt))
self.assertEqual('31/12/2009 20:50', Template('{{ dt|date:"SHORT_DATETIME_FORMAT" }}').render(self.ctxt))
self.assertEqual(date_format(datetime.datetime.now(), "DATE_FORMAT"),
Template('{% now "DATE_FORMAT" %}').render(self.ctxt))
with self.settings(USE_THOUSAND_SEPARATOR=False):
form4 = I18nForm({
'decimal_field': '66666,666',
'float_field': '99999,999',
'date_field': '31/12/2009',
'datetime_field': '31/12/2009 20:50',
'time_field': '20:50',
'integer_field': '1234',
})
self.assertTrue(form4.is_valid())
self.assertEqual(decimal.Decimal('66666.666'), form4.cleaned_data['decimal_field'])
self.assertEqual(99999.999, form4.cleaned_data['float_field'])
self.assertEqual(datetime.date(2009, 12, 31), form4.cleaned_data['date_field'])
self.assertEqual(datetime.datetime(2009, 12, 31, 20, 50), form4.cleaned_data['datetime_field'])
self.assertEqual(datetime.time(20, 50), form4.cleaned_data['time_field'])
self.assertEqual(1234, form4.cleaned_data['integer_field'])
form5 = SelectDateForm({
'date_field_month': '12',
'date_field_day': '31',
'date_field_year': '2009'
})
self.assertTrue(form5.is_valid())
self.assertEqual(datetime.date(2009, 12, 31), form5.cleaned_data['date_field'])
self.assertHTMLEqual(
'<select name="mydate_day" id="id_mydate_day">\n<option value="0">---</option>\n<option value="1">1</option>\n<option value="2">2</option>\n<option value="3">3</option>\n<option value="4">4</option>\n<option value="5">5</option>\n<option value="6">6</option>\n<option value="7">7</option>\n<option value="8">8</option>\n<option value="9">9</option>\n<option value="10">10</option>\n<option value="11">11</option>\n<option value="12">12</option>\n<option value="13">13</option>\n<option value="14">14</option>\n<option value="15">15</option>\n<option value="16">16</option>\n<option value="17">17</option>\n<option value="18">18</option>\n<option value="19">19</option>\n<option value="20">20</option>\n<option value="21">21</option>\n<option value="22">22</option>\n<option value="23">23</option>\n<option value="24">24</option>\n<option value="25">25</option>\n<option value="26">26</option>\n<option value="27">27</option>\n<option value="28">28</option>\n<option value="29">29</option>\n<option value="30">30</option>\n<option value="31" selected="selected">31</option>\n</select>\n<select name="mydate_month" id="id_mydate_month">\n<option value="0">---</option>\n<option value="1">gener</option>\n<option value="2">febrer</option>\n<option value="3">mar\xe7</option>\n<option value="4">abril</option>\n<option value="5">maig</option>\n<option value="6">juny</option>\n<option value="7">juliol</option>\n<option value="8">agost</option>\n<option value="9">setembre</option>\n<option value="10">octubre</option>\n<option value="11">novembre</option>\n<option value="12" selected="selected">desembre</option>\n</select>\n<select name="mydate_year" id="id_mydate_year">\n<option value="0">---</option>\n<option value="2009" selected="selected">2009</option>\n<option value="2010">2010</option>\n<option value="2011">2011</option>\n<option value="2012">2012</option>\n<option value="2013">2013</option>\n<option value="2014">2014</option>\n<option value="2015">2015</option>\n<option value="2016">2016</option>\n<option value="2017">2017</option>\n<option value="2018">2018</option>\n</select>',
forms.SelectDateWidget(years=range(2009, 2019)).render('mydate', datetime.date(2009, 12, 31))
)
# Russian locale (with E as month)
with translation.override('ru', deactivate=True):
self.assertHTMLEqual(
'<select name="mydate_day" id="id_mydate_day">\n<option value="0">---</option>\n<option value="1">1</option>\n<option value="2">2</option>\n<option value="3">3</option>\n<option value="4">4</option>\n<option value="5">5</option>\n<option value="6">6</option>\n<option value="7">7</option>\n<option value="8">8</option>\n<option value="9">9</option>\n<option value="10">10</option>\n<option value="11">11</option>\n<option value="12">12</option>\n<option value="13">13</option>\n<option value="14">14</option>\n<option value="15">15</option>\n<option value="16">16</option>\n<option value="17">17</option>\n<option value="18">18</option>\n<option value="19">19</option>\n<option value="20">20</option>\n<option value="21">21</option>\n<option value="22">22</option>\n<option value="23">23</option>\n<option value="24">24</option>\n<option value="25">25</option>\n<option value="26">26</option>\n<option value="27">27</option>\n<option value="28">28</option>\n<option value="29">29</option>\n<option value="30">30</option>\n<option value="31" selected="selected">31</option>\n</select>\n<select name="mydate_month" id="id_mydate_month">\n<option value="0">---</option>\n<option value="1">\u042f\u043d\u0432\u0430\u0440\u044c</option>\n<option value="2">\u0424\u0435\u0432\u0440\u0430\u043b\u044c</option>\n<option value="3">\u041c\u0430\u0440\u0442</option>\n<option value="4">\u0410\u043f\u0440\u0435\u043b\u044c</option>\n<option value="5">\u041c\u0430\u0439</option>\n<option value="6">\u0418\u044e\u043d\u044c</option>\n<option value="7">\u0418\u044e\u043b\u044c</option>\n<option value="8">\u0410\u0432\u0433\u0443\u0441\u0442</option>\n<option value="9">\u0421\u0435\u043d\u0442\u044f\u0431\u0440\u044c</option>\n<option value="10">\u041e\u043a\u0442\u044f\u0431\u0440\u044c</option>\n<option value="11">\u041d\u043e\u044f\u0431\u0440\u044c</option>\n<option value="12" selected="selected">\u0414\u0435\u043a\u0430\u0431\u0440\u044c</option>\n</select>\n<select name="mydate_year" id="id_mydate_year">\n<option value="0">---</option>\n<option value="2009" selected="selected">2009</option>\n<option value="2010">2010</option>\n<option value="2011">2011</option>\n<option value="2012">2012</option>\n<option value="2013">2013</option>\n<option value="2014">2014</option>\n<option value="2015">2015</option>\n<option value="2016">2016</option>\n<option value="2017">2017</option>\n<option value="2018">2018</option>\n</select>',
forms.SelectDateWidget(years=range(2009, 2019)).render('mydate', datetime.date(2009, 12, 31))
)
# English locale
with translation.override('en', deactivate=True):
self.assertEqual('N j, Y', get_format('DATE_FORMAT'))
self.assertEqual(0, get_format('FIRST_DAY_OF_WEEK'))
self.assertEqual('.', get_format('DECIMAL_SEPARATOR'))
self.assertEqual('Dec. 31, 2009', date_format(self.d))
self.assertEqual('December 2009', date_format(self.d, 'YEAR_MONTH_FORMAT'))
self.assertEqual('12/31/2009 8:50 p.m.', date_format(self.dt, 'SHORT_DATETIME_FORMAT'))
self.assertEqual('No localizable', localize('No localizable'))
with self.settings(USE_THOUSAND_SEPARATOR=True):
self.assertEqual('66,666.666', localize(self.n))
self.assertEqual('99,999.999', localize(self.f))
self.assertEqual('10,000', localize(self.l))
with self.settings(USE_THOUSAND_SEPARATOR=False):
self.assertEqual('66666.666', localize(self.n))
self.assertEqual('99999.999', localize(self.f))
self.assertEqual('10000', localize(self.l))
self.assertEqual('Dec. 31, 2009', localize(self.d))
self.assertEqual('Dec. 31, 2009, 8:50 p.m.', localize(self.dt))
with self.settings(USE_THOUSAND_SEPARATOR=True):
self.assertEqual('66,666.666', Template('{{ n }}').render(self.ctxt))
self.assertEqual('99,999.999', Template('{{ f }}').render(self.ctxt))
self.assertEqual('10,000', Template('{{ l }}').render(self.ctxt))
with self.settings(USE_THOUSAND_SEPARATOR=False):
self.assertEqual('66666.666', Template('{{ n }}').render(self.ctxt))
self.assertEqual('99999.999', Template('{{ f }}').render(self.ctxt))
self.assertEqual('Dec. 31, 2009', Template('{{ d }}').render(self.ctxt))
self.assertEqual('Dec. 31, 2009, 8:50 p.m.', Template('{{ dt }}').render(self.ctxt))
self.assertEqual('66666.67', Template('{{ n|floatformat:2 }}').render(self.ctxt))
self.assertEqual('100000.0', Template('{{ f|floatformat }}').render(self.ctxt))
self.assertEqual('12/31/2009', Template('{{ d|date:"SHORT_DATE_FORMAT" }}').render(self.ctxt))
self.assertEqual('12/31/2009 8:50 p.m.', Template('{{ dt|date:"SHORT_DATETIME_FORMAT" }}').render(self.ctxt))
form5 = I18nForm({
'decimal_field': '66666.666',
'float_field': '99999.999',
'date_field': '12/31/2009',
'datetime_field': '12/31/2009 20:50',
'time_field': '20:50',
'integer_field': '1234',
})
self.assertTrue(form5.is_valid())
self.assertEqual(decimal.Decimal('66666.666'), form5.cleaned_data['decimal_field'])
self.assertEqual(99999.999, form5.cleaned_data['float_field'])
self.assertEqual(datetime.date(2009, 12, 31), form5.cleaned_data['date_field'])
self.assertEqual(datetime.datetime(2009, 12, 31, 20, 50), form5.cleaned_data['datetime_field'])
self.assertEqual(datetime.time(20, 50), form5.cleaned_data['time_field'])
self.assertEqual(1234, form5.cleaned_data['integer_field'])
form6 = SelectDateForm({
'date_field_month': '12',
'date_field_day': '31',
'date_field_year': '2009'
})
self.assertTrue(form6.is_valid())
self.assertEqual(datetime.date(2009, 12, 31), form6.cleaned_data['date_field'])
self.assertHTMLEqual(
'<select name="mydate_month" id="id_mydate_month">\n<option value="0">---</option>\n<option value="1">January</option>\n<option value="2">February</option>\n<option value="3">March</option>\n<option value="4">April</option>\n<option value="5">May</option>\n<option value="6">June</option>\n<option value="7">July</option>\n<option value="8">August</option>\n<option value="9">September</option>\n<option value="10">October</option>\n<option value="11">November</option>\n<option value="12" selected="selected">December</option>\n</select>\n<select name="mydate_day" id="id_mydate_day">\n<option value="0">---</option>\n<option value="1">1</option>\n<option value="2">2</option>\n<option value="3">3</option>\n<option value="4">4</option>\n<option value="5">5</option>\n<option value="6">6</option>\n<option value="7">7</option>\n<option value="8">8</option>\n<option value="9">9</option>\n<option value="10">10</option>\n<option value="11">11</option>\n<option value="12">12</option>\n<option value="13">13</option>\n<option value="14">14</option>\n<option value="15">15</option>\n<option value="16">16</option>\n<option value="17">17</option>\n<option value="18">18</option>\n<option value="19">19</option>\n<option value="20">20</option>\n<option value="21">21</option>\n<option value="22">22</option>\n<option value="23">23</option>\n<option value="24">24</option>\n<option value="25">25</option>\n<option value="26">26</option>\n<option value="27">27</option>\n<option value="28">28</option>\n<option value="29">29</option>\n<option value="30">30</option>\n<option value="31" selected="selected">31</option>\n</select>\n<select name="mydate_year" id="id_mydate_year">\n<option value="0">---</option>\n<option value="2009" selected="selected">2009</option>\n<option value="2010">2010</option>\n<option value="2011">2011</option>\n<option value="2012">2012</option>\n<option value="2013">2013</option>\n<option value="2014">2014</option>\n<option value="2015">2015</option>\n<option value="2016">2016</option>\n<option value="2017">2017</option>\n<option value="2018">2018</option>\n</select>',
forms.SelectDateWidget(years=range(2009, 2019)).render('mydate', datetime.date(2009, 12, 31))
)
def test_sub_locales(self):
"""
Check if sublocales fall back to the main locale
"""
with self.settings(USE_THOUSAND_SEPARATOR=True):
with translation.override('de-at', deactivate=True):
self.assertEqual('66.666,666', Template('{{ n }}').render(self.ctxt))
with translation.override('es-us', deactivate=True):
self.assertEqual('31 de Diciembre de 2009', date_format(self.d))
def test_localized_input(self):
"""
Tests if form input is correctly localized
"""
self.maxDiff = 1200
with translation.override('de-at', deactivate=True):
form6 = CompanyForm({
'name': 'acme',
'date_added': datetime.datetime(2009, 12, 31, 6, 0, 0),
'cents_paid': decimal.Decimal('59.47'),
'products_delivered': 12000,
})
self.assertTrue(form6.is_valid())
self.assertHTMLEqual(
form6.as_ul(),
'<li><label for="id_name">Name:</label> <input id="id_name" type="text" name="name" value="acme" maxlength="50" /></li>\n<li><label for="id_date_added">Date added:</label> <input type="text" name="date_added" value="31.12.2009 06:00:00" id="id_date_added" /></li>\n<li><label for="id_cents_paid">Cents paid:</label> <input type="text" name="cents_paid" value="59,47" id="id_cents_paid" /></li>\n<li><label for="id_products_delivered">Products delivered:</label> <input type="text" name="products_delivered" value="12000" id="id_products_delivered" /></li>'
)
self.assertEqual(localize_input(datetime.datetime(2009, 12, 31, 6, 0, 0)), '31.12.2009 06:00:00')
self.assertEqual(datetime.datetime(2009, 12, 31, 6, 0, 0), form6.cleaned_data['date_added'])
with self.settings(USE_THOUSAND_SEPARATOR=True):
# Checking for the localized "products_delivered" field
self.assertInHTML('<input type="text" name="products_delivered" value="12.000" id="id_products_delivered" />', form6.as_ul())
def test_sanitize_separators(self):
"""
Tests django.utils.formats.sanitize_separators.
"""
# Non-strings are untouched
self.assertEqual(sanitize_separators(123), 123)
with translation.override('ru', deactivate=True):
# Russian locale has non-breaking space (\xa0) as thousand separator
# Check that usual space is accepted too when sanitizing inputs
with self.settings(USE_THOUSAND_SEPARATOR=True):
self.assertEqual(sanitize_separators('1\xa0234\xa0567'), '1234567')
self.assertEqual(sanitize_separators('77\xa0777,777'), '77777.777')
self.assertEqual(sanitize_separators('12 345'), '12345')
self.assertEqual(sanitize_separators('77 777,777'), '77777.777')
with self.settings(USE_THOUSAND_SEPARATOR=True, USE_L10N=False):
self.assertEqual(sanitize_separators('12\xa0345'), '12\xa0345')
with patch_formats(get_language(), THOUSAND_SEPARATOR='.', DECIMAL_SEPARATOR=','):
with self.settings(USE_THOUSAND_SEPARATOR=True):
self.assertEqual(sanitize_separators('10.234'), '10234')
# Suspicion that user entered dot as decimal separator (#22171)
self.assertEqual(sanitize_separators('10.10'), '10.10')
def test_iter_format_modules(self):
"""
Tests the iter_format_modules function.
"""
# Importing some format modules so that we can compare the returned
# modules with these expected modules
default_mod = import_module('django.conf.locale.de.formats')
test_mod = import_module('i18n.other.locale.de.formats')
test_mod2 = import_module('i18n.other2.locale.de.formats')
with translation.override('de-at', deactivate=True):
# Should return the correct default module when no setting is set
self.assertEqual(list(iter_format_modules('de')), [default_mod])
# When the setting is a string, should return the given module and
# the default module
self.assertEqual(
list(iter_format_modules('de', 'i18n.other.locale')),
[test_mod, default_mod])
# When setting is a list of strings, should return the given
# modules and the default module
self.assertEqual(
list(iter_format_modules('de', ['i18n.other.locale', 'i18n.other2.locale'])),
[test_mod, test_mod2, default_mod])
def test_iter_format_modules_stability(self):
"""
Tests the iter_format_modules function always yields format modules in
a stable and correct order in presence of both base ll and ll_CC formats.
"""
en_format_mod = import_module('django.conf.locale.en.formats')
en_gb_format_mod = import_module('django.conf.locale.en_GB.formats')
self.assertEqual(list(iter_format_modules('en-gb')), [en_gb_format_mod, en_format_mod])
def test_get_format_modules_lang(self):
with translation.override('de', deactivate=True):
self.assertEqual('.', get_format('DECIMAL_SEPARATOR', lang='en'))
def test_get_format_modules_stability(self):
with self.settings(FORMAT_MODULE_PATH='i18n.other.locale'):
with translation.override('de', deactivate=True):
old = str("%r") % get_format_modules(reverse=True)
new = str("%r") % get_format_modules(reverse=True) # second try
self.assertEqual(new, old, 'Value returned by get_formats_modules() must be preserved between calls.')
def test_localize_templatetag_and_filter(self):
"""
Tests the {% localize %} templatetag
"""
context = Context({'value': 3.14})
template1 = Template("{% load l10n %}{% localize %}{{ value }}{% endlocalize %};{% localize on %}{{ value }}{% endlocalize %}")
template2 = Template("{% load l10n %}{{ value }};{% localize off %}{{ value }};{% endlocalize %}{{ value }}")
template3 = Template('{% load l10n %}{{ value }};{{ value|unlocalize }}')
template4 = Template('{% load l10n %}{{ value }};{{ value|localize }}')
output1 = '3,14;3,14'
output2 = '3,14;3.14;3,14'
output3 = '3,14;3.14'
output4 = '3.14;3,14'
with translation.override('de', deactivate=True):
with self.settings(USE_L10N=False):
self.assertEqual(template1.render(context), output1)
self.assertEqual(template4.render(context), output4)
with self.settings(USE_L10N=True):
self.assertEqual(template1.render(context), output1)
self.assertEqual(template2.render(context), output2)
self.assertEqual(template3.render(context), output3)
def test_localized_as_text_as_hidden_input(self):
"""
Tests if form input with 'as_hidden' or 'as_text' is correctly localized. Ticket #18777
"""
self.maxDiff = 1200
with translation.override('de-at', deactivate=True):
template = Template('{% load l10n %}{{ form.date_added }}; {{ form.cents_paid }}')
template_as_text = Template('{% load l10n %}{{ form.date_added.as_text }}; {{ form.cents_paid.as_text }}')
template_as_hidden = Template('{% load l10n %}{{ form.date_added.as_hidden }}; {{ form.cents_paid.as_hidden }}')
form = CompanyForm({
'name': 'acme',
'date_added': datetime.datetime(2009, 12, 31, 6, 0, 0),
'cents_paid': decimal.Decimal('59.47'),
'products_delivered': 12000,
})
context = Context({'form': form})
self.assertTrue(form.is_valid())
self.assertHTMLEqual(
template.render(context),
'<input id="id_date_added" name="date_added" type="text" value="31.12.2009 06:00:00" />; <input id="id_cents_paid" name="cents_paid" type="text" value="59,47" />'
)
self.assertHTMLEqual(
template_as_text.render(context),
'<input id="id_date_added" name="date_added" type="text" value="31.12.2009 06:00:00" />; <input id="id_cents_paid" name="cents_paid" type="text" value="59,47" />'
)
self.assertHTMLEqual(
template_as_hidden.render(context),
'<input id="id_date_added" name="date_added" type="hidden" value="31.12.2009 06:00:00" />; <input id="id_cents_paid" name="cents_paid" type="hidden" value="59,47" />'
)
class MiscTests(SimpleTestCase):
def setUp(self):
super(MiscTests, self).setUp()
self.rf = RequestFactory()
@override_settings(LANGUAGE_CODE='de')
def test_english_fallback(self):
"""
With a non-English LANGUAGE_CODE and if the active language is English
or one of its variants, the untranslated string should be returned
(instead of falling back to LANGUAGE_CODE) (See #24413).
"""
self.assertEqual(ugettext("Image"), "Bild")
with translation.override('en'):
self.assertEqual(ugettext("Image"), "Image")
with translation.override('en-us'):
self.assertEqual(ugettext("Image"), "Image")
with translation.override('en-ca'):
self.assertEqual(ugettext("Image"), "Image")
def test_parse_spec_http_header(self):
"""
Testing HTTP header parsing. First, we test that we can parse the
values according to the spec (and that we extract all the pieces in
the right order).
"""
p = trans_real.parse_accept_lang_header
# Good headers.
self.assertEqual([('de', 1.0)], p('de'))
self.assertEqual([('en-au', 1.0)], p('en-AU'))
self.assertEqual([('es-419', 1.0)], p('es-419'))
self.assertEqual([('*', 1.0)], p('*;q=1.00'))
self.assertEqual([('en-au', 0.123)], p('en-AU;q=0.123'))
self.assertEqual([('en-au', 0.5)], p('en-au;q=0.5'))
self.assertEqual([('en-au', 1.0)], p('en-au;q=1.0'))
self.assertEqual([('da', 1.0), ('en', 0.5), ('en-gb', 0.25)], p('da, en-gb;q=0.25, en;q=0.5'))
self.assertEqual([('en-au-xx', 1.0)], p('en-au-xx'))
self.assertEqual([('de', 1.0), ('en-au', 0.75), ('en-us', 0.5), ('en', 0.25), ('es', 0.125), ('fa', 0.125)], p('de,en-au;q=0.75,en-us;q=0.5,en;q=0.25,es;q=0.125,fa;q=0.125'))
self.assertEqual([('*', 1.0)], p('*'))
self.assertEqual([('de', 1.0)], p('de;q=0.'))
self.assertEqual([('en', 1.0), ('*', 0.5)], p('en; q=1.0, * ; q=0.5'))
self.assertEqual([], p(''))
# Bad headers; should always return [].
self.assertEqual([], p('en-gb;q=1.0000'))
self.assertEqual([], p('en;q=0.1234'))
self.assertEqual([], p('en;q=.2'))
self.assertEqual([], p('abcdefghi-au'))
self.assertEqual([], p('**'))
self.assertEqual([], p('en,,gb'))
self.assertEqual([], p('en-au;q=0.1.0'))
self.assertEqual([], p('XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXZ,en'))
self.assertEqual([], p('da, en-gb;q=0.8, en;q=0.7,#'))
self.assertEqual([], p('de;q=2.0'))
self.assertEqual([], p('de;q=0.a'))
self.assertEqual([], p('12-345'))
self.assertEqual([], p(''))
self.assertEqual([], p('en; q=1,'))
def test_parse_literal_http_header(self):
"""
Now test that we parse a literal HTTP header correctly.
"""
g = get_language_from_request
r = self.rf.get('/')
r.COOKIES = {}
r.META = {'HTTP_ACCEPT_LANGUAGE': 'pt-br'}
self.assertEqual('pt-br', g(r))
r.META = {'HTTP_ACCEPT_LANGUAGE': 'pt'}
self.assertEqual('pt', g(r))
r.META = {'HTTP_ACCEPT_LANGUAGE': 'es,de'}
self.assertEqual('es', g(r))
r.META = {'HTTP_ACCEPT_LANGUAGE': 'es-ar,de'}
self.assertEqual('es-ar', g(r))
# This test assumes there won't be a Django translation to a US
# variation of the Spanish language, a safe assumption. When the
# user sets it as the preferred language, the main 'es'
# translation should be selected instead.
r.META = {'HTTP_ACCEPT_LANGUAGE': 'es-us'}
self.assertEqual(g(r), 'es')
        # This tests the following scenario: there is no main-language (zh)
        # translation of Django, but there is one for the variant (zh-hans).
        # When the user sets zh-hans as the preferred language, it should be
        # selected by Django directly, without being ignored or falling back.
r.META = {'HTTP_ACCEPT_LANGUAGE': 'zh-hans,de'}
self.assertEqual(g(r), 'zh-hans')
r.META = {'HTTP_ACCEPT_LANGUAGE': 'NL'}
self.assertEqual('nl', g(r))
r.META = {'HTTP_ACCEPT_LANGUAGE': 'fy'}
self.assertEqual('fy', g(r))
r.META = {'HTTP_ACCEPT_LANGUAGE': 'ia'}
self.assertEqual('ia', g(r))
r.META = {'HTTP_ACCEPT_LANGUAGE': 'sr-latn'}
self.assertEqual('sr-latn', g(r))
r.META = {'HTTP_ACCEPT_LANGUAGE': 'zh-hans'}
self.assertEqual('zh-hans', g(r))
r.META = {'HTTP_ACCEPT_LANGUAGE': 'zh-hant'}
self.assertEqual('zh-hant', g(r))
@override_settings(
LANGUAGES=[
('en', 'English'),
('zh-hans', 'Simplified Chinese'),
('zh-hant', 'Traditional Chinese'),
]
)
def test_support_for_deprecated_chinese_language_codes(self):
"""
Some browsers (Firefox, IE, etc.) send deprecated language codes. Once
those codes are removed in Django 1.9, they would be matched incorrectly:
for example zh-tw (traditional) would be interpreted as zh-hans
(simplified), which is wrong. So these deprecated language codes should
still be accepted and mapped to their replacements.
refs #18419 -- this is explicitly for browser compatibility
"""
g = get_language_from_request
r = self.rf.get('/')
r.COOKIES = {}
r.META = {'HTTP_ACCEPT_LANGUAGE': 'zh-cn,en'}
self.assertEqual(g(r), 'zh-hans')
r.META = {'HTTP_ACCEPT_LANGUAGE': 'zh-tw,en'}
self.assertEqual(g(r), 'zh-hant')
def test_special_fallback_language(self):
"""
Some languages may have special fallbacks that don't follow the simple
'fr-ca' -> 'fr' logic (notably Chinese codes).
"""
r = self.rf.get('/')
r.COOKIES = {}
r.META = {'HTTP_ACCEPT_LANGUAGE': 'zh-my,en'}
self.assertEqual(get_language_from_request(r), 'zh-hans')
def test_parse_language_cookie(self):
"""
Now test that we parse language preferences stored in a cookie correctly.
"""
g = get_language_from_request
r = self.rf.get('/')
r.COOKIES = {settings.LANGUAGE_COOKIE_NAME: 'pt-br'}
r.META = {}
self.assertEqual('pt-br', g(r))
r.COOKIES = {settings.LANGUAGE_COOKIE_NAME: 'pt'}
r.META = {}
self.assertEqual('pt', g(r))
r.COOKIES = {settings.LANGUAGE_COOKIE_NAME: 'es'}
r.META = {'HTTP_ACCEPT_LANGUAGE': 'de'}
self.assertEqual('es', g(r))
# This test assumes there won't be a Django translation to a US
# variation of the Spanish language, a safe assumption. When the
# user sets it as the preferred language, the main 'es'
# translation should be selected instead.
r.COOKIES = {settings.LANGUAGE_COOKIE_NAME: 'es-us'}
r.META = {}
self.assertEqual(g(r), 'es')
# This tests the following scenario: there isn't a main language (zh)
# translation of Django but there is a translation to variation (zh-hans)
# the user sets zh-hans as the preferred language, it should be selected
# by Django without falling back nor ignoring it.
r.COOKIES = {settings.LANGUAGE_COOKIE_NAME: 'zh-hans'}
r.META = {'HTTP_ACCEPT_LANGUAGE': 'de'}
self.assertEqual(g(r), 'zh-hans')
def test_get_language_from_path_real(self):
g = trans_real.get_language_from_path
self.assertEqual(g('/pl/'), 'pl')
self.assertEqual(g('/pl'), 'pl')
self.assertEqual(g('/xyz/'), None)
def test_get_language_from_path_null(self):
from django.utils.translation.trans_null import get_language_from_path as g
self.assertEqual(g('/pl/'), None)
self.assertEqual(g('/pl'), None)
self.assertEqual(g('/xyz/'), None)
@override_settings(LOCALE_PATHS=extended_locale_paths)
def test_percent_in_translatable_block(self):
t_sing = Template("{% load i18n %}{% blocktrans %}The result was {{ percent }}%{% endblocktrans %}")
t_plur = Template("{% load i18n %}{% blocktrans count num as number %}{{ percent }}% represents {{ num }} object{% plural %}{{ percent }}% represents {{ num }} objects{% endblocktrans %}")
with translation.override('de'):
self.assertEqual(t_sing.render(Context({'percent': 42})), 'Das Ergebnis war 42%')
self.assertEqual(t_plur.render(Context({'percent': 42, 'num': 1})), '42% stellt 1 Objekt dar')
self.assertEqual(t_plur.render(Context({'percent': 42, 'num': 4})), '42% stellt 4 Objekte dar')
@override_settings(LOCALE_PATHS=extended_locale_paths)
def test_percent_formatting_in_blocktrans(self):
"""
Test that Python's %-formatting is properly escaped in blocktrans, in
both the singular and plural forms.
"""
t_sing = Template("{% load i18n %}{% blocktrans %}There are %(num_comments)s comments{% endblocktrans %}")
t_plur = Template("{% load i18n %}{% blocktrans count num as number %}%(percent)s% represents {{ num }} object{% plural %}%(percent)s% represents {{ num }} objects{% endblocktrans %}")
with translation.override('de'):
# Strings won't get translated as they don't match after escaping %
self.assertEqual(t_sing.render(Context({'num_comments': 42})), 'There are %(num_comments)s comments')
self.assertEqual(t_plur.render(Context({'percent': 42, 'num': 1})), '%(percent)s% represents 1 object')
self.assertEqual(t_plur.render(Context({'percent': 42, 'num': 4})), '%(percent)s% represents 4 objects')
def test_cache_resetting(self):
"""
#14170: after changing the LANGUAGES setting, the language cache should be
cleared so that previously valid languages are no longer used.
"""
g = get_language_from_request
r = self.rf.get('/')
r.COOKIES = {}
r.META = {'HTTP_ACCEPT_LANGUAGE': 'pt-br'}
self.assertEqual('pt-br', g(r))
with self.settings(LANGUAGES=[('en', 'English')]):
self.assertNotEqual('pt-br', g(r))
class ResolutionOrderI18NTests(SimpleTestCase):
def setUp(self):
super(ResolutionOrderI18NTests, self).setUp()
activate('de')
def tearDown(self):
deactivate()
super(ResolutionOrderI18NTests, self).tearDown()
def assertUgettext(self, msgid, msgstr):
result = ugettext(msgid)
self.assertIn(msgstr, result, ("The string '%s' isn't in the "
"translation of '%s'; the actual result is '%s'." % (msgstr, msgid, result)))
class AppResolutionOrderI18NTests(ResolutionOrderI18NTests):
@override_settings(LANGUAGE_CODE='de')
def test_app_translation(self):
# Original translation.
self.assertUgettext('Date/time', 'Datum/Zeit')
# Different translation.
with self.modify_settings(INSTALLED_APPS={'append': 'i18n.resolution'}):
# Force refreshing translations.
activate('de')
# Doesn't work because it's added later in the list.
self.assertUgettext('Date/time', 'Datum/Zeit')
with self.modify_settings(INSTALLED_APPS={'remove': 'django.contrib.admin.apps.SimpleAdminConfig'}):
# Force refreshing translations.
activate('de')
# Unless the original is removed from the list.
self.assertUgettext('Date/time', 'Datum/Zeit (APP)')
@override_settings(LOCALE_PATHS=extended_locale_paths)
class LocalePathsResolutionOrderI18NTests(ResolutionOrderI18NTests):
def test_locale_paths_translation(self):
self.assertUgettext('Time', 'LOCALE_PATHS')
def test_locale_paths_override_app_translation(self):
with self.settings(INSTALLED_APPS=['i18n.resolution']):
self.assertUgettext('Time', 'LOCALE_PATHS')
class DjangoFallbackResolutionOrderI18NTests(ResolutionOrderI18NTests):
def test_django_fallback(self):
self.assertEqual(ugettext('Date/time'), 'Datum/Zeit')
class TestModels(TestCase):
def test_lazy(self):
tm = TestModel()
tm.save()
def test_safestr(self):
c = Company(cents_paid=12, products_delivered=1)
c.name = SafeText('Iñtërnâtiônàlizætiøn1')
c.save()
c.name = SafeBytes('Iñtërnâtiônàlizætiøn1'.encode('utf-8'))
c.save()
class TestLanguageInfo(SimpleTestCase):
def test_localized_language_info(self):
li = get_language_info('de')
self.assertEqual(li['code'], 'de')
self.assertEqual(li['name_local'], 'Deutsch')
self.assertEqual(li['name'], 'German')
self.assertEqual(li['bidi'], False)
def test_unknown_language_code(self):
six.assertRaisesRegex(self, KeyError, r"Unknown language code xx\.", get_language_info, 'xx')
def test_unknown_only_country_code(self):
li = get_language_info('de-xx')
self.assertEqual(li['code'], 'de')
self.assertEqual(li['name_local'], 'Deutsch')
self.assertEqual(li['name'], 'German')
self.assertEqual(li['bidi'], False)
def test_unknown_language_code_and_country_code(self):
six.assertRaisesRegex(self, KeyError, r"Unknown language code xx-xx and xx\.", get_language_info, 'xx-xx')
def test_fallback_language_code(self):
"""
get_language_info() returns the info of the first fallback language if the
lang_info struct does not contain the 'name' key.
"""
li = get_language_info('zh-my')
self.assertEqual(li['code'], 'zh-hans')
li = get_language_info('zh-hans')
self.assertEqual(li['code'], 'zh-hans')
class MultipleLocaleActivationTests(SimpleTestCase):
"""
Tests for template rendering behavior when multiple locales are activated
during the lifetime of the same process.
"""
def setUp(self):
super(MultipleLocaleActivationTests, self).setUp()
self._old_language = get_language()
def tearDown(self):
super(MultipleLocaleActivationTests, self).tearDown()
activate(self._old_language)
def test_single_locale_activation(self):
"""
Simple baseline behavior with one locale for all the supported i18n constructs.
"""
with translation.override('fr'):
self.assertEqual(Template("{{ _('Yes') }}").render(Context({})), 'Oui')
self.assertEqual(Template("{% load i18n %}{% trans 'Yes' %}").render(Context({})), 'Oui')
self.assertEqual(Template("{% load i18n %}{% blocktrans %}Yes{% endblocktrans %}").render(Context({})), 'Oui')
# Literal marked up with _() in a filter expression
def test_multiple_locale_filter(self):
with translation.override('de'):
t = Template("{% load i18n %}{{ 0|yesno:_('yes,no,maybe') }}")
with translation.override(self._old_language), translation.override('nl'):
self.assertEqual(t.render(Context({})), 'nee')
def test_multiple_locale_filter_deactivate(self):
with translation.override('de', deactivate=True):
t = Template("{% load i18n %}{{ 0|yesno:_('yes,no,maybe') }}")
with translation.override('nl'):
self.assertEqual(t.render(Context({})), 'nee')
def test_multiple_locale_filter_direct_switch(self):
with translation.override('de'):
t = Template("{% load i18n %}{{ 0|yesno:_('yes,no,maybe') }}")
with translation.override('nl'):
self.assertEqual(t.render(Context({})), 'nee')
# Literal marked up with _()
def test_multiple_locale(self):
with translation.override('de'):
t = Template("{{ _('No') }}")
with translation.override(self._old_language), translation.override('nl'):
self.assertEqual(t.render(Context({})), 'Nee')
def test_multiple_locale_deactivate(self):
with translation.override('de', deactivate=True):
t = Template("{{ _('No') }}")
with translation.override('nl'):
self.assertEqual(t.render(Context({})), 'Nee')
def test_multiple_locale_direct_switch(self):
with translation.override('de'):
t = Template("{{ _('No') }}")
with translation.override('nl'):
self.assertEqual(t.render(Context({})), 'Nee')
# Literal marked up with _(), loading the i18n template tag library
def test_multiple_locale_loadi18n(self):
with translation.override('de'):
t = Template("{% load i18n %}{{ _('No') }}")
with translation.override(self._old_language), translation.override('nl'):
self.assertEqual(t.render(Context({})), 'Nee')
def test_multiple_locale_loadi18n_deactivate(self):
with translation.override('de', deactivate=True):
t = Template("{% load i18n %}{{ _('No') }}")
with translation.override('nl'):
self.assertEqual(t.render(Context({})), 'Nee')
def test_multiple_locale_loadi18n_direct_switch(self):
with translation.override('de'):
t = Template("{% load i18n %}{{ _('No') }}")
with translation.override('nl'):
self.assertEqual(t.render(Context({})), 'Nee')
# trans i18n tag
def test_multiple_locale_trans(self):
with translation.override('de'):
t = Template("{% load i18n %}{% trans 'No' %}")
with translation.override(self._old_language), translation.override('nl'):
self.assertEqual(t.render(Context({})), 'Nee')
def test_multiple_locale_deactivate_trans(self):
with translation.override('de', deactivate=True):
t = Template("{% load i18n %}{% trans 'No' %}")
with translation.override('nl'):
self.assertEqual(t.render(Context({})), 'Nee')
def test_multiple_locale_direct_switch_trans(self):
with translation.override('de'):
t = Template("{% load i18n %}{% trans 'No' %}")
with translation.override('nl'):
self.assertEqual(t.render(Context({})), 'Nee')
# blocktrans i18n tag
def test_multiple_locale_btrans(self):
with translation.override('de'):
t = Template("{% load i18n %}{% blocktrans %}No{% endblocktrans %}")
with translation.override(self._old_language), translation.override('nl'):
self.assertEqual(t.render(Context({})), 'Nee')
def test_multiple_locale_deactivate_btrans(self):
with translation.override('de', deactivate=True):
t = Template("{% load i18n %}{% blocktrans %}No{% endblocktrans %}")
with translation.override('nl'):
self.assertEqual(t.render(Context({})), 'Nee')
def test_multiple_locale_direct_switch_btrans(self):
with translation.override('de'):
t = Template("{% load i18n %}{% blocktrans %}No{% endblocktrans %}")
with translation.override('nl'):
self.assertEqual(t.render(Context({})), 'Nee')
@override_settings(
USE_I18N=True,
LANGUAGES=[
('en', 'English'),
('fr', 'French'),
],
MIDDLEWARE_CLASSES=[
'django.middleware.locale.LocaleMiddleware',
'django.middleware.common.CommonMiddleware',
],
ROOT_URLCONF='i18n.urls',
)
class LocaleMiddlewareTests(TestCase):
def test_streaming_response(self):
# Regression test for #5241
response = self.client.get('/fr/streaming/')
self.assertContains(response, "Oui/Non")
response = self.client.get('/en/streaming/')
self.assertContains(response, "Yes/No")
@override_settings(
MIDDLEWARE_CLASSES=[
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.locale.LocaleMiddleware',
'django.middleware.common.CommonMiddleware',
],
)
def test_language_not_saved_to_session(self):
"""Checks that current language is not automatically saved to
session on every request."""
# Regression test for #21473
self.client.get('/fr/simple/')
self.assertNotIn(LANGUAGE_SESSION_KEY, self.client.session)
@override_settings(
USE_I18N=True,
LANGUAGES=[
('bg', 'Bulgarian'),
('en-us', 'English'),
('pt-br', 'Portugese (Brazil)'),
],
MIDDLEWARE_CLASSES=[
'django.middleware.locale.LocaleMiddleware',
'django.middleware.common.CommonMiddleware',
],
ROOT_URLCONF='i18n.urls'
)
class CountrySpecificLanguageTests(SimpleTestCase):
def setUp(self):
super(CountrySpecificLanguageTests, self).setUp()
self.rf = RequestFactory()
def test_check_for_language(self):
self.assertTrue(check_for_language('en'))
self.assertTrue(check_for_language('en-us'))
self.assertTrue(check_for_language('en-US'))
self.assertTrue(check_for_language('be'))
self.assertTrue(check_for_language('be@latin'))
self.assertTrue(check_for_language('sr-RS@latin'))
self.assertTrue(check_for_language('sr-RS@12345'))
self.assertFalse(check_for_language('en-ü'))
self.assertFalse(check_for_language('en\x00'))
self.assertFalse(check_for_language(None))
self.assertFalse(check_for_language('be@ '))
# Specifying encoding is not supported (Django enforces UTF-8)
self.assertFalse(check_for_language('tr-TR.UTF-8'))
self.assertFalse(check_for_language('tr-TR.UTF8'))
self.assertFalse(check_for_language('de-DE.utf-8'))
def test_get_language_from_request(self):
# issue 19919
r = self.rf.get('/')
r.COOKIES = {}
r.META = {'HTTP_ACCEPT_LANGUAGE': 'en-US,en;q=0.8,bg;q=0.6,ru;q=0.4'}
lang = get_language_from_request(r)
self.assertEqual('en-us', lang)
r = self.rf.get('/')
r.COOKIES = {}
r.META = {'HTTP_ACCEPT_LANGUAGE': 'bg-bg,en-US;q=0.8,en;q=0.6,ru;q=0.4'}
lang = get_language_from_request(r)
self.assertEqual('bg', lang)
def test_specific_language_codes(self):
# issue 11915
r = self.rf.get('/')
r.COOKIES = {}
r.META = {'HTTP_ACCEPT_LANGUAGE': 'pt,en-US;q=0.8,en;q=0.6,ru;q=0.4'}
lang = get_language_from_request(r)
self.assertEqual('pt-br', lang)
r = self.rf.get('/')
r.COOKIES = {}
r.META = {'HTTP_ACCEPT_LANGUAGE': 'pt-pt,en-US;q=0.8,en;q=0.6,ru;q=0.4'}
lang = get_language_from_request(r)
self.assertEqual('pt-br', lang)
class TranslationFilesMissing(SimpleTestCase):
def setUp(self):
super(TranslationFilesMissing, self).setUp()
self.gettext_find_builtin = gettext_module.find
def tearDown(self):
gettext_module.find = self.gettext_find_builtin
super(TranslationFilesMissing, self).tearDown()
def patchGettextFind(self):
gettext_module.find = lambda *args, **kw: None
def test_failure_finding_default_mo_files(self):
'''
Ensure IOError is raised if the default language is unparseable.
Refs: #18192
'''
self.patchGettextFind()
trans_real._translations = {}
self.assertRaises(IOError, activate, 'en')
| bsd-3-clause |
rahya/parser-and-set | gtest-1.7/xcode/Scripts/versiongenerate.py | 3088 | 4536 | #!/usr/bin/env python
#
# Copyright 2008, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""A script to prepare version informtion for use the gtest Info.plist file.
This script extracts the version information from the configure.ac file and
uses it to generate a header file containing the same information. The
#defines in this header file will be included during the generation of
the Info.plist of the framework, giving the correct value to the version
shown in the Finder.
This script makes the following assumptions (these are faults of the script,
not problems with the Autoconf):
1. The AC_INIT macro will be contained within the first 1024 characters
of configure.ac
2. The version string will be 3 integers separated by periods and will be
surrounded by square brackets, "[" and "]" (e.g. [1.0.1]). The first
segment represents the major version, the second represents the minor
version and the third represents the fix version.
3. No ")" character exists between the opening "(" and closing ")" of
AC_INIT, including in comments and character strings.
"""
import sys
import re
# Read the command line argument (the output directory for Version.h)
if (len(sys.argv) < 3):
print "Usage: versiongenerate.py input_dir output_dir"
sys.exit(1)
else:
input_dir = sys.argv[1]
output_dir = sys.argv[2]
# Read the first 1024 characters of the configure.ac file
config_file = open("%s/configure.ac" % input_dir, 'r')
buffer_size = 1024
opening_string = config_file.read(buffer_size)
config_file.close()
# Extract the version string from the AC_INIT macro
# The following init_expression means:
# Extract three integers separated by periods and surrounded by square
# brackets (e.g. "[1.0.1]") between "AC_INIT(" and ")". Do not be greedy
# (*? is the non-greedy flag) since that would pull in everything between
# the first "(" and the last ")" in the file.
version_expression = re.compile(r"AC_INIT\(.*?\[(\d+)\.(\d+)\.(\d+)\].*?\)",
re.DOTALL)
version_values = version_expression.search(opening_string)
major_version = version_values.group(1)
minor_version = version_values.group(2)
fix_version = version_values.group(3)
# Write the version information to a header file to be included in the
# Info.plist file.
file_data = """//
// DO NOT MODIFY THIS FILE (but you can delete it)
//
// This file is autogenerated by the versiongenerate.py script. This script
// is executed in a "Run Script" build phase when creating gtest.framework. This
// header file is not used during compilation of C-source. Rather, it simply
// defines some version strings for substitution in the Info.plist. Because of
// this, we are not restricted to C-syntax nor are we using include guards.
//
#define GTEST_VERSIONINFO_SHORT %s.%s
#define GTEST_VERSIONINFO_LONG %s.%s.%s
""" % (major_version, minor_version, major_version, minor_version, fix_version)
version_file = open("%s/Version.h" % output_dir, 'w')
version_file.write(file_data)
version_file.close()
| gpl-3.0 |
Bryukh/ajenti | plugins/squid/main.py | 17 | 1237 | from ajenti.ui import *
from ajenti.com import implements
from ajenti.api import *
from ajenti import apis
from backend import *
class SquidPlugin(apis.services.ServiceControlPlugin):
text = 'Squid'
icon = '/dl/squid/icon.png'
folder = 'servers'
service_name = 'squid'
def get_config(self):
return self.app.get_config(self._cfg)
def on_session_start(self):
self._tab = 0
self._cfg = SquidConfig(self.app)
self._cfg.load()
self._parts = sorted(self.app.grab_plugins(apis.squid.IPluginPart),
key=lambda x: x.weight)
idx = 0
for p in self._parts:
p.init(self, self._cfg, idx)
idx += 1
def get_main_ui(self):
tc = UI.TabControl(active=self._tab)
for p in self._parts:
tc.add(p.title, p.get_ui())
return UI.Pad(tc)
@event('button/click')
def on_click(self, event, params, vars=None):
for p in self._parts:
p.on_click(event, params, vars)
@event('dialog/submit')
@event('form/submit')
def on_submit(self, event, params, vars=None):
for p in self._parts:
p.on_submit(event, params, vars)
| lgpl-3.0 |
mollstam/UnrealPy | UnrealPyEmbed/Development/Python/2015.08.07-Python2710-x64-Source-vs2015/Python27/Source/pip-7.1.0/pip/_vendor/html5lib/treewalkers/lxmletree.py | 436 | 5992 | from __future__ import absolute_import, division, unicode_literals
from pip._vendor.six import text_type
from lxml import etree
from ..treebuilders.etree import tag_regexp
from . import _base
from .. import ihatexml
def ensure_str(s):
if s is None:
return None
elif isinstance(s, text_type):
return s
else:
return s.decode("utf-8", "strict")
class Root(object):
def __init__(self, et):
self.elementtree = et
self.children = []
if et.docinfo.internalDTD:
self.children.append(Doctype(self,
ensure_str(et.docinfo.root_name),
ensure_str(et.docinfo.public_id),
ensure_str(et.docinfo.system_url)))
root = et.getroot()
node = root
while node.getprevious() is not None:
node = node.getprevious()
while node is not None:
self.children.append(node)
node = node.getnext()
self.text = None
self.tail = None
def __getitem__(self, key):
return self.children[key]
def getnext(self):
return None
def __len__(self):
return 1
class Doctype(object):
def __init__(self, root_node, name, public_id, system_id):
self.root_node = root_node
self.name = name
self.public_id = public_id
self.system_id = system_id
self.text = None
self.tail = None
def getnext(self):
return self.root_node.children[1]
class FragmentRoot(Root):
def __init__(self, children):
self.children = [FragmentWrapper(self, child) for child in children]
self.text = self.tail = None
def getnext(self):
return None
class FragmentWrapper(object):
def __init__(self, fragment_root, obj):
self.root_node = fragment_root
self.obj = obj
if hasattr(self.obj, 'text'):
self.text = ensure_str(self.obj.text)
else:
self.text = None
if hasattr(self.obj, 'tail'):
self.tail = ensure_str(self.obj.tail)
else:
self.tail = None
def __getattr__(self, name):
return getattr(self.obj, name)
def getnext(self):
siblings = self.root_node.children
idx = siblings.index(self)
if idx < len(siblings) - 1:
return siblings[idx + 1]
else:
return None
def __getitem__(self, key):
return self.obj[key]
def __bool__(self):
return bool(self.obj)
def getparent(self):
return None
def __str__(self):
return str(self.obj)
def __unicode__(self):
return str(self.obj)
def __len__(self):
return len(self.obj)
class TreeWalker(_base.NonRecursiveTreeWalker):
def __init__(self, tree):
if hasattr(tree, "getroot"):
tree = Root(tree)
elif isinstance(tree, list):
tree = FragmentRoot(tree)
_base.NonRecursiveTreeWalker.__init__(self, tree)
self.filter = ihatexml.InfosetFilter()
def getNodeDetails(self, node):
if isinstance(node, tuple): # Text node
node, key = node
assert key in ("text", "tail"), "Text nodes are text or tail, found %s" % key
return _base.TEXT, ensure_str(getattr(node, key))
elif isinstance(node, Root):
return (_base.DOCUMENT,)
elif isinstance(node, Doctype):
return _base.DOCTYPE, node.name, node.public_id, node.system_id
elif isinstance(node, FragmentWrapper) and not hasattr(node, "tag"):
return _base.TEXT, node.obj
elif node.tag == etree.Comment:
return _base.COMMENT, ensure_str(node.text)
elif node.tag == etree.Entity:
return _base.ENTITY, ensure_str(node.text)[1:-1] # strip &;
else:
# This is assumed to be an ordinary element
match = tag_regexp.match(ensure_str(node.tag))
if match:
namespace, tag = match.groups()
else:
namespace = None
tag = ensure_str(node.tag)
attrs = {}
for name, value in list(node.attrib.items()):
name = ensure_str(name)
value = ensure_str(value)
match = tag_regexp.match(name)
if match:
attrs[(match.group(1), match.group(2))] = value
else:
attrs[(None, name)] = value
return (_base.ELEMENT, namespace, self.filter.fromXmlName(tag),
attrs, len(node) > 0 or node.text)
def getFirstChild(self, node):
assert not isinstance(node, tuple), "Text nodes have no children"
assert len(node) or node.text, "Node has no children"
if node.text:
return (node, "text")
else:
return node[0]
def getNextSibling(self, node):
if isinstance(node, tuple): # Text node
node, key = node
assert key in ("text", "tail"), "Text nodes are text or tail, found %s" % key
if key == "text":
# XXX: we cannot use a "bool(node) and node[0] or None" construct here
# because node[0] might evaluate to False if it has no child element
if len(node):
return node[0]
else:
return None
else: # tail
return node.getnext()
return (node, "tail") if node.tail else node.getnext()
def getParentNode(self, node):
if isinstance(node, tuple): # Text node
node, key = node
assert key in ("text", "tail"), "Text nodes are text or tail, found %s" % key
if key == "text":
return node
# else: fallback to "normal" processing
return node.getparent()
| mit |
githubashto/browserscope | base/summary_test_set.py | 9 | 2474 | #!/usr/bin/python2.4
#
# Copyright 2009 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the 'License')
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an 'AS IS' BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Benchmark Tests Definitions."""
import logging
from categories import test_set_base
from categories import all_test_sets
class SummaryTest(test_set_base.TestBase):
def __init__(self, category, category_name):
test_set_base.TestBase.__init__(
self,
key=category,
name=category_name,
url=None,
doc=None,
min_value=0,
max_value=0)
_TESTS = []
for test_set in all_test_sets.GetVisibleTestSets():
_TESTS.append(SummaryTest(test_set.category, test_set.category_name))
class SummaryTestSet(test_set_base.TestSet):
def GetTestScoreAndDisplayValue(self, test_key, raw_scores):
"""Get a normalized score (0 to 100) and a value to output to the display.
Args:
test_key: a key for a test_set test.
raw_scores: a dict of raw_scores indexed by test keys.
Returns:
score, display_value
# score is from 0 to 100.
# display_value is the text for the cell.
"""
score = raw_scores[test_key]
return score, score
def GetRowScoreAndDisplayValue(self, results):
"""Get the overall score for this row of results data.
Args:
results: A dictionary that looks like:
{
'testkey1': {'score': 1-10, 'median': median, 'display': 'celltext'},
'testkey2': {'score': 1-10, 'median': median, 'display': 'celltext'},
etc...
}
Returns:
A tuple of (score, display)
Where score is a value between 1-100.
And display is the text for the cell.
"""
logging.info('summary getrowscore results: %s' % results)
if not results.has_key('score') or results['score']['median'] is None:
score = 0
else:
score = results['score']['median']
return score, score
TEST_SET = SummaryTestSet(
category='summary',
category_name='Summary',
tests=_TESTS,
test_page=''
)
| apache-2.0 |
lmcro/webserver | qa/util.py | 5 | 7182 | # -*- coding: utf-8 -*-
import os, sys, time, random, fcntl, socket, math
from conf import *
term = os.getenv("TERM")
if term and \
('color' in term or term in ('rxvt', 'xterm')):
MESSAGE_SUCCESS = '\033[0;48;36m Success \033[0m' # Blue
MESSAGE_FAILED = '\033[0;48;31m Failed \033[0m' # Red
MESSAGE_SKIPPED = '\033[0;48;33m Skipped \033[0m' # Yellow
MESSAGE_DISABLED = '\033[0;48;35m Disabled \033[0m' # Purple
else:
MESSAGE_SUCCESS = ' Success'
MESSAGE_FAILED = ' Failed'
MESSAGE_SKIPPED = ' Skipped'
MESSAGE_DISABLED = 'Disabled'
def count_down (msg, nsecs, nl=True):
for s in range(nsecs):
sys.stdout.write ((msg+'\r') % (nsecs - s - 1))
sys.stdout.flush()
time.sleep(1)
if nl:
print
def str_random_generate (n):
c = ""
x = int (random.random() * 100)
y = int (random.random() * 100)
z = int (random.random() * 100)
for i in xrange(n):
x = (171 * x) % 30269
y = (172 * y) % 30307
z = (170 * z) % 30323
c += chr(32 + int((x/30269.0 + y/30307.0 + z/30323.0) * 1000 % 95))
return c
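# Note (added comment, not in the original source): the recurrences above
# (171*x % 30269, 172*y % 30307, 170*z % 30323) appear to follow the
# Wichmann-Hill pseudo-random generator; the three normalized terms are
# summed and mapped onto the printable ASCII range (32 + k with k < 95).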
def letters_random_generate (n):
c = ""
x = int (random.random() * 100)
y = int (random.random() * 100)
z = int (random.random() * 100)
for i in xrange(n):
x = (171 * x) % 30269
y = (172 * y) % 30307
z = (170 * z) % 30323
if z%2:
c += chr(65 + int((x/30269.0 + y/30307.0 + z/30323.0) * 1000 % 25))
else:
c += chr(97 + int((x/30269.0 + y/30307.0 + z/30323.0) * 1000 % 25))
return c
str_buf = ""
letters_buf = ""
def letters_random (n):
global letters_buf
letters_len = len(letters_buf)
if letters_len == 0:
letters_random_generate (1000)
letters_len = len(letters_buf)
offset = random.randint (0, letters_len)
if letters_len - offset > n:
return letters_buf[offset:offset+n]
tmp = letters_random_generate (n - (letters_len - offset))
letters_buf += tmp
return letters_buf[offset:]
def str_random (n):
global str_buf
str_len = len(str_buf)
if str_len == 0:
str_random_generate (1000)
str_len = len(str_buf)
offset = random.randint (0, str_len)
if str_len - offset > n:
return str_buf[offset:offset+n]
tmp = str_random_generate (n - (str_len - offset))
str_buf += tmp
return str_buf[offset:]
def check_php_interpreter (fullpath):
f = os.popen ('%s -v' %(fullpath), 'r')
all = f.read()
try: f.close()
except: pass
return 'fcgi' in all
__php_ref = None
def look_for_php():
global __php_ref
if __php_ref != None:
return __php_ref
if PHPCGI_PATH != "auto":
if check_php_interpreter (PHPCGI_PATH):
__php_ref = PHPCGI_PATH
return __php_ref
dirs = os.getenv("PATH").split(":")
dirs += PHP_DIRS
for p in dirs:
for n in PHP_NAMES:
php = os.path.join(p,n)
if os.path.exists(php):
if check_php_interpreter(php):
__php_ref = php
return php
__php_ref = ''
return __php_ref
__python_ref = None
def look_for_python():
global __python_ref
if __python_ref != None:
return __python_ref
if PYTHON_PATH != "auto":
__python_ref = PYTHON_PATH
return __python_ref
dirs = os.getenv("PATH").split(":")
dirs += PYTHON_DIRS
for p in dirs:
for n in PYTHON_NAMES:
py = os.path.join(p,n)
if os.path.exists(py):
__python_ref = py
return py
__python_ref = ''
return __python_ref
def look_for_exec_in_path (name):
for dir in os.getenv("PATH").split(":"):
fp = os.path.join (dir, name)
if os.path.exists (fp):
return fp
def print_key (key, val):
print "%10s: %s" % (key, val)
__free_port = 5000
def get_free_port():
global __free_port
__free_port += 1
return __free_port
__next_source = 0
def get_next_source():
global __next_source
__next_source += 1
return __next_source
#
# Plug-in checking
#
_server_info = None
def cherokee_get_server_info ():
global _server_info
if _server_info == None:
try:
f = os.popen ("%s -i" % (CHEROKEE_SRV_PATH))
_server_info = f.read()
f.close()
except:
pass
return _server_info
_built_in_list = []
_built_in_list_done = False
def cherokee_build_info_has (filter, module):
# Let's see whether it's built-in
global _built_in_list
global _built_in_list_done
if not _built_in_list_done:
_built_in_list_done = True
try:
f = os.popen ("%s -i" % (CHEROKEE_SRV_PATH))
cont = f.read()
f.close()
except:
pass
try:
filter_string = " %s: " % (filter)
for l in cont.split("\n"):
if l.startswith(filter_string):
line = l.replace (filter_string, "")
break
_built_in_list = line.split(" ")
except:
pass
return module in _built_in_list
def cherokee_has_plugin (module):
# Check for the dynamic plug-in
try:
mods = filter(lambda x: module in x, os.listdir(CHEROKEE_PLUGINDIR))
if len(mods) >= 1:
return True
except:
pass
return cherokee_build_info_has ("Built-in", module)
def print_sec (content):
fcntl.flock (sys.stdout, fcntl.LOCK_EX)
print content
fcntl.flock (sys.stdout, fcntl.LOCK_UN)
def ip_is_private(ip):
return (ip.startswith("10.") or \
ip.startswith("172.16.") or \
ip.startswith("192.168."))
def figure_public_ip():
def ip_cmp(x,y):
# Public vs Private
if ip_is_private(x) and (not ip_is_private(y)):
return 1
elif ip_is_private(y) and (not ip_is_private(x)):
return -1
# Private
elif x.startswith('10.') and y.startswith('192.'):
return -1
elif x.startswith('192.') and y.startswith('10.'):
return 1
# Default
return cmp(x,y)
ips = socket.gethostbyname_ex (socket.gethostname())[2]
ips.sort(ip_cmp)
return ips[0]
def get_forwarded_http_header (header):
return 'HTTP_' + header.upper().replace('-','_')
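# For example (comment added for illustration):
#     get_forwarded_http_header('X-Forwarded-For') == 'HTTP_X_FORWARDED_FOR'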
def chunk_encode (txt, size=None, pieces=None):
out = ''
if not pieces:
pieces = random.randint (1, 10)
if not size:
size = int (math.ceil (len(txt)/float(pieces)))
for n in range(pieces):
sub = txt[(n*size):(n+1)*size]
out += hex(len(sub))[2:]
out += '\r\n'
out += sub
out += '\r\n'
out += '0\r\n'
return out
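# Illustrative (added comment): chunk_encode('hello', size=2, pieces=3)
# returns '2\r\nhe\r\n2\r\nll\r\n1\r\no\r\n0\r\n' -- hex-sized chunks
# followed by the terminating zero-length chunk.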
# Workarounds Python's BUG #1515:
# "deepcopy doesn't copy instance methods"
#
# http://bugs.python.org/issue1515
#
import copy
import types
def _deepcopy_method(x, memo):
return type(x)(x.im_func, copy.deepcopy(x.im_self, memo), x.im_class)
copy._deepcopy_dispatch[types.MethodType] = _deepcopy_method
| gpl-2.0 |
daodaoliang/bokeh | bokeh/settings.py | 41 | 3622 | from __future__ import absolute_import
import logging
import os
from os.path import join, abspath, isdir
from .util.paths import ROOT_DIR, bokehjsdir
class Settings(object):
_prefix = "BOKEH_"
debugjs = False
@property
def _environ(self):
return os.environ
def _get(self, key, default=None):
return self._environ.get(self._prefix + key, default)
@property
def _is_dev(self):
return self._get_bool("DEV", False)
def _dev_or_default(self, default, dev):
return dev if dev is not None and self._is_dev else default
def _get_str(self, key, default, dev=None):
return self._get(key, self._dev_or_default(default, dev))
def _get_bool(self, key, default, dev=None):
value = self._get(key)
if value is None:
value = self._dev_or_default(default, dev)
elif value.lower() in ["true", "yes", "on", "1"]:
value = True
elif value.lower() in ["false", "no", "off", "0"]:
value = False
else:
raise ValueError("invalid value %r for boolean property %s%s" % (value, self._prefix, key))
return value
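# Illustrative (added comment): with BOKEH_MINIFIED=yes in the environment,
# the minified() accessor defined below resolves to True; BOKEH_MINIFIED=0
# resolves to False; any other string raises ValueError; when unset, the
# supplied default (or the dev fallback) is used.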
def browser(self, default=None):
return self._get_str("BROWSER", default, "none")
def resources(self, default=None):
return self._get_str("RESOURCES", default, "absolute-dev")
def rootdir(self, default=None):
return self._get_str("ROOTDIR", default)
def version(self, default=None):
return self._get_str("VERSION", default)
def docs_cdn(self, default=None):
return self._get_str("DOCS_CDN", default)
def docs_version(self, default=None):
return self._get_str("DOCS_VERSION", default)
def minified(self, default=None):
return self._get_bool("MINIFIED", default, False)
def log_level(self, default=None):
return self._get_str("LOG_LEVEL", default, "debug")
def py_log_level(self, default='none'):
level = self._get_str("PY_LOG_LEVEL", default, "debug")
LEVELS = {'debug': logging.DEBUG,
'info' : logging.INFO,
'warn' : logging.WARNING,
'error': logging.ERROR,
'fatal': logging.CRITICAL,
'none' : None}
return LEVELS[level]
def pretty(self, default=None):
return self._get_bool("PRETTY", default, True)
def simple_ids(self, default=None):
return self._get_bool("SIMPLE_IDS", default, True)
def strict(self, default=None):
return self._get_bool("STRICT", default, False)
"""
Server settings go here:
"""
def bokehjssrcdir(self):
if self.debugjs:
bokehjssrcdir = abspath(join(ROOT_DIR, '..', 'bokehjs', 'src'))
if isdir(bokehjssrcdir):
return bokehjssrcdir
return None
def bokehjsdir(self):
return bokehjsdir(self.debugjs)
def js_files(self):
bokehjsdir = self.bokehjsdir()
js_files = []
for root, dirnames, files in os.walk(bokehjsdir):
for fname in files:
if fname.endswith(".js") and 'vendor' not in root:
js_files.append(join(root, fname))
return js_files
def css_files(self):
bokehjsdir = self.bokehjsdir()
js_files = []
for root, dirnames, files in os.walk(bokehjsdir):
for fname in files:
if fname.endswith(".css") and 'vendor' not in root:
js_files.append(join(root, fname))
return js_files
settings = Settings()
del Settings
| bsd-3-clause |
nju520/django | tests/null_fk_ordering/models.py | 210 | 1605 | """
Regression tests for proper working of ForeignKey(null=True). Tests these bugs:
* #7512: including a nullable foreign key reference in Meta ordering has unexpected results
"""
from __future__ import unicode_literals
from django.db import models
from django.utils.encoding import python_2_unicode_compatible
# The first two models represent a very simple null FK ordering case.
class Author(models.Model):
name = models.CharField(max_length=150)
@python_2_unicode_compatible
class Article(models.Model):
title = models.CharField(max_length=150)
author = models.ForeignKey(Author, models.SET_NULL, null=True)
def __str__(self):
return 'Article titled: %s' % (self.title, )
class Meta:
ordering = ['author__name', ]
# These following 4 models represent a far more complex ordering case.
class SystemInfo(models.Model):
system_name = models.CharField(max_length=32)
class Forum(models.Model):
system_info = models.ForeignKey(SystemInfo, models.CASCADE)
forum_name = models.CharField(max_length=32)
@python_2_unicode_compatible
class Post(models.Model):
forum = models.ForeignKey(Forum, models.SET_NULL, null=True)
title = models.CharField(max_length=32)
def __str__(self):
return self.title
@python_2_unicode_compatible
class Comment(models.Model):
post = models.ForeignKey(Post, models.SET_NULL, null=True)
comment_text = models.CharField(max_length=250)
class Meta:
ordering = ['post__forum__system_info__system_name', 'comment_text']
def __str__(self):
return self.comment_text
| bsd-3-clause |
alexhersh/calico | calico/geventutils.py | 2 | 1417 | # -*- coding: utf-8 -*-
# Copyright 2015 Metaswitch Networks
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
calico.geventutils
~~~~~~~~~~~~~~~~~~
Helper utilities for gevent.
"""
import itertools
import logging
import gevent
import gevent.local
_log = logging.getLogger(__name__)
tid_storage = gevent.local.local()
tid_counter = itertools.count()
# Ought to do itertools.count(start=1), but python 2.6 does not support it.
tid_counter.next()
def greenlet_id():
"""
Returns an integer greenlet ID.
itertools.count() is atomic, if the internet is correct.
http://stackoverflow.com/questions/23547604/python-counter-atomic-increment
"""
try:
tid = tid_storage.tid
except:
tid = tid_counter.next()
tid_storage.tid = tid
return tid
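# Illustrative note (added comment): repeated calls from the same greenlet
# return the same small integer, while each new greenlet draws the next value
# from tid_counter, so log lines can be correlated per greenlet via the
# filter below.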
class GreenletFilter(logging.Filter):
def filter(self, record):
record.tid = greenlet_id()
return True
| apache-2.0 |
jgao54/airflow | tests/utils/log/test_s3_task_handler.py | 18 | 7102 | # -*- coding: utf-8 -*-
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import mock
import unittest
import os
from airflow import configuration
from airflow.utils.log.s3_task_handler import S3TaskHandler
from airflow.utils.state import State
from airflow.utils.timezone import datetime
from airflow.hooks.S3_hook import S3Hook
from airflow.models import TaskInstance, DAG
from airflow.operators.dummy_operator import DummyOperator
try:
import boto3
import moto
from moto import mock_s3
except ImportError:
mock_s3 = None
@unittest.skipIf(mock_s3 is None,
"Skipping test because moto.mock_s3 is not available")
@mock_s3
class TestS3TaskHandler(unittest.TestCase):
def setUp(self):
super(TestS3TaskHandler, self).setUp()
self.remote_log_base = 's3://bucket/remote/log/location'
self.remote_log_location = 's3://bucket/remote/log/location/1.log'
self.remote_log_key = 'remote/log/location/1.log'
self.local_log_location = 'local/log/location'
self.filename_template = '{try_number}.log'
self.s3_task_handler = S3TaskHandler(
self.local_log_location,
self.remote_log_base,
self.filename_template
)
configuration.load_test_config()
date = datetime(2016, 1, 1)
self.dag = DAG('dag_for_testing_file_task_handler', start_date=date)
task = DummyOperator(task_id='task_for_testing_file_log_handler', dag=self.dag)
self.ti = TaskInstance(task=task, execution_date=date)
self.ti.try_number = 1
self.ti.state = State.RUNNING
self.addCleanup(self.dag.clear)
self.conn = boto3.client('s3')
# We need to create the bucket since this is all in Moto's 'virtual'
# AWS account
moto.core.moto_api_backend.reset()
self.conn.create_bucket(Bucket="bucket")
def tearDown(self):
if self.s3_task_handler.handler:
try:
os.remove(self.s3_task_handler.handler.baseFilename)
except Exception:
pass
def test_hook(self):
self.assertIsInstance(self.s3_task_handler.hook, S3Hook)
def test_hook_raises(self):
handler = self.s3_task_handler
with mock.patch.object(handler.log, 'error') as mock_error:
with mock.patch("airflow.hooks.S3_hook.S3Hook") as mock_hook:
mock_hook.side_effect = Exception('Failed to connect')
# Initialize the hook
handler.hook
mock_error.assert_called_once_with(
'Could not create an S3Hook with connection id "%s". Please make '
'sure that airflow[s3] is installed and the S3 connection exists.',
''
)
def test_log_exists(self):
self.conn.put_object(Bucket='bucket', Key=self.remote_log_key, Body=b'')
self.assertTrue(self.s3_task_handler.s3_log_exists(self.remote_log_location))
def test_log_exists_none(self):
self.assertFalse(self.s3_task_handler.s3_log_exists(self.remote_log_location))
def test_log_exists_raises(self):
self.assertFalse(self.s3_task_handler.s3_log_exists('s3://nonexistentbucket/foo'))
def test_log_exists_no_hook(self):
with mock.patch("airflow.hooks.S3_hook.S3Hook") as mock_hook:
mock_hook.side_effect = Exception('Failed to connect')
self.assertFalse(self.s3_task_handler.s3_log_exists(self.remote_log_location))
def test_read(self):
self.conn.put_object(Bucket='bucket', Key=self.remote_log_key, Body=b'Log line\n')
self.assertEqual(
self.s3_task_handler.read(self.ti),
(['*** Reading remote log from s3://bucket/remote/log/location/1.log.\n'
'Log line\n\n'], [{'end_of_log': True}])
)
def test_read_when_s3_log_missing(self):
log, metadata = self.s3_task_handler.read(self.ti)
self.assertEqual(1, len(log))
self.assertEqual(len(log), len(metadata))
self.assertIn('*** Log file does not exist:', log[0])
self.assertEqual({'end_of_log': True}, metadata[0])
def test_read_raises_return_error(self):
handler = self.s3_task_handler
url = 's3://nonexistentbucket/foo'
with mock.patch.object(handler.log, 'error') as mock_error:
result = handler.s3_read(url, return_error=True)
msg = 'Could not read logs from %s' % url
self.assertEqual(result, msg)
mock_error.assert_called_once_with(msg, exc_info=True)
def test_write(self):
with mock.patch.object(self.s3_task_handler.log, 'error') as mock_error:
self.s3_task_handler.s3_write('text', self.remote_log_location)
# We shouldn't expect any error logs in the default working case.
mock_error.assert_not_called()
body = boto3.resource('s3').Object('bucket', self.remote_log_key).get()['Body'].read()
self.assertEqual(body, b'text')
def test_write_existing(self):
self.conn.put_object(Bucket='bucket', Key=self.remote_log_key, Body=b'previous ')
self.s3_task_handler.s3_write('text', self.remote_log_location)
body = boto3.resource('s3').Object('bucket', self.remote_log_key).get()['Body'].read()
self.assertEqual(body, b'previous \ntext')
def test_write_raises(self):
handler = self.s3_task_handler
url = 's3://nonexistentbucket/foo'
with mock.patch.object(handler.log, 'error') as mock_error:
handler.s3_write('text', url)
mock_error.assert_called_once_with(
'Could not write logs to %s', url, exc_info=True)
def test_close(self):
self.s3_task_handler.set_context(self.ti)
self.assertTrue(self.s3_task_handler.upload_on_close)
self.s3_task_handler.close()
# Should not raise
boto3.resource('s3').Object('bucket', self.remote_log_key).get()
def test_close_no_upload(self):
self.ti.raw = True
self.s3_task_handler.set_context(self.ti)
self.assertFalse(self.s3_task_handler.upload_on_close)
self.s3_task_handler.close()
with self.assertRaises(self.conn.exceptions.NoSuchKey):
boto3.resource('s3').Object('bucket', self.remote_log_key).get()
| apache-2.0 |
Jing-Luo/Jing-Luo.github.io | talkmap.py | 205 | 1188 |
# # Leaflet cluster map of talk locations
#
# (c) 2016-2017 R. Stuart Geiger, released under the MIT license
#
# Run this from the _talks/ directory, which contains .md files of all your talks.
# This scrapes the location YAML field from each .md file, geolocates it with
# geopy/Nominatim, and uses the getorg library to output data, HTML,
# and Javascript for a standalone cluster map.
#
# Requires: glob, getorg, geopy
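# Illustrative (added comment): each talk file is expected to carry a line
# such as
#     location: "Berkeley, California, USA"
# which is what the scraping loop below extracts and geocodes; the city shown
# here is only a placeholder.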
import glob
import getorg
from geopy import Nominatim
g = glob.glob("*.md")
geocoder = Nominatim()
location_dict = {}
location = ""
permalink = ""
title = ""
for file in g:
with open(file, 'r') as f:
lines = f.read()
if lines.find('location: "') > 1:
loc_start = lines.find('location: "') + 11
lines_trim = lines[loc_start:]
loc_end = lines_trim.find('"')
location = lines_trim[:loc_end]
location_dict[location] = geocoder.geocode(location)
print(location, "\n", location_dict[location])
m = getorg.orgmap.create_map_obj()
getorg.orgmap.output_html_cluster_map(location_dict, folder_name="../talkmap", hashed_usernames=False)
| mit |
yongshengwang/hue | desktop/core/ext-py/pysaml2-2.4.0/build/lib/saml2/mdie.py | 34 | 4757 | #!/usr/bin/env python
from saml2 import element_to_extension_element
from saml2 import extension_elements_to_elements
from saml2 import SamlBase
from saml2 import md
__author__ = 'rolandh'
"""
Functions used to import metadata from and export it to a pysaml2 format
"""
IMP_SKIP = ["_certs", "e_e_", "_extatt"]
EXP_SKIP = ["__class__"]
# From pysaml2 SAML2 metadata format to Python dictionary
def _eval(val, onts, mdb_safe):
"""
Convert a value to a basic dict format
:param val: The value
:param onts: Schemas to be used in the conversion
:return: The basic dictionary
"""
if isinstance(val, basestring):
val = val.strip()
if not val:
return None
else:
return val
elif isinstance(val, dict) or isinstance(val, SamlBase):
return to_dict(val, onts, mdb_safe)
elif isinstance(val, list):
lv = []
for v in val:
if isinstance(v, dict) or isinstance(v, SamlBase):
lv.append(to_dict(v, onts, mdb_safe))
else:
lv.append(v)
return lv
return val
def to_dict(_dict, onts, mdb_safe=False):
"""
Convert a pysaml2 SAML2 message class instance into a basic dictionary
format.
The export interface.
:param _dict: The pysaml2 metadata instance
:param onts: List of schemas to use for the conversion
:return: The converted information
"""
res = {}
if isinstance(_dict, SamlBase):
res["__class__"] = "%s&%s" % (_dict.c_namespace, _dict.c_tag)
for key in _dict.keyswv():
if key in IMP_SKIP:
continue
val = getattr(_dict, key)
if key == "extension_elements":
_eel = extension_elements_to_elements(val, onts)
_val = [_eval(_v, onts, mdb_safe) for _v in _eel]
elif key == "extension_attributes":
if mdb_safe:
_val = dict([(k.replace(".", "__"), v) for k, v in
val.items()])
#_val = {k.replace(".", "__"): v for k, v in val.items()}
else:
_val = val
else:
_val = _eval(val, onts, mdb_safe)
if _val:
if mdb_safe:
key = key.replace(".", "__")
res[key] = _val
else:
for key, val in _dict.items():
_val = _eval(val, onts, mdb_safe)
if _val:
if mdb_safe and "." in key:
key = key.replace(".", "__")
res[key] = _val
return res
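# Illustrative sketch (added comment, not part of the original module): for a
# parsed metadata object `ed`, something like
#     d = to_dict(ed, onts=[md])
# yields a plain dict whose "__class__" entries encode "<namespace>&<tag>";
# from_dict(d, {md.NAMESPACE: md}) is then expected to rebuild the pysaml2
# objects, since from_dict looks classes up through a namespace-keyed dict.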
# From Python dictionary to pysaml2 SAML2 metadata format
def _kwa(val, onts, mdb_safe=False):
"""
Key word argument conversion
:param val: A dictionary
:param onts: dictionary with schemas to use in the conversion
schema namespase is the key in the dictionary
:return: A converted dictionary
"""
if not mdb_safe:
return dict([(k, from_dict(v, onts)) for k, v in val.items()
if k not in EXP_SKIP])
else:
_skip = ["_id"]
_skip.extend(EXP_SKIP)
return dict([(k.replace("__", "."), from_dict(v, onts)) for k, v in
val.items() if k not in _skip])
def from_dict(val, onts, mdb_safe=False):
"""
Converts a dictionary into a pysaml2 object
:param val: A dictionary
:param onts: Dictionary of schemas to use in the conversion
:return: The pysaml2 object instance
"""
if isinstance(val, dict):
if "__class__" in val:
ns, typ = val["__class__"].split("&")
cls = getattr(onts[ns], typ)
if cls is md.Extensions:
lv = []
for key, ditems in val.items():
if key in EXP_SKIP:
continue
for item in ditems:
ns, typ = item["__class__"].split("&")
cls = getattr(onts[ns], typ)
kwargs = _kwa(item, onts, mdb_safe)
inst = cls(**kwargs)
lv.append(element_to_extension_element(inst))
return lv
else:
kwargs = _kwa(val, onts, mdb_safe)
inst = cls(**kwargs)
return inst
else:
res = {}
for key, v in val.items():
if mdb_safe:
key = key.replace("__", ".")
res[key] = from_dict(v, onts)
return res
elif isinstance(val, basestring):
return val
elif isinstance(val, list):
return [from_dict(v, onts) for v in val]
else:
return val
| apache-2.0 |
adobe/avmplus | test/cmdline/testMemstats.py | 8 | 2430 | #!/usr/bin/env python
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
# import the utilities from cmdutils.py
import os
from cmdutils import *
def run():
r=RunTestLib()
r.run_test(name='memstats',
command="%s -memstats testdata/memstats.abc" % r.avm,
expectedout=[
'gross stats',
'managed overhead',
'gross stats end',
'sweep\\([0-9]+\\) reclaimed [0-9]+ whole pages'
]
)
if os.name!='nt':
print("pyspy requires windows named pipes, does not work with cygwin python or non-windows operating systems")
else:
failed=False
try:
import win32event
import win32pipe
import win32file
except:
failed=True
print("pyspy failed to load python win32 extension FAILED")
if failed==False:
os.putenv('MMGC_PROFILE','1')
proc=r.run_command_async(command="%s testdata/memstats.abc" % r.avm,sleep=2)
# pyspy source
e = "MMgc::MemoryProfiler::DumpFatties"
h = None
try:
h = win32event.OpenEvent(win32event.EVENT_MODIFY_STATE, False, e)
except Exception:
print("Error: No registered event: %s FAILED!" % e)
sys.exit(1)
win32event.SetEvent(h)
pipe = "\\\\.\\pipe\MMgc_Spy"
readHandle = None
while True:
try:
readHandle = win32file.CreateFile(pipe, win32file.GENERIC_READ, 0, None, win32file.OPEN_EXISTING, 0, None)
win32pipe.WaitNamedPipe(pipe, 100)
except Exception:
pass
if readHandle:
break
while True:
try:
data = win32file.ReadFile(readHandle, 128)
sys.stdout.write(data[1])
except:
break
# by specifying a main can run this test individually
if __name__ == '__main__':
r=RunTestLib()
r.compile("testdata/memstats.as",None,"-d")
run()
| mpl-2.0 |
mozilla/moztrap | moztrap/model/tags/migrations/0006_auto__add_index_tag_name__add_index_tag_created_on__add_index_tag_modi.py | 5 | 7330 | # -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding index on 'Tag', fields ['name']
db.create_index('tags_tag', ['name'])
# Adding index on 'Tag', fields ['created_on']
db.create_index('tags_tag', ['created_on'])
# Adding index on 'Tag', fields ['modified_on']
db.create_index('tags_tag', ['modified_on'])
def backwards(self, orm):
# Removing index on 'Tag', fields ['modified_on']
db.delete_index('tags_tag', ['modified_on'])
# Removing index on 'Tag', fields ['created_on']
db.delete_index('tags_tag', ['created_on'])
# Removing index on 'Tag', fields ['name']
db.delete_index('tags_tag', ['name'])
models = {
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'unique': 'True', 'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'core.product': {
'Meta': {'ordering': "['name']", 'object_name': 'Product'},
'cc_version': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'created_by': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'+'", 'null': 'True', 'on_delete': 'models.SET_NULL', 'to': "orm['auth.User']"}),
'created_on': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(2013, 9, 6, 0, 0)', 'db_index': 'True'}),
'deleted_by': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'+'", 'null': 'True', 'on_delete': 'models.SET_NULL', 'to': "orm['auth.User']"}),
'deleted_on': ('django.db.models.fields.DateTimeField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
'description': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'has_team': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'modified_by': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'+'", 'null': 'True', 'on_delete': 'models.SET_NULL', 'to': "orm['auth.User']"}),
'modified_on': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(2013, 9, 6, 0, 0)', 'db_index': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100', 'db_index': 'True'}),
'own_team': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.User']", 'symmetrical': 'False', 'blank': 'True'})
},
'tags.tag': {
'Meta': {'object_name': 'Tag'},
'cc_version': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'created_by': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'+'", 'null': 'True', 'on_delete': 'models.SET_NULL', 'to': "orm['auth.User']"}),
'created_on': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(2013, 9, 6, 0, 0)', 'db_index': 'True'}),
'deleted_by': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'+'", 'null': 'True', 'on_delete': 'models.SET_NULL', 'to': "orm['auth.User']"}),
'deleted_on': ('django.db.models.fields.DateTimeField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
'description': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'modified_by': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'+'", 'null': 'True', 'on_delete': 'models.SET_NULL', 'to': "orm['auth.User']"}),
'modified_on': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(2013, 9, 6, 0, 0)', 'db_index': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100', 'db_index': 'True'}),
'product': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['core.Product']", 'null': 'True', 'blank': 'True'})
}
}
complete_apps = ['tags'] | bsd-2-clause |
nolanliou/tensorflow | tensorflow/contrib/learn/python/learn/estimators/head.py | 18 | 82883 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Abstractions for the head(s) of a model.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import abc
import six
from tensorflow.contrib import framework as framework_lib
from tensorflow.contrib import layers as layers_lib
from tensorflow.contrib.learn.python.learn.estimators import constants
from tensorflow.contrib.learn.python.learn.estimators import model_fn
from tensorflow.contrib.learn.python.learn.estimators import prediction_key
from tensorflow.contrib.learn.python.learn.estimators.metric_key import MetricKey as mkey
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import lookup_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import metrics as metrics_lib
from tensorflow.python.ops import nn
from tensorflow.python.ops import sparse_ops
from tensorflow.python.ops import string_ops
from tensorflow.python.ops import variable_scope
from tensorflow.python.ops import weights_broadcast_ops
from tensorflow.python.ops.losses import losses as losses_lib
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.summary import summary
from tensorflow.python.training import training
from tensorflow.python.util import tf_decorator
from tensorflow.python.util import tf_inspect
class Head(object):
"""Interface for the head/top of a model.
Given logits (or output of a hidden layer), a Head knows how to compute
predictions, loss, default metric and export signature. It is meant to,
1) Simplify writing model_fn and to make model_fn more configurable
2) Support a wide range of machine learning models. Since most heads can work
with logits, they can support DNN, RNN, Wide, Wide&Deep,
Global objectives, Gradient boosted trees and many other types
of machine learning models.
3) Allow users to seamlessly switch between 1 and n heads for multi
objective learning (See _MultiHead implementation for more details)
Common usage:
Here is simplified model_fn to build a multiclass DNN model.
```python
def _my_dnn_model_fn(features, labels, mode, params, config=None):
# Optionally your callers can pass head to model_fn as a param.
head = tf.contrib.learn.multi_class_head(...)
input = tf.contrib.layers.input_from_feature_columns(features, ...)
last_hidden_layer_out = tf.contrib.layers.stack(
input, tf.contrib.layers.fully_connected, [1000, 500])
logits = tf.contrib.layers.fully_connected(
last_hidden_layer_out, head.logits_dimension, activation_fn=None)
def _train_op_fn(loss):
return optimizer.minimize(loss)
return head.create_model_fn_ops(
features=features,
labels=labels,
mode=mode,
train_op_fn=_train_op_fn,
logits=logits,
scope=...)
```
Most heads also support logits_input which is typically the output of the last
hidden layer. Some heads (like heads responsible for candidate sampling or
hierarchical softmax) intrinsically will not support logits and you have
to pass logits_input. Here is a common usage,
```python
return head.create_model_fn_ops(
features=features,
labels=labels,
mode=mode,
train_op_fn=_train_op_fn,
logits_input=last_hidden_layer_out,
scope=...)
```
There are cases where computing and applying gradients can not be meaningfully
captured with the train_op_fn we support (for example, with a sync optimizer). In
such cases, you can take the responsibility on your own. Here is a common
use case,
```python
model_fn_ops = head.create_model_fn_ops(
features=features,
labels=labels,
mode=mode,
train_op_fn=tf.contrib.learn.no_op_train_fn,
logits=logits,
scope=...)
if mode == tf.contrib.learn.ModeKeys.TRAIN:
optimizer = ...
sync = tf.train.SyncReplicasOptimizer(opt=optimizer, ...)
update_op = tf.contrib.layers.optimize_loss(optimizer=sync,
loss=model_fn_ops.loss, ...)
hooks = [sync.make_session_run_hook(is_chief)]
... update train_op and hooks in ModelFnOps and return
```
"""
__metaclass__ = abc.ABCMeta
@abc.abstractproperty
def logits_dimension(self):
"""Size of the last dimension of the logits `Tensor`.
Typically, logits is of shape `[batch_size, logits_dimension]`.
Returns:
The expected size of the `logits` tensor.
"""
raise NotImplementedError("Calling an abstract method.")
@abc.abstractmethod
def create_model_fn_ops(self,
features,
mode,
labels=None,
train_op_fn=None,
logits=None,
logits_input=None,
scope=None):
"""Returns `ModelFnOps` that a model_fn can return.
Please note that,
+ Exactly one of `logits` and `logits_input` must be provided.
+ All args must be passed via name.
Args:
features: Input `dict` of `Tensor` objects.
mode: Estimator's `ModeKeys`.
labels: Labels `Tensor`, or `dict` of same.
train_op_fn: Function that takes a scalar loss `Tensor` and returns an op
to optimize the model with the loss. This is used in TRAIN mode and
must not be None. None is allowed in other modes. If you want to
optimize loss yourself you can pass `no_op_train_fn` and then use
ModelFnOps.loss to compute and apply gradients.
logits: logits `Tensor` to be used by the head.
logits_input: `Tensor` from which to build logits, often needed when you
don't want to compute the logits. Typically this is the activation of
the last hidden layer in a DNN. Some heads (like the ones responsible
for candidate sampling) intrinsically avoid computing full logits and
only accept logits_input.
scope: Optional scope for `variable_scope`.
Returns:
An instance of `ModelFnOps`.
Raises:
ValueError: If `mode` is not recognized.
ValueError: If neither or both of `logits` and `logits_input` is provided.
"""
raise NotImplementedError("Calling an abstract method.")
def regression_head(label_name=None,
weight_column_name=None,
label_dimension=1,
enable_centered_bias=False,
head_name=None):
"""Creates a `Head` for linear regression.
Args:
label_name: String, name of the key in label dict. Can be null if label
is a tensor (single headed models).
weight_column_name: A string defining feature column name representing
weights. It is used to down weight or boost examples during training. It
will be multiplied by the loss of the example.
label_dimension: Number of regression labels per example. This is the size
of the last dimension of the labels `Tensor` (typically, this has shape
`[batch_size, label_dimension]`).
enable_centered_bias: A bool. If True, estimator will learn a centered
bias variable for each class. Rest of the model structure learns the
residual after centered bias.
head_name: name of the head. If provided, predictions, summary and metrics
keys will be suffixed by `"/" + head_name` and the default variable scope
will be `head_name`.
Returns:
An instance of `Head` for linear regression.
"""
return _RegressionHead(
label_name=label_name,
weight_column_name=weight_column_name,
label_dimension=label_dimension,
enable_centered_bias=enable_centered_bias,
head_name=head_name,
loss_fn=_mean_squared_loss,
link_fn=array_ops.identity)
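# --- Illustrative sketch (not part of the original module) ---
# A minimal model_fn built on `regression_head`. The feature key "x", the
# hidden layer sizes, the learning rate and the "SGD" optimizer are
# assumptions for illustration only.
def _example_regression_model_fn(features, labels, mode, params=None,
                                 config=None):
  """Hypothetical model_fn demonstrating `regression_head` usage."""
  del params, config  # Unused in this sketch.
  head = regression_head(label_dimension=1)
  hidden = layers_lib.stack(
      features["x"], layers_lib.fully_connected, [64, 32])
  logits = layers_lib.fully_connected(
      hidden, head.logits_dimension, activation_fn=None)
  def _train_op_fn(loss):
    return layers_lib.optimize_loss(
        loss,
        global_step=framework_lib.get_global_step(),
        learning_rate=0.01,
        optimizer="SGD")
  return head.create_model_fn_ops(
      features=features,
      labels=labels,
      mode=mode,
      train_op_fn=_train_op_fn,
      logits=logits)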
def poisson_regression_head(label_name=None,
weight_column_name=None,
label_dimension=1,
enable_centered_bias=False,
head_name=None):
"""Creates a `Head` for poisson regression.
Args:
label_name: String, name of the key in label dict. Can be null if label
is a tensor (single headed models).
weight_column_name: A string defining feature column name representing
weights. It is used to down weight or boost examples during training. It
will be multiplied by the loss of the example.
label_dimension: Number of regression labels per example. This is the size
of the last dimension of the labels `Tensor` (typically, this has shape
`[batch_size, label_dimension]`).
enable_centered_bias: A bool. If True, estimator will learn a centered
bias variable for each class. Rest of the model structure learns the
residual after centered bias.
head_name: name of the head. If provided, predictions, summary and metrics
keys will be suffixed by `"/" + head_name` and the default variable scope
will be `head_name`.
Returns:
An instance of `Head` for poisson regression.
"""
return _RegressionHead(
label_name=label_name,
weight_column_name=weight_column_name,
label_dimension=label_dimension,
enable_centered_bias=enable_centered_bias,
head_name=head_name,
loss_fn=_poisson_loss,
link_fn=math_ops.exp)
# TODO(zakaria): Consider adding a _RegressionHead for logistic_regression
def multi_class_head(n_classes,
label_name=None,
weight_column_name=None,
enable_centered_bias=False,
head_name=None,
thresholds=None,
metric_class_ids=None,
loss_fn=None,
label_keys=None):
"""Creates a `Head` for multi class single label classification.
The Head uses softmax cross entropy loss.
This head expects to be fed integer labels specifying the class index. But
if `label_keys` is specified, then labels must be strings from this
vocabulary, and the predicted classes will be strings from the same
vocabulary.
Args:
n_classes: Integer, number of classes, must be >= 2
label_name: String, name of the key in label dict. Can be null if label
is a tensor (single headed models).
weight_column_name: A string defining feature column name representing
weights. It is used to down weight or boost examples during training. It
will be multiplied by the loss of the example.
enable_centered_bias: A bool. If True, estimator will learn a centered
bias variable for each class. Rest of the model structure learns the
residual after centered bias.
head_name: name of the head. If provided, predictions, summary and metrics
keys will be suffixed by `"/" + head_name` and the default variable scope
will be `head_name`.
thresholds: thresholds for eval metrics, defaults to [.5]
metric_class_ids: List of class IDs for which we should report per-class
metrics. Must all be in the range `[0, n_classes)`. Invalid if
`n_classes` is 2.
loss_fn: Optional function that takes (`labels`, `logits`, `weights`) as
parameter and returns a weighted scalar loss. `weights` should be
optional. See `tf.losses`
label_keys: Optional list of strings with size `[n_classes]` defining the
label vocabulary. Only supported for `n_classes` > 2.
Returns:
An instance of `Head` for multi class classification.
Raises:
ValueError: if `n_classes` is < 2.
ValueError: If `metric_class_ids` is provided when `n_classes` is 2.
ValueError: If `len(label_keys) != n_classes`.
"""
if (n_classes is None) or (n_classes < 2):
raise ValueError("n_classes must be > 1 for classification: %s." %
n_classes)
if loss_fn:
_verify_loss_fn_args(loss_fn)
loss_fn = _wrap_custom_loss_fn(loss_fn) if loss_fn else None
if n_classes == 2:
if metric_class_ids:
raise ValueError("metric_class_ids invalid for n_classes==2.")
if label_keys:
raise ValueError("label_keys is not supported for n_classes=2.")
return _BinaryLogisticHead(
label_name=label_name,
weight_column_name=weight_column_name,
enable_centered_bias=enable_centered_bias,
head_name=head_name,
thresholds=thresholds,
loss_fn=loss_fn)
return _MultiClassHead(
n_classes=n_classes,
label_name=label_name,
weight_column_name=weight_column_name,
enable_centered_bias=enable_centered_bias,
head_name=head_name,
thresholds=thresholds,
metric_class_ids=metric_class_ids,
loss_fn=loss_fn,
label_keys=label_keys)
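# --- Illustrative sketch (not part of the original module) ---
# Constructing a 3-class head with a string label vocabulary. With
# `label_keys` set, labels fed to the head are expected to be these strings
# and the CLASSES prediction is mapped back to the same vocabulary. The
# class names and head name are assumptions for illustration only.
def _example_multi_class_head():
  """Returns a hypothetical 3-class `Head` with a string label vocabulary."""
  return multi_class_head(
      n_classes=3,
      label_keys=["cat", "dog", "bird"],
      head_name="animal")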
def binary_svm_head(
label_name=None,
weight_column_name=None,
enable_centered_bias=False,
head_name=None,
thresholds=None,):
"""Creates a `Head` for binary classification with SVMs.
The head uses binary hinge loss.
Args:
label_name: String, name of the key in label dict. Can be null if label
is a tensor (single headed models).
weight_column_name: A string defining feature column name representing
weights. It is used to down weight or boost examples during training. It
will be multiplied by the loss of the example.
enable_centered_bias: A bool. If True, estimator will learn a centered
bias variable for each class. Rest of the model structure learns the
residual after centered bias.
head_name: name of the head. If provided, predictions, summary and metrics
keys will be suffixed by `"/" + head_name` and the default variable scope
will be `head_name`.
thresholds: thresholds for eval metrics, defaults to [.5]
Returns:
An instance of `Head` for binary classification with SVM.
"""
return _BinarySvmHead(
label_name=label_name,
weight_column_name=weight_column_name,
enable_centered_bias=enable_centered_bias,
head_name=head_name,
thresholds=thresholds)
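# --- Illustrative sketch (not part of the original module) ---
# A binary SVM head with a per-example weight column; the column name and
# head name are assumptions for illustration only.
def _example_binary_svm_head():
  """Returns a hypothetical binary SVM `Head` with example weights."""
  return binary_svm_head(
      weight_column_name="example_weight",
      head_name="svm")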
def multi_label_head(n_classes,
label_name=None,
weight_column_name=None,
enable_centered_bias=False,
head_name=None,
thresholds=None,
metric_class_ids=None,
loss_fn=None):
"""Creates a Head for multi label classification.
Multi-label classification handles the case where each example may have zero
or more associated labels, from a discrete set. This is distinct from
`multi_class_head` which has exactly one label from a discrete set.
This head by default uses sigmoid cross entropy loss, which expects as input
a multi-hot tensor of shape `(batch_size, num_classes)`.
Args:
n_classes: Integer, number of classes, must be >= 2
label_name: String, name of the key in label dict. Can be null if label
is a tensor (single headed models).
weight_column_name: A string defining feature column name representing
weights. It is used to down weight or boost examples during training. It
will be multiplied by the loss of the example.
enable_centered_bias: A bool. If True, estimator will learn a centered
bias variable for each class. Rest of the model structure learns the
residual after centered bias.
head_name: name of the head. If provided, predictions, summary and metrics
keys will be suffixed by `"/" + head_name` and the default variable scope
will be `head_name`.
thresholds: thresholds for eval metrics, defaults to [.5]
metric_class_ids: List of class IDs for which we should report per-class
metrics. Must all be in the range `[0, n_classes)`.
loss_fn: Optional function that takes (`labels`, `logits`, `weights`) as
parameter and returns a weighted scalar loss. `weights` should be
optional. See `tf.losses`
Returns:
An instance of `Head` for multi label classification.
Raises:
ValueError: If n_classes is < 2
ValueError: If loss_fn does not have expected signature.
"""
if n_classes < 2:
raise ValueError("n_classes must be > 1 for classification.")
if loss_fn:
_verify_loss_fn_args(loss_fn)
return _MultiLabelHead(
n_classes=n_classes,
label_name=label_name,
weight_column_name=weight_column_name,
enable_centered_bias=enable_centered_bias,
head_name=head_name,
thresholds=thresholds,
metric_class_ids=metric_class_ids,
loss_fn=_wrap_custom_loss_fn(loss_fn) if loss_fn else None)
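# --- Illustrative sketch (not part of the original module) ---
# `multi_label_head` expects multi-hot labels of shape
# `(batch_size, n_classes)`. For n_classes=4, a batch of two examples could
# be labeled [[1, 0, 1, 0], [0, 0, 0, 1]]: the first example carries classes
# 0 and 2, the second carries only class 3. The head name is an assumption.
def _example_multi_label_head():
  """Returns a hypothetical 4-class multi-label `Head`."""
  return multi_label_head(n_classes=4, head_name="topics")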
def loss_only_head(loss_fn, head_name=None):
"""Creates a Head that contains only loss terms.
Loss only head holds additional loss terms to be added to other heads and
usually represents additional regularization terms in the objective function.
Args:
loss_fn: a function that takes no argument and returns a list of
scalar tensors.
head_name: a name for the head.
Returns:
An instance of `Head` to hold the additional losses.
"""
return _LossOnlyHead(loss_fn, head_name=head_name)
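# --- Illustrative sketch (not part of the original module) ---
# A loss-only head that contributes an L2 penalty on a caller-supplied list
# of weight tensors; the scale factor and head name are assumptions.
def _example_regularization_head(weights, scale=0.01):
  """Returns a hypothetical loss-only `Head` adding an L2 penalty."""
  def _reg_loss_fn():
    # Matches the documented contract: no arguments, list of scalar tensors.
    return [scale * nn.l2_loss(w) for w in weights]
  return loss_only_head(_reg_loss_fn, head_name="l2_regularizer")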
def multi_head(heads, loss_weights=None):
"""Creates a MultiHead stemming from same logits/hidden layer.
Args:
heads: list of Head objects.
loss_weights: optional list of weights to be used to merge losses from
each head. All losses are weighted equally if not provided.
Returns:
A instance of `Head` that merges multiple heads.
Raises:
ValueError: if heads and loss_weights have different size.
"""
if loss_weights:
if len(loss_weights) != len(heads):
raise ValueError("heads and loss_weights must have same size")
def _weighted_loss_merger(losses):
if loss_weights:
if len(losses) != len(loss_weights):
raise ValueError("losses and loss_weights must have same size")
weighted_losses = []
for loss, weight in zip(losses, loss_weights):
weighted_losses.append(math_ops.multiply(loss, weight))
return math_ops.add_n(weighted_losses)
else:
return math_ops.add_n(losses)
return _MultiHead(heads, loss_merger=_weighted_loss_merger)
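# --- Illustrative sketch (not part of the original module) ---
# Merging a regression objective and a binary classification objective that
# share the same hidden layer. The label names, head names and the 0.3/0.7
# loss weights are assumptions for illustration only.
def _example_multi_head():
  """Returns a hypothetical two-objective multi head."""
  rating_head = regression_head(label_name="rating", head_name="rating")
  click_head = multi_class_head(
      n_classes=2, label_name="clicked", head_name="clicked")
  return multi_head([rating_head, click_head], loss_weights=[0.3, 0.7])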
def no_op_train_fn(loss):
del loss
return control_flow_ops.no_op()
class _SingleHead(Head):
"""Interface for a single head/top of a model."""
__metaclass__ = abc.ABCMeta
def __init__(
self, problem_type, logits_dimension, label_name=None,
weight_column_name=None, head_name=None):
if problem_type is None:
raise ValueError("Invalid problem_type %s." % problem_type)
if logits_dimension is None or logits_dimension < 1:
raise ValueError("Invalid logits_dimension %s." % logits_dimension)
self._problem_type = problem_type
self._logits_dimension = logits_dimension
self._label_name = label_name
self._weight_column_name = weight_column_name
self._head_name = head_name
@property
def logits_dimension(self):
return self._logits_dimension
@property
def label_name(self):
return self._label_name
@property
def weight_column_name(self):
return self._weight_column_name
@property
def head_name(self):
return self._head_name
def _create_output_alternatives(self, predictions):
"""Creates output alternative for the Head.
Args:
predictions: a dict of {tensor_name: Tensor}, where 'tensor_name' is a
symbolic name for an output Tensor possibly but not necessarily taken
from `PredictionKey`, and 'Tensor' is the corresponding output Tensor
itself.
Returns:
`dict` of {submodel_name: (problem_type, {tensor_name: Tensor})}, where
'submodel_name' is a submodel identifier that should be consistent across
the pipeline (here likely taken from the head_name),
'problem_type' is a `ProblemType`,
'tensor_name' is a symbolic name for an output Tensor possibly but not
necessarily taken from `PredictionKey`, and
'Tensor' is the corresponding output Tensor itself.
"""
return {self._head_name: (self._problem_type, predictions)}
# TODO(zakaria): use contrib losses.
def _mean_squared_loss(labels, logits, weights=None):
with ops.name_scope(None, "mean_squared_loss", (logits, labels)) as name:
logits = ops.convert_to_tensor(logits)
labels = ops.convert_to_tensor(labels)
# To prevent broadcasting inside "-".
if len(labels.get_shape()) == 1:
labels = array_ops.expand_dims(labels, dim=(1,))
# TODO(zakaria): make sure it does not recreate the broadcast bug.
if len(logits.get_shape()) == 1:
logits = array_ops.expand_dims(logits, dim=(1,))
logits.get_shape().assert_is_compatible_with(labels.get_shape())
loss = math_ops.square(logits - math_ops.to_float(labels), name=name)
return _compute_weighted_loss(loss, weights)
def _poisson_loss(labels, logits, weights=None):
"""Computes poisson loss from logits."""
with ops.name_scope(None, "_poisson_loss", (logits, labels)) as name:
logits = ops.convert_to_tensor(logits)
labels = ops.convert_to_tensor(labels)
# To prevent broadcasting inside "-".
if len(labels.get_shape()) == 1:
labels = array_ops.expand_dims(labels, dim=(1,))
# TODO(zakaria): make sure it does not recreate the broadcast bug.
if len(logits.get_shape()) == 1:
logits = array_ops.expand_dims(logits, dim=(1,))
logits.get_shape().assert_is_compatible_with(labels.get_shape())
loss = nn.log_poisson_loss(labels, logits, compute_full_loss=True,
name=name)
return _compute_weighted_loss(loss, weights)
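# Note (added commentary, not original code): `_poisson_loss` treats `logits`
# as log-rates, and `compute_full_loss=True` asks `nn.log_poisson_loss` to
# include the Stirling-approximated `log(labels!)` term, so the value
# approximates the full negative Poisson log-likelihood rather than dropping
# the label-only constant.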
def _logits(logits_input, logits, logits_dimension):
"""Validate logits args, and create `logits` if necessary.
Exactly one of `logits_input` and `logits` must be provided.
Args:
logits_input: `Tensor` input to `logits`.
logits: `Tensor` output.
logits_dimension: Integer, last dimension of `logits`. This is used to
create `logits` from `logits_input` if `logits` is `None`; otherwise, it's
used to validate `logits`.
Returns:
`logits` `Tensor`.
Raises:
ValueError: if neither or both of `logits` and `logits_input` are supplied.
"""
if (logits_dimension is None) or (logits_dimension < 1):
raise ValueError("Invalid logits_dimension %s." % logits_dimension)
# If not provided, create logits.
if logits is None:
if logits_input is None:
raise ValueError("Neither logits nor logits_input supplied.")
return layers_lib.linear(logits_input, logits_dimension, scope="logits")
if logits_input is not None:
raise ValueError("Both logits and logits_input supplied.")
logits = ops.convert_to_tensor(logits, name="logits")
logits_dims = logits.get_shape().dims
if logits_dims is not None:
logits_dims[-1].assert_is_compatible_with(logits_dimension)
return logits
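# --- Illustrative sketch (not part of the original module) ---
# The `logits_input` path of `_logits`: a linear projection of the requested
# dimension is created from the last hidden layer. The dimension 3 is an
# assumption for illustration only; passing both or neither of `logits` and
# `logits_input` raises ValueError.
def _example_build_logits(last_hidden_layer, logits_dimension=3):
  """Hypothetical helper showing how `_logits` builds logits from an input."""
  return _logits(logits_input=last_hidden_layer, logits=None,
                 logits_dimension=logits_dimension)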
def _create_model_fn_ops(features,
mode,
loss_fn,
logits_to_predictions_fn,
metrics_fn,
create_output_alternatives_fn,
labels=None,
train_op_fn=None,
logits=None,
logits_dimension=None,
head_name=None,
weight_column_name=None,
enable_centered_bias=False):
"""Returns a `ModelFnOps` object."""
_check_mode_valid(mode)
centered_bias = None
if enable_centered_bias:
centered_bias = _centered_bias(logits_dimension, head_name)
logits = nn.bias_add(logits, centered_bias)
predictions = logits_to_predictions_fn(logits)
loss = None
train_op = None
eval_metric_ops = None
if (mode != model_fn.ModeKeys.INFER) and (labels is not None):
weight_tensor = _weight_tensor(features, weight_column_name)
loss, weighted_average_loss = loss_fn(labels, logits, weight_tensor)
# The name_scope escapism is needed to maintain the same summary tag
# after switching away from the now unsupported API.
with ops.name_scope(""):
summary_loss = array_ops.identity(weighted_average_loss)
summary.scalar(_summary_key(head_name, mkey.LOSS), summary_loss)
if mode == model_fn.ModeKeys.TRAIN:
if train_op_fn is None:
raise ValueError("train_op_fn can not be None in TRAIN mode")
batch_size = array_ops.shape(logits)[0]
train_op = _train_op(loss, labels, train_op_fn, centered_bias,
batch_size, loss_fn, weight_tensor)
eval_metric_ops = metrics_fn(
weighted_average_loss, predictions, labels, weight_tensor)
return model_fn.ModelFnOps(
mode=mode,
predictions=predictions,
loss=loss,
train_op=train_op,
eval_metric_ops=eval_metric_ops,
output_alternatives=create_output_alternatives_fn(predictions))
class _RegressionHead(_SingleHead):
"""`Head` for regression with a generalized linear model."""
def __init__(self,
label_dimension,
loss_fn,
link_fn,
logits_dimension=None,
label_name=None,
weight_column_name=None,
enable_centered_bias=False,
head_name=None):
"""`Head` for regression.
Args:
label_dimension: Number of regression labels per example. This is the
size of the last dimension of the labels `Tensor` (typically, this has
shape `[batch_size, label_dimension]`).
loss_fn: Loss function, takes logits and labels and returns loss.
link_fn: Link function, takes a logits tensor and returns the output.
logits_dimension: Number of logits per example. This is the
size of the last dimension of the logits `Tensor` (typically, this has
shape `[batch_size, label_dimension]`).
Default value: `label_dimension`.
label_name: String, name of the key in label dict. Can be null if label
is a tensor (single headed models).
weight_column_name: A string defining feature column name representing
weights. It is used to down weight or boost examples during training. It
will be multiplied by the loss of the example.
enable_centered_bias: A bool. If True, estimator will learn a centered
bias variable for each class. Rest of the model structure learns the
residual after centered bias.
head_name: name of the head. Predictions, summary and metrics keys are
suffixed by `"/" + head_name` and the default variable scope is
`head_name`.
"""
super(_RegressionHead, self).__init__(
problem_type=constants.ProblemType.LINEAR_REGRESSION,
logits_dimension=(logits_dimension if logits_dimension is not None
else label_dimension),
label_name=label_name,
weight_column_name=weight_column_name,
head_name=head_name)
self._loss_fn = loss_fn
self._link_fn = link_fn
self._enable_centered_bias = enable_centered_bias
def create_model_fn_ops(self,
features,
mode,
labels=None,
train_op_fn=None,
logits=None,
logits_input=None,
scope=None):
"""See `Head`."""
with variable_scope.variable_scope(
scope,
default_name=self.head_name or "regression_head",
values=(tuple(six.itervalues(features)) +
(labels, logits, logits_input))):
labels = self._transform_labels(mode=mode, labels=labels)
logits = _logits(logits_input, logits, self.logits_dimension)
return _create_model_fn_ops(
features=features,
mode=mode,
loss_fn=self._loss_fn,
logits_to_predictions_fn=self._logits_to_predictions,
metrics_fn=self._metrics,
create_output_alternatives_fn=self._create_output_alternatives,
labels=labels,
train_op_fn=train_op_fn,
logits=logits,
logits_dimension=self.logits_dimension,
head_name=self.head_name,
weight_column_name=self.weight_column_name,
enable_centered_bias=self._enable_centered_bias)
def _transform_labels(self, mode, labels):
"""Applies transformations to labels tensor."""
if (mode == model_fn.ModeKeys.INFER) or (labels is None):
return None
labels_tensor = _to_labels_tensor(labels, self._label_name)
_check_no_sparse_tensor(labels_tensor)
return labels_tensor
def _logits_to_predictions(self, logits):
"""Returns a dict of predictions.
Args:
logits: logits `Tensor` after applying possible centered bias.
Returns:
Dict of prediction `Tensor` keyed by `PredictionKey`.
"""
key = prediction_key.PredictionKey.SCORES
with ops.name_scope(None, "predictions", (logits,)):
if self.logits_dimension == 1:
logits = array_ops.squeeze(logits, squeeze_dims=(1,), name=key)
return {key: self._link_fn(logits)}
def _metrics(self, eval_loss, predictions, labels, weights):
"""Returns a dict of metrics keyed by name."""
del predictions, labels, weights # Unused by this head.
with ops.name_scope("metrics", values=[eval_loss]):
return {
_summary_key(self.head_name, mkey.LOSS):
metrics_lib.mean(eval_loss)}
def _log_loss_with_two_classes(labels, logits, weights=None):
with ops.name_scope(None, "log_loss_with_two_classes",
(logits, labels)) as name:
logits = ops.convert_to_tensor(logits)
labels = math_ops.to_float(labels)
# TODO(ptucker): This will break for dynamic shapes.
# sigmoid_cross_entropy_with_logits requires [batch_size, 1] labels.
if len(labels.get_shape()) == 1:
labels = array_ops.expand_dims(labels, dim=(1,))
loss = nn.sigmoid_cross_entropy_with_logits(labels=labels, logits=logits,
name=name)
return _compute_weighted_loss(loss, weights)
def _one_class_to_two_class_logits(logits):
return array_ops.concat((array_ops.zeros_like(logits), logits), 1)
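# Note (added commentary, not original code): prepending a zero column turns
# binary logits `z` into two-class logits `[0, z]`; softmax([0, z]) equals
# [1 - sigmoid(z), sigmoid(z)], so the two-class argmax reproduces the usual
# 0.5-probability decision boundary.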
class _BinaryLogisticHead(_SingleHead):
"""`Head` for binary classification with logistic regression."""
def __init__(self,
label_name=None,
weight_column_name=None,
enable_centered_bias=False,
head_name=None,
loss_fn=None,
thresholds=None):
"""`Head` for binary classification with logistic regression.
Args:
label_name: String, name of the key in label dict. Can be `None` if label
is a tensor (single headed models).
weight_column_name: A string defining feature column name representing
weights. It is used to down weight or boost examples during training. It
will be multiplied by the loss of the example.
enable_centered_bias: A bool. If True, estimator will learn a centered
bias variable for each class. Rest of the model structure learns the
residual after centered bias.
head_name: name of the head. Predictions, summary, metrics keys are
suffixed by `"/" + head_name` and the default variable scope is
`head_name`.
loss_fn: Loss function.
thresholds: thresholds for eval.
Raises:
ValueError: if n_classes is invalid.
"""
super(_BinaryLogisticHead, self).__init__(
problem_type=constants.ProblemType.LOGISTIC_REGRESSION,
logits_dimension=1,
label_name=label_name,
weight_column_name=weight_column_name,
head_name=head_name)
self._thresholds = thresholds if thresholds else (.5,)
self._loss_fn = loss_fn if loss_fn else _log_loss_with_two_classes
self._enable_centered_bias = enable_centered_bias
def create_model_fn_ops(self,
features,
mode,
labels=None,
train_op_fn=None,
logits=None,
logits_input=None,
scope=None):
"""See `Head`."""
with variable_scope.variable_scope(
scope,
default_name=self.head_name or "binary_logistic_head",
values=(tuple(six.itervalues(features)) +
(labels, logits, logits_input))):
labels = self._transform_labels(mode=mode, labels=labels)
logits = _logits(logits_input, logits, self.logits_dimension)
return _create_model_fn_ops(
features=features,
mode=mode,
loss_fn=self._loss_fn,
logits_to_predictions_fn=self._logits_to_predictions,
metrics_fn=self._metrics,
create_output_alternatives_fn=_classification_output_alternatives(
self.head_name, self._problem_type),
labels=labels,
train_op_fn=train_op_fn,
logits=logits,
logits_dimension=self.logits_dimension,
head_name=self.head_name,
weight_column_name=self.weight_column_name,
enable_centered_bias=self._enable_centered_bias)
def _transform_labels(self, mode, labels):
"""Applies transformations to labels tensor."""
if (mode == model_fn.ModeKeys.INFER) or (labels is None):
return None
labels_tensor = _to_labels_tensor(labels, self._label_name)
_check_no_sparse_tensor(labels_tensor)
return labels_tensor
def _logits_to_predictions(self, logits):
"""Returns a dict of predictions.
Args:
logits: logits `Output` after applying possible centered bias.
Returns:
Dict of prediction `Output` keyed by `PredictionKey`.
"""
with ops.name_scope(None, "predictions", (logits,)):
two_class_logits = _one_class_to_two_class_logits(logits)
return {
prediction_key.PredictionKey.LOGITS:
logits,
prediction_key.PredictionKey.LOGISTIC:
math_ops.sigmoid(
logits, name=prediction_key.PredictionKey.LOGISTIC),
prediction_key.PredictionKey.PROBABILITIES:
nn.softmax(
two_class_logits,
name=prediction_key.PredictionKey.PROBABILITIES),
prediction_key.PredictionKey.CLASSES:
math_ops.argmax(
two_class_logits,
1,
name=prediction_key.PredictionKey.CLASSES)
}
def _metrics(self, eval_loss, predictions, labels, weights):
"""Returns a dict of metrics keyed by name."""
with ops.name_scope("metrics", values=(
[eval_loss, labels, weights] + list(six.itervalues(predictions)))):
classes = predictions[prediction_key.PredictionKey.CLASSES]
logistic = predictions[prediction_key.PredictionKey.LOGISTIC]
metrics = {_summary_key(self.head_name, mkey.LOSS):
metrics_lib.mean(eval_loss)}
# TODO(b/29366811): This currently results in both an "accuracy" and an
# "accuracy/threshold_0.500000_mean" metric for binary classification.
metrics[_summary_key(self.head_name, mkey.ACCURACY)] = (
metrics_lib.accuracy(labels, classes, weights))
metrics[_summary_key(self.head_name, mkey.PREDICTION_MEAN)] = (
_predictions_streaming_mean(logistic, weights))
metrics[_summary_key(self.head_name, mkey.LABEL_MEAN)] = (
_indicator_labels_streaming_mean(labels, weights))
# Also include the streaming mean of the label as an accuracy baseline, as
# a reminder to users.
metrics[_summary_key(self.head_name, mkey.ACCURACY_BASELINE)] = (
_indicator_labels_streaming_mean(labels, weights))
metrics[_summary_key(self.head_name, mkey.AUC)] = (
_streaming_auc(logistic, labels, weights))
metrics[_summary_key(self.head_name, mkey.AUC_PR)] = (
_streaming_auc(logistic, labels, weights, curve="PR"))
for threshold in self._thresholds:
metrics[_summary_key(
self.head_name, mkey.ACCURACY_MEAN % threshold)] = (
_streaming_accuracy_at_threshold(logistic, labels, weights,
threshold))
# Precision for positive examples.
metrics[_summary_key(
self.head_name, mkey.PRECISION_MEAN % threshold)] = (
_streaming_precision_at_threshold(logistic, labels, weights,
threshold))
# Recall for positive examples.
metrics[_summary_key(
self.head_name, mkey.RECALL_MEAN % threshold)] = (
_streaming_recall_at_threshold(logistic, labels, weights,
threshold))
return metrics
def _softmax_cross_entropy_loss(labels, logits, weights=None):
with ops.name_scope(
None, "softmax_cross_entropy_loss", (logits, labels,)) as name:
labels = ops.convert_to_tensor(labels)
# Check that we got integer for classification.
if not labels.dtype.is_integer:
raise ValueError("Labels dtype should be integer "
"Instead got %s." % labels.dtype)
# sparse_softmax_cross_entropy_with_logits requires [batch_size] labels.
is_squeezed_labels = False
# TODO(ptucker): This will break for dynamic shapes.
if len(labels.get_shape()) == 2:
labels = array_ops.squeeze(labels, squeeze_dims=(1,))
is_squeezed_labels = True
loss = nn.sparse_softmax_cross_entropy_with_logits(
labels=labels, logits=logits, name=name)
# Restore squeezed dimension, if necessary, so loss matches weights shape.
if is_squeezed_labels:
loss = array_ops.expand_dims(loss, axis=(1,))
return _compute_weighted_loss(loss, weights)
class _MultiClassHead(_SingleHead):
"""'Head' for multi class classification."""
def __init__(self,
n_classes,
label_name=None,
weight_column_name=None,
enable_centered_bias=False,
head_name=None,
loss_fn=None,
thresholds=None,
metric_class_ids=None,
label_keys=None):
"""'Head' for multi class classification.
This head expects to be fed integer labels specifying the class index. But
if `label_keys` is specified, then labels must be strings from this
vocabulary, and the predicted classes will be strings from the same
vocabulary.
Args:
n_classes: Number of classes, must be greater than 2 (for 2 classes, use
`_BinaryLogisticHead`).
label_name: String, name of the key in label dict. Can be null if label
is a tensor (single headed models).
weight_column_name: A string defining feature column name representing
weights. It is used to down weight or boost examples during training. It
will be multiplied by the loss of the example.
enable_centered_bias: A bool. If True, estimator will learn a centered
bias variable for each class. Rest of the model structure learns the
residual after centered bias.
head_name: name of the head. If provided, predictions, summary, metrics
keys will be suffixed by `"/" + head_name` and the default variable
scope will be `head_name`.
loss_fn: Loss function. Defaults to softmax cross entropy loss.
thresholds: thresholds for eval.
metric_class_ids: List of class IDs for which we should report per-class
metrics. Must all be in the range `[0, n_classes)`.
label_keys: Optional list of strings with size `[n_classes]` defining the
label vocabulary.
Raises:
ValueError: if `n_classes`, `metric_class_ids` or `label_keys` is invalid.
"""
super(_MultiClassHead, self).__init__(
problem_type=constants.ProblemType.CLASSIFICATION,
logits_dimension=n_classes,
label_name=label_name,
weight_column_name=weight_column_name,
head_name=head_name)
if (n_classes is None) or (n_classes <= 2):
raise ValueError("n_classes must be > 2: %s." % n_classes)
self._thresholds = thresholds if thresholds else (.5,)
self._loss_fn = loss_fn if loss_fn else _softmax_cross_entropy_loss
self._enable_centered_bias = enable_centered_bias
self._metric_class_ids = tuple([] if metric_class_ids is None else
metric_class_ids)
for class_id in self._metric_class_ids:
if (class_id < 0) or (class_id >= n_classes):
raise ValueError("Class ID %s not in [0, %s)." % (class_id, n_classes))
if label_keys and len(label_keys) != n_classes:
raise ValueError("Length of label_keys must equal n_classes.")
self._label_keys = label_keys
def create_model_fn_ops(self,
features,
mode,
labels=None,
train_op_fn=None,
logits=None,
logits_input=None,
scope=None):
"""See `Head`."""
with variable_scope.variable_scope(
scope,
default_name=self.head_name or "multi_class_head",
values=(tuple(six.itervalues(features)) +
(labels, logits, logits_input))):
labels = self._transform_labels(mode=mode, labels=labels)
logits = _logits(logits_input, logits, self.logits_dimension)
return _create_model_fn_ops(
features=features,
mode=mode,
loss_fn=self._wrapped_loss_fn,
logits_to_predictions_fn=self._logits_to_predictions,
metrics_fn=self._metrics,
create_output_alternatives_fn=_classification_output_alternatives(
self.head_name, self._problem_type, self._label_keys),
labels=labels,
train_op_fn=train_op_fn,
logits=logits,
logits_dimension=self.logits_dimension,
head_name=self.head_name,
weight_column_name=self.weight_column_name,
enable_centered_bias=self._enable_centered_bias)
def _transform_labels(self, mode, labels):
"""Returns a dict that contains both the original labels and label IDs."""
if (mode == model_fn.ModeKeys.INFER) or (labels is None):
return None
labels_tensor = _to_labels_tensor(labels, self._label_name)
_check_no_sparse_tensor(labels_tensor)
if self._label_keys:
table = lookup_ops.index_table_from_tensor(
self._label_keys, name="label_id_lookup")
return {
"labels": labels_tensor,
"label_ids": table.lookup(labels_tensor),
}
return {
"labels": labels_tensor,
"label_ids": labels_tensor,
}
def _labels(self, labels_dict):
"""Returns labels `Tensor` of the same type as classes."""
return labels_dict["labels"]
def _label_ids(self, labels_dict):
"""Returns integer label ID `Tensor`."""
return labels_dict["label_ids"]
def _wrapped_loss_fn(self, labels, logits, weights=None):
return self._loss_fn(self._label_ids(labels), logits, weights=weights)
def _logits_to_predictions(self, logits):
"""Returns a dict of predictions.
Args:
logits: logits `Tensor` after applying possible centered bias.
Returns:
Dict of prediction `Tensor` keyed by `PredictionKey`.
"""
with ops.name_scope(None, "predictions", (logits,)):
class_ids = math_ops.argmax(
logits, 1, name=prediction_key.PredictionKey.CLASSES)
if self._label_keys:
table = lookup_ops.index_to_string_table_from_tensor(
self._label_keys, name="class_string_lookup")
classes = table.lookup(class_ids)
else:
classes = class_ids
return {
prediction_key.PredictionKey.LOGITS: logits,
prediction_key.PredictionKey.PROBABILITIES:
nn.softmax(
logits, name=prediction_key.PredictionKey.PROBABILITIES),
prediction_key.PredictionKey.CLASSES: classes
}
def _metrics(self, eval_loss, predictions, labels, weights):
"""Returns a dict of metrics keyed by name."""
with ops.name_scope(
"metrics",
values=((eval_loss, self._labels(labels), self._label_ids(labels),
weights) + tuple(six.itervalues(predictions)))):
logits = predictions[prediction_key.PredictionKey.LOGITS]
probabilities = predictions[prediction_key.PredictionKey.PROBABILITIES]
classes = predictions[prediction_key.PredictionKey.CLASSES]
metrics = {_summary_key(self.head_name, mkey.LOSS):
metrics_lib.mean(eval_loss)}
# TODO(b/29366811): This currently results in both an "accuracy" and an
# "accuracy/threshold_0.500000_mean" metric for binary classification.
metrics[_summary_key(self.head_name, mkey.ACCURACY)] = (
metrics_lib.accuracy(self._labels(labels), classes, weights))
if not self._label_keys:
# Classes are IDs. Add some metrics.
for class_id in self._metric_class_ids:
metrics[_summary_key(
self.head_name, mkey.CLASS_PREDICTION_MEAN % class_id)] = (
_class_predictions_streaming_mean(classes, weights, class_id))
# TODO(ptucker): Add per-class accuracy, precision, recall.
metrics[_summary_key(
self.head_name, mkey.CLASS_LABEL_MEAN % class_id)] = (
_class_labels_streaming_mean(
self._label_ids(labels), weights, class_id))
metrics[_summary_key(
self.head_name, mkey.CLASS_PROBABILITY_MEAN % class_id)] = (
_predictions_streaming_mean(probabilities, weights, class_id))
metrics[_summary_key(
self.head_name, mkey.CLASS_LOGITS_MEAN % class_id)] = (
_predictions_streaming_mean(logits, weights, class_id))
return metrics
def _to_labels_tensor(labels, label_name):
"""Returns label as a tensor.
Args:
labels: Label `Tensor` or `SparseTensor` or a dict containing labels.
label_name: Label name if labels is a dict.
Returns:
Label `Tensor` or `SparseTensor`.
"""
labels = labels[label_name] if isinstance(labels, dict) else labels
return framework_lib.convert_to_tensor_or_sparse_tensor(labels)
def _check_no_sparse_tensor(x):
"""Raises ValueError if the given tensor is `SparseTensor`."""
if isinstance(x, sparse_tensor.SparseTensor):
raise ValueError("SparseTensor is not supported.")
def _sparse_labels_to_indicator(labels, num_classes):
"""If labels is `SparseTensor`, converts it to indicator `Tensor`.
Args:
labels: Label `Tensor` or `SparseTensor`.
num_classes: Number of classes.
Returns:
Dense label `Tensor`.
Raises:
ValueError: If labels is `SparseTensor` and `num_classes` < 2.
"""
if isinstance(labels, sparse_tensor.SparseTensor):
if num_classes < 2:
raise ValueError("Must set num_classes >= 2 when passing labels as a "
"SparseTensor.")
return math_ops.to_int64(
sparse_ops.sparse_to_indicator(labels, num_classes))
return labels
def _assert_labels_rank(labels):
return control_flow_ops.Assert(
math_ops.less_equal(array_ops.rank(labels), 2),
("labels shape should be either [batch_size, 1] or [batch_size]",))
class _BinarySvmHead(_SingleHead):
"""`Head` for binary classification using SVM."""
def __init__(self, label_name, weight_column_name, enable_centered_bias,
head_name, thresholds):
def _loss_fn(labels, logits, weights=None):
with ops.name_scope(None, "hinge_loss", (logits, labels)) as name:
with ops.control_dependencies((_assert_labels_rank(labels),)):
labels = array_ops.reshape(labels, shape=(-1, 1))
loss = losses_lib.hinge_loss(labels=labels, logits=logits, scope=name,
reduction=losses_lib.Reduction.NONE)
return _compute_weighted_loss(loss, weights)
super(_BinarySvmHead, self).__init__(
problem_type=constants.ProblemType.LOGISTIC_REGRESSION,
logits_dimension=1,
label_name=label_name,
weight_column_name=weight_column_name,
head_name=head_name)
self._thresholds = thresholds if thresholds else (.5,)
self._loss_fn = _loss_fn
self._enable_centered_bias = enable_centered_bias
def create_model_fn_ops(self,
features,
mode,
labels=None,
train_op_fn=None,
logits=None,
logits_input=None,
scope=None):
"""See `Head`."""
with variable_scope.variable_scope(
scope,
default_name=self.head_name or "binary_svm_head",
values=(tuple(six.itervalues(features)) +
(labels, logits, logits_input))):
labels = self._transform_labels(mode=mode, labels=labels)
logits = _logits(logits_input, logits, self.logits_dimension)
return _create_model_fn_ops(
features=features,
mode=mode,
loss_fn=self._loss_fn,
logits_to_predictions_fn=self._logits_to_predictions,
metrics_fn=self._metrics,
# TODO(zakaria): Handle labels for export.
create_output_alternatives_fn=self._create_output_alternatives,
labels=labels,
train_op_fn=train_op_fn,
logits=logits,
logits_dimension=self.logits_dimension,
head_name=self.head_name,
weight_column_name=self.weight_column_name,
enable_centered_bias=self._enable_centered_bias)
def _transform_labels(self, mode, labels):
"""Applies transformations to labels tensor."""
if (mode == model_fn.ModeKeys.INFER) or (labels is None):
return None
labels_tensor = _to_labels_tensor(labels, self._label_name)
_check_no_sparse_tensor(labels_tensor)
return labels_tensor
def _logits_to_predictions(self, logits):
"""See `_MultiClassHead`."""
with ops.name_scope(None, "predictions", (logits,)):
return {
prediction_key.PredictionKey.LOGITS:
logits,
prediction_key.PredictionKey.CLASSES:
math_ops.argmax(
_one_class_to_two_class_logits(logits),
1,
name=prediction_key.PredictionKey.CLASSES)
}
def _metrics(self, eval_loss, predictions, labels, weights):
"""See `_MultiClassHead`."""
with ops.name_scope("metrics", values=(
[eval_loss, labels, weights] + list(six.itervalues(predictions)))):
metrics = {_summary_key(self.head_name, mkey.LOSS):
metrics_lib.mean(eval_loss)}
# TODO(b/29366811): This currently results in both an "accuracy" and an
# "accuracy/threshold_0.500000_mean" metric for binary classification.
classes = predictions[prediction_key.PredictionKey.CLASSES]
metrics[_summary_key(self.head_name, mkey.ACCURACY)] = (
metrics_lib.accuracy(labels, classes, weights))
# TODO(sibyl-vie3Poto): add more metrics relevant for svms.
return metrics
class _MultiLabelHead(_SingleHead):
"""`Head` for multi-label classification."""
# TODO(zakaria): add signature and metric for multilabel.
def __init__(self,
n_classes,
label_name,
weight_column_name,
enable_centered_bias,
head_name,
thresholds,
metric_class_ids=None,
loss_fn=None):
super(_MultiLabelHead, self).__init__(
problem_type=constants.ProblemType.CLASSIFICATION,
logits_dimension=n_classes,
label_name=label_name,
weight_column_name=weight_column_name,
head_name=head_name)
self._thresholds = thresholds if thresholds else (.5,)
self._loss_fn = loss_fn if loss_fn else _sigmoid_cross_entropy_loss
self._enable_centered_bias = enable_centered_bias
self._metric_class_ids = tuple([] if metric_class_ids is None else
metric_class_ids)
for class_id in self._metric_class_ids:
if (class_id < 0) or (class_id >= n_classes):
raise ValueError("Class ID %s not in [0, %s)." % (class_id, n_classes))
def create_model_fn_ops(self,
features,
mode,
labels=None,
train_op_fn=None,
logits=None,
logits_input=None,
scope=None):
"""See `Head`."""
with variable_scope.variable_scope(
scope,
default_name=self.head_name or "multi_label_head",
values=(tuple(six.itervalues(features)) +
(labels, logits, logits_input))):
labels = self._transform_labels(mode=mode, labels=labels)
logits = _logits(logits_input, logits, self.logits_dimension)
return _create_model_fn_ops(
features=features,
mode=mode,
loss_fn=self._loss_fn,
logits_to_predictions_fn=self._logits_to_predictions,
metrics_fn=self._metrics,
create_output_alternatives_fn=_classification_output_alternatives(
self.head_name, self._problem_type),
labels=labels,
train_op_fn=train_op_fn,
logits=logits,
logits_dimension=self.logits_dimension,
head_name=self.head_name,
weight_column_name=self.weight_column_name,
enable_centered_bias=self._enable_centered_bias)
def _transform_labels(self, mode, labels):
"""Applies transformations to labels tensor."""
if (mode == model_fn.ModeKeys.INFER) or (labels is None):
return None
labels_tensor = _to_labels_tensor(labels, self._label_name)
labels_tensor = _sparse_labels_to_indicator(labels_tensor,
self._logits_dimension)
return labels_tensor
def _logits_to_predictions(self, logits):
"""See `_MultiClassHead`."""
with ops.name_scope(None, "predictions", (logits,)):
return {
prediction_key.PredictionKey.LOGITS:
logits,
prediction_key.PredictionKey.PROBABILITIES:
math_ops.sigmoid(
logits, name=prediction_key.PredictionKey.PROBABILITIES),
prediction_key.PredictionKey.CLASSES:
math_ops.to_int64(
math_ops.greater(logits, 0),
name=prediction_key.PredictionKey.CLASSES)
}
def _metrics(self, eval_loss, predictions, labels, weights):
"""Returns a dict of metrics keyed by name."""
with ops.name_scope("metrics", values=(
[eval_loss, labels, weights] + list(six.itervalues(predictions)))):
classes = predictions[prediction_key.PredictionKey.CLASSES]
probabilities = predictions[prediction_key.PredictionKey.PROBABILITIES]
logits = predictions[prediction_key.PredictionKey.LOGITS]
metrics = {_summary_key(self.head_name, mkey.LOSS):
metrics_lib.mean(eval_loss)}
# TODO(b/29366811): This currently results in both an "accuracy" and an
# "accuracy/threshold_0.500000_mean" metric for binary classification.
metrics[_summary_key(self.head_name, mkey.ACCURACY)] = (
metrics_lib.accuracy(labels, classes, weights))
metrics[_summary_key(self.head_name, mkey.AUC)] = _streaming_auc(
probabilities, labels, weights)
metrics[_summary_key(self.head_name, mkey.AUC_PR)] = _streaming_auc(
probabilities, labels, weights, curve="PR")
for class_id in self._metric_class_ids:
# TODO(ptucker): Add per-class accuracy, precision, recall.
metrics[_summary_key(
self.head_name, mkey.CLASS_PREDICTION_MEAN % class_id)] = (
_predictions_streaming_mean(classes, weights, class_id))
metrics[_summary_key(
self.head_name, mkey.CLASS_LABEL_MEAN % class_id)] = (
_indicator_labels_streaming_mean(labels, weights, class_id))
metrics[_summary_key(
self.head_name, mkey.CLASS_PROBABILITY_MEAN % class_id)] = (
_predictions_streaming_mean(probabilities, weights, class_id))
metrics[_summary_key(
self.head_name, mkey.CLASS_LOGITS_MEAN % class_id)] = (
_predictions_streaming_mean(logits, weights, class_id))
metrics[_summary_key(self.head_name, mkey.CLASS_AUC % class_id)] = (
_streaming_auc(probabilities, labels, weights, class_id))
metrics[_summary_key(self.head_name, mkey.CLASS_AUC_PR % class_id)] = (
_streaming_auc(probabilities, labels, weights, class_id,
curve="PR"))
return metrics
class _LossOnlyHead(Head):
"""`Head` implementation for additional loss terms.
This class only holds loss terms unrelated to any other heads (labels),
e.g. regularization.
Common usage:
This is often combined with other heads in a multi head setup.
```python
head = multi_head([
head1, head2, loss_only_head('regularizer', regularizer)])
```
"""
def __init__(self, loss_fn, head_name=None):
self._loss_fn = loss_fn
self.head_name = head_name or "loss_only_head"
@property
def logits_dimension(self):
return 0
def create_model_fn_ops(self,
features,
mode,
labels=None,
train_op_fn=None,
logits=None,
logits_input=None,
scope=None):
"""See `_Head.create_model_fn_ops`.
Args:
features: Not used.
mode: Estimator's `ModeKeys`.
labels: Labels `Tensor`, or `dict` of same.
train_op_fn: Function that takes a scalar loss and returns an op to
optimize with the loss.
logits: Not used.
logits_input: Not used.
scope: Optional scope for variable_scope. If provided, will be passed to
all heads. Most users will want to set this to `None`, so each head
constructs a separate variable_scope according to its `head_name`.
Returns:
A `ModelFnOps` object.
Raises:
ValueError: if `mode` is not recognized.
"""
_check_mode_valid(mode)
loss = None
train_op = None
if mode != model_fn.ModeKeys.INFER:
with variable_scope.variable_scope(scope, default_name=self.head_name):
loss = self._loss_fn()
if isinstance(loss, list):
loss = math_ops.add_n(loss)
# The name_scope escapism is needed to maintain the same summary tag
# after switching away from the now unsupported API.
with ops.name_scope(""):
summary_loss = array_ops.identity(loss)
summary.scalar(_summary_key(self.head_name, mkey.LOSS),
summary_loss)
if mode == model_fn.ModeKeys.TRAIN:
if train_op_fn is None:
raise ValueError("train_op_fn can not be None in TRAIN mode")
with ops.name_scope(None, "train_op", (loss,)):
train_op = train_op_fn(loss)
return model_fn.ModelFnOps(
mode=mode,
loss=loss,
train_op=train_op,
predictions={},
eval_metric_ops={})
class _MultiHead(Head):
"""`Head` implementation for multi objective learning.
This class is responsible for using and merging the output of multiple
`Head` objects.
All heads stem from the same logits/logit_input tensor.
Common usage:
For simple use cases you can pass the activation of hidden layer like
this from your model_fn,
```python
last_hidden_layer_activation = ... Build your model.
multi_head = ...
return multi_head.create_model_fn_ops(
..., logits_input=last_hidden_layer_activation, ...)
```
Or you can create a logits tensor of
[batch_size, multi_head.logits_dimension] shape. _MultiHead will split the
logits for you.
return multi_head.create_model_fn_ops(..., logits=logits, ...)
For more complex use cases like a multi-task/multi-tower model or when logits
for each head has to be created separately, you can pass a dict of logits
where the keys match the name of the single heads.
```python
logits = {"head1": logits1, "head2": logits2}
return multi_head.create_model_fn_ops(..., logits=logits, ...)
```
Here is what this class does,
+ For training, merges losses of each head according to a function provided by
the user, and calls the user provided train_op_fn with this final loss.
+ For eval, merges metrics by adding head_name suffix to the keys in eval
metrics.
+ For inference, updates keys in prediction dict to a 2-tuple,
(head_name, prediction_key)
"""
def __init__(self, heads, loss_merger):
"""_Head to merges multiple _Head objects.
Args:
heads: list of _Head objects.
loss_merger: function that takes a list of loss tensors for the heads
and returns the final loss tensor for the multi head.
Raises:
ValueError: if any head does not have a name.
"""
self._logits_dimension = 0
for head in heads:
if not head.head_name:
raise ValueError("Members of MultiHead must have names.")
self._logits_dimension += head.logits_dimension
self._heads = heads
self._loss_merger = loss_merger
@property
def logits_dimension(self):
return self._logits_dimension
def create_model_fn_ops(self,
features,
mode,
labels=None,
train_op_fn=None,
logits=None,
logits_input=None,
scope=None):
"""See `_Head.create_model_fn_ops`.
Args:
features: Input `dict` of `Tensor` objects.
mode: Estimator's `ModeKeys`.
labels: Labels `Tensor`, or `dict` of same.
train_op_fn: Function that takes a scalar loss and returns an op to
optimize with the loss.
logits: Concatenated logits for all heads or a dict of head name to logits
tensor. If concatenated logits, it should have (batch_size, x) shape
where x is the sum of `logits_dimension` of all the heads,
i.e., same as `logits_dimension` of this class. create_model_fn_ops
will split the logits tensor and pass logits of proper size to each
head. This is useful if we want to be agnostic about whether you are
creating a single head versus a multi head. logits can also be a dict for
convenience where you are creating the head specific logits explicitly
and don't want to concatenate them yourself.
logits_input: tensor to build logits from.
scope: Optional scope for variable_scope. If provided, will be passed to
all heads. Most users will want to set this to `None`, so each head
constructs a separate variable_scope according to its `head_name`.
Returns:
`ModelFnOps`.
Raises:
ValueError: if `mode` is not recognized, or neither or both of `logits`
and `logits_input` is provided.
"""
_check_mode_valid(mode)
all_model_fn_ops = []
if logits is None:
# Use logits_input.
for head in self._heads:
all_model_fn_ops.append(
head.create_model_fn_ops(
features=features,
mode=mode,
labels=labels,
train_op_fn=no_op_train_fn,
logits_input=logits_input,
scope=scope))
else:
head_logits_pairs = []
if isinstance(logits, dict):
head_logits_pairs = []
for head in self._heads:
if isinstance(head, _LossOnlyHead):
head_logits_pairs.append((head, None))
else:
head_logits_pairs.append((head, logits[head.head_name]))
else:
# Split logits for each head.
head_logits_pairs = zip(self._heads, self._split_logits(logits))
for head, head_logits in head_logits_pairs:
all_model_fn_ops.append(
head.create_model_fn_ops(
features=features,
mode=mode,
labels=labels,
train_op_fn=no_op_train_fn,
logits=head_logits,
scope=scope))
if mode == model_fn.ModeKeys.TRAIN:
if train_op_fn is None:
raise ValueError("train_op_fn can not be None in TRAIN mode.")
return self._merge_train(all_model_fn_ops, train_op_fn)
if mode == model_fn.ModeKeys.INFER:
return self._merge_infer(all_model_fn_ops)
if mode == model_fn.ModeKeys.EVAL:
return self._merge_eval(all_model_fn_ops)
raise ValueError("mode=%s unrecognized" % str(mode))
def _split_logits(self, logits):
"""Splits logits for heads.
Args:
logits: the logits tensor.
Returns:
A list of logits for the individual heads.
"""
all_logits = []
begin = 0
for head in self._heads:
current_logits_size = head.logits_dimension
current_logits = array_ops.slice(logits, [0, begin],
[-1, current_logits_size])
all_logits.append(current_logits)
begin += current_logits_size
return all_logits
def _merge_train(self, all_model_fn_ops, train_op_fn):
"""Merges list of ModelFnOps for training.
Args:
all_model_fn_ops: list of ModelFnOps for the individual heads.
train_op_fn: Function to create train op. See `create_model_fn_ops`
documentation for more details.
Returns:
ModelFnOps that merges all heads for TRAIN.
"""
losses = []
metrics = {}
additional_train_ops = []
for m in all_model_fn_ops:
losses.append(m.loss)
if m.eval_metric_ops is not None:
for k, v in six.iteritems(m.eval_metric_ops):
# metrics["%s/%s" % (k, head_name)] = v
metrics[k] = v
additional_train_ops.append(m.train_op)
loss = self._loss_merger(losses)
train_op = train_op_fn(loss)
train_op = control_flow_ops.group(train_op, *additional_train_ops)
return model_fn.ModelFnOps(
mode=model_fn.ModeKeys.TRAIN,
loss=loss,
train_op=train_op,
eval_metric_ops=metrics)
def _merge_infer(self, all_model_fn_ops):
"""Merges list of ModelFnOps for inference.
Args:
all_model_fn_ops: list of ModelFnOps for the individual heads.
Returns:
      ModelFnOps that merges all the heads for INFER.
"""
predictions = {}
output_alternatives = {}
for head, m in zip(self._heads, all_model_fn_ops):
if isinstance(head, _LossOnlyHead):
continue
head_name = head.head_name
output_alternatives[head_name] = m.output_alternatives[head_name]
for k, v in m.predictions.items():
predictions[(head_name, k)] = v
return model_fn.ModelFnOps(
mode=model_fn.ModeKeys.INFER,
predictions=predictions,
output_alternatives=output_alternatives)
def _merge_eval(self, all_model_fn_ops):
"""Merges list of ModelFnOps for eval.
Args:
all_model_fn_ops: list of ModelFnOps for the individual heads.
Returns:
ModelFnOps that merges all the heads for EVAL.
"""
predictions = {}
metrics = {}
losses = []
for head, m in zip(self._heads, all_model_fn_ops):
losses.append(m.loss)
head_name = head.head_name
for k, v in m.predictions.items():
predictions[(head_name, k)] = v
for k, v in m.eval_metric_ops.items():
# metrics["%s/%s" % (k, head_name)] = v
metrics[k] = v
loss = self._loss_merger(losses)
return model_fn.ModelFnOps(
mode=model_fn.ModeKeys.EVAL,
predictions=predictions,
loss=loss,
eval_metric_ops=metrics)
def _weight_tensor(features, weight_column_name):
"""Returns weights as `Tensor` of rank 0, or at least 2."""
if not weight_column_name:
return None
if weight_column_name not in features:
raise ValueError("Weights {} missing from features.".format(
weight_column_name))
with ops.name_scope(None, "weight_tensor", tuple(six.itervalues(features))):
weight_tensor = math_ops.to_float(features[weight_column_name])
shape = weight_tensor.get_shape()
rank = shape.ndims
    # We don't bother with expanding dims of non-statically shaped tensors or
    # scalars, and >1d is already in a good format.
if rank == 1:
logging.warning("Weights {} has shape {}, expanding to make it 2d.".
format(weight_column_name, shape))
return (
sparse_ops.sparse_reshape(weight_tensor, (-1, 1))
if isinstance(weight_tensor, sparse_tensor.SparseTensor) else
array_ops.reshape(weight_tensor, (-1, 1)))
return weight_tensor
# TODO(zakaria): This function is needed for backward compatibility and should
# be removed when we migrate to core.
def _compute_weighted_loss(loss_unweighted, weight, name="loss"):
"""Returns a tuple of (loss_train, loss_report).
loss is used for gradient descent while weighted_average_loss is used for
summaries to be backward compatible.
loss is different from the loss reported on the tensorboard as we
should respect the example weights when computing the gradient.
L = sum_{i} w_{i} * l_{i} / B
where B is the number of examples in the batch, l_{i}, w_{i} are individual
losses, and example weight.
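  For example (illustrative numbers), with per-example losses [1.0, 3.0] and
  weights [1.0, 0.5], the training loss is (1.0 + 1.5) / 2 = 1.25, while the
  reported weighted average loss is (1.0 + 1.5) / (1.0 + 0.5) ≈ 1.67.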
Args:
loss_unweighted: Unweighted loss
weight: Weight tensor
name: Optional name
Returns:
    A tuple of losses: the first for training and the second for reporting.
"""
with ops.name_scope(name, values=(loss_unweighted, weight)) as name_scope:
if weight is None:
loss = math_ops.reduce_mean(loss_unweighted, name=name_scope)
return loss, loss
weight = weights_broadcast_ops.broadcast_weights(weight, loss_unweighted)
with ops.name_scope(None, "weighted_loss",
(loss_unweighted, weight)) as name:
weighted_loss = math_ops.multiply(loss_unweighted, weight, name=name)
weighted_loss_mean = math_ops.reduce_mean(weighted_loss, name=name_scope)
weighted_loss_normalized = math_ops.div(
math_ops.reduce_sum(weighted_loss),
math_ops.to_float(math_ops.reduce_sum(weight)),
name="weighted_average_loss")
return weighted_loss_mean, weighted_loss_normalized
def _wrap_custom_loss_fn(loss_fn):
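  # Sketch of intended usage (hypothetical names): given a custom
  # my_loss_fn(labels, logits, weights), the returned wrapper yields the same
  # loss tensor twice, matching the (training loss, reported loss) convention
  # of _compute_weighted_loss.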
def _wrapper(labels, logits, weights=None):
if weights is None:
loss = loss_fn(labels, logits)
else:
loss = loss_fn(labels, logits, weights)
return loss, loss
return _wrapper
def _check_mode_valid(mode):
"""Raises ValueError if the given mode is invalid."""
if (mode != model_fn.ModeKeys.TRAIN and mode != model_fn.ModeKeys.INFER and
mode != model_fn.ModeKeys.EVAL):
raise ValueError("mode=%s unrecognized." % str(mode))
def _get_arguments(func):
"""Returns a spec of given func."""
_, func = tf_decorator.unwrap(func)
if hasattr(func, "__code__"):
# Regular function.
return tf_inspect.getargspec(func)
elif hasattr(func, "__call__"):
# Callable object.
return _get_arguments(func.__call__)
elif hasattr(func, "func"):
# Partial function.
return _get_arguments(func.func)
def _verify_loss_fn_args(loss_fn):
args = _get_arguments(loss_fn).args
for arg_name in ["labels", "logits", "weights"]:
if arg_name not in args:
raise ValueError("Argument %s not found in loss_fn." % arg_name)
def _centered_bias(logits_dimension, head_name=None):
"""Returns centered_bias `Variable`.
Args:
logits_dimension: Last dimension of `logits`. Must be >= 1.
head_name: Optional name of the head.
Returns:
`Variable` with shape `[logits_dimension]`.
Raises:
ValueError: if `logits_dimension` is invalid.
"""
if (logits_dimension is None) or (logits_dimension < 1):
raise ValueError("Invalid logits_dimension %s." % logits_dimension)
# Do not create a variable with variable_scope.get_variable, because that may
# create a PartitionedVariable, which does not support indexing, so
# summary.scalar will not work.
centered_bias = variable_scope.variable(
name="centered_bias_weight",
initial_value=array_ops.zeros(shape=(logits_dimension,)),
trainable=True)
for dim in range(logits_dimension):
if head_name:
summary.scalar("centered_bias/bias_%d/%s" % (dim, head_name),
centered_bias[dim])
else:
summary.scalar("centered_bias/bias_%d" % dim, centered_bias[dim])
return centered_bias
def _centered_bias_step(centered_bias, batch_size, labels, loss_fn, weights):
"""Creates and returns training op for centered bias."""
with ops.name_scope(None, "centered_bias_step", (labels,)) as name:
logits_dimension = array_ops.shape(centered_bias)[0]
logits = array_ops.reshape(
array_ops.tile(centered_bias, (batch_size,)),
(batch_size, logits_dimension))
with ops.name_scope(None, "centered_bias", (labels, logits)):
centered_bias_loss = math_ops.reduce_mean(
loss_fn(labels, logits, weights), name="training_loss")
    # Learn the centered bias with an optimizer; 0.1 is a conservative learning
    # rate for a single variable.
return training.AdagradOptimizer(0.1).minimize(
centered_bias_loss, var_list=(centered_bias,), name=name)
def _summary_key(head_name, val):
return "%s/%s" % (val, head_name) if head_name else val
def _train_op(loss, labels, train_op_fn, centered_bias, batch_size, loss_fn,
weights):
"""Returns op for the training step."""
if centered_bias is not None:
centered_bias_step = _centered_bias_step(
centered_bias=centered_bias,
batch_size=batch_size,
labels=labels,
loss_fn=loss_fn,
weights=weights)
else:
centered_bias_step = None
with ops.name_scope(None, "train_op", (loss, labels)):
train_op = train_op_fn(loss)
if centered_bias_step is not None:
train_op = control_flow_ops.group(train_op, centered_bias_step)
return train_op
def _sigmoid_cross_entropy_loss(labels, logits, weights=None):
with ops.name_scope(None, "sigmoid_cross_entropy_loss",
(logits, labels)) as name:
# sigmoid_cross_entropy_with_logits requires [batch_size, n_classes] labels.
loss = nn.sigmoid_cross_entropy_with_logits(
labels=math_ops.to_float(labels), logits=logits, name=name)
return _compute_weighted_loss(loss, weights)
def _float_weights_or_none(weights):
if weights is None:
return None
with ops.name_scope(None, "float_weights", (weights,)) as name:
return math_ops.to_float(weights, name=name)
def _indicator_labels_streaming_mean(labels, weights=None, class_id=None):
labels = math_ops.to_float(labels)
weights = _float_weights_or_none(weights)
if weights is not None:
weights = weights_broadcast_ops.broadcast_weights(weights, labels)
if class_id is not None:
if weights is not None:
weights = weights[:, class_id]
labels = labels[:, class_id]
return metrics_lib.mean(labels, weights)
def _predictions_streaming_mean(predictions,
weights=None,
class_id=None):
predictions = math_ops.to_float(predictions)
weights = _float_weights_or_none(weights)
if weights is not None:
weights = weights_broadcast_ops.broadcast_weights(weights, predictions)
if class_id is not None:
if weights is not None:
weights = weights[:, class_id]
predictions = predictions[:, class_id]
return metrics_lib.mean(predictions, weights)
# TODO(ptucker): Add support for SparseTensor labels.
def _class_id_labels_to_indicator(labels, num_classes):
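  # Illustrative example: labels [[0], [2]] with num_classes=3 yield the
  # indicator matrix [[1, 0, 0], [0, 0, 1]].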
if (num_classes is None) or (num_classes < 2):
raise ValueError("Invalid num_classes %s." % num_classes)
with ops.control_dependencies((_assert_labels_rank(labels),)):
labels = array_ops.reshape(labels, (-1,))
return array_ops.one_hot(labels, depth=num_classes, axis=-1)
def _class_predictions_streaming_mean(predictions, weights, class_id):
return metrics_lib.mean(
array_ops.where(
math_ops.equal(
math_ops.to_int32(class_id), math_ops.to_int32(predictions)),
array_ops.ones_like(predictions),
array_ops.zeros_like(predictions)),
weights=weights)
def _class_labels_streaming_mean(labels, weights, class_id):
return metrics_lib.mean(
array_ops.where(
math_ops.equal(
math_ops.to_int32(class_id), math_ops.to_int32(labels)),
array_ops.ones_like(labels), array_ops.zeros_like(labels)),
weights=weights)
def _streaming_auc(predictions, labels, weights=None, class_id=None,
curve="ROC"):
# pylint: disable=missing-docstring
predictions = math_ops.to_float(predictions)
if labels.dtype.base_dtype != dtypes.bool:
logging.warning("Casting %s labels to bool.", labels.dtype)
labels = math_ops.cast(labels, dtypes.bool)
weights = _float_weights_or_none(weights)
if weights is not None:
weights = weights_broadcast_ops.broadcast_weights(weights, predictions)
if class_id is not None:
if weights is not None:
weights = weights[:, class_id]
predictions = predictions[:, class_id]
labels = labels[:, class_id]
return metrics_lib.auc(labels, predictions, weights, curve=curve)
def _assert_class_id(class_id, num_classes=None):
"""Average label value for class `class_id`."""
if (class_id is None) or (class_id < 0):
raise ValueError("Invalid class_id %s." % class_id)
if num_classes is not None:
if num_classes < 2:
raise ValueError("Invalid num_classes %s." % num_classes)
if class_id >= num_classes:
raise ValueError("Invalid class_id %s." % class_id)
def _streaming_accuracy_at_threshold(predictions, labels, weights, threshold):
threshold_predictions = math_ops.to_float(
math_ops.greater_equal(predictions, threshold))
return metrics_lib.accuracy(labels, threshold_predictions, weights)
def _streaming_precision_at_threshold(predictions, labels, weights, threshold):
precision_tensor, update_op = metrics_lib.precision_at_thresholds(
labels, predictions, (threshold,), _float_weights_or_none(weights))
return array_ops.squeeze(precision_tensor), array_ops.squeeze(update_op)
def _streaming_recall_at_threshold(predictions, labels, weights, threshold):
precision_tensor, update_op = metrics_lib.recall_at_thresholds(
labels, predictions, (threshold,), _float_weights_or_none(weights))
return array_ops.squeeze(precision_tensor), array_ops.squeeze(update_op)
def _classification_output_alternatives(head_name, problem_type,
label_keys=None):
"""Creates a func to generate output alternatives for classification.
  Servo expects classes to be a string tensor with the same dimensions as the
  probabilities tensor, containing the labels of the corresponding entries in
  probabilities. This function creates a classes tensor that satisfies these
  conditions and can be exported.
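  For example, with label_keys=["cat", "dog"] and a batch of two examples, the
  exported classes tensor would be [["cat", "dog"], ["cat", "dog"]], matching a
  probabilities tensor of shape [2, 2].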
Args:
head_name: Name of the head.
problem_type: `ProblemType`
label_keys: Optional label keys
Returns:
A function to generate output alternatives.
"""
def _create_output_alternatives(predictions):
"""Creates output alternative for the Head.
Args:
predictions: a dict of {tensor_name: Tensor}, where 'tensor_name' is a
symbolic name for an output Tensor possibly but not necessarily taken
from `PredictionKey`, and 'Tensor' is the corresponding output Tensor
itself.
Returns:
`dict` of {submodel_name: (problem_type, {tensor_name: Tensor})}, where
'submodel_name' is a submodel identifier that should be consistent across
the pipeline (here likely taken from the head_name),
'problem_type' is a `ProblemType`,
'tensor_name' is a symbolic name for an output Tensor possibly but not
necessarily taken from `PredictionKey`, and
'Tensor' is the corresponding output Tensor itself.
Raises:
ValueError: if predictions does not have PredictionKey.PROBABILITIES key.
"""
probabilities = predictions.get(prediction_key.PredictionKey.PROBABILITIES)
if probabilities is None:
raise ValueError("%s missing in predictions" %
prediction_key.PredictionKey.PROBABILITIES)
with ops.name_scope(None, "_classification_output_alternatives",
(probabilities,)):
batch_size = array_ops.shape(probabilities)[0]
if label_keys:
classes = array_ops.tile(
input=array_ops.expand_dims(input=label_keys, axis=0),
multiples=[batch_size, 1],
name="classes_tensor")
else:
n = array_ops.shape(probabilities)[1]
classes = array_ops.tile(
input=array_ops.expand_dims(input=math_ops.range(n), axis=0),
multiples=[batch_size, 1])
classes = string_ops.as_string(classes, name="classes_tensor")
exported_predictions = {
prediction_key.PredictionKey.PROBABILITIES: probabilities,
prediction_key.PredictionKey.CLASSES: classes}
return {head_name: (problem_type, exported_predictions)}
return _create_output_alternatives
# Aliases
# TODO(zakaria): Remove these aliases, See b/34751732
_regression_head = regression_head
_poisson_regression_head = poisson_regression_head
_multi_class_head = multi_class_head
_binary_svm_head = binary_svm_head
_multi_label_head = multi_label_head
_multi_head = multi_head
_Head = Head
| apache-2.0 |
ehashman/oh-mainline | vendor/packages/Django/tests/regressiontests/generic_views/urls.py | 41 | 12119 | from __future__ import absolute_import
from django.conf.urls import patterns, url
from django.views.decorators.cache import cache_page
from django.views.generic import TemplateView
from . import models
from . import views
urlpatterns = patterns('',
# base
#(r'^about/login-required/$',
# views.DecoratedAboutView()),
# TemplateView
(r'^template/no_template/$',
TemplateView.as_view()),
(r'^template/simple/(?P<foo>\w+)/$',
TemplateView.as_view(template_name='generic_views/about.html')),
(r'^template/custom/(?P<foo>\w+)/$',
views.CustomTemplateView.as_view(template_name='generic_views/about.html')),
(r'^template/content_type/$',
TemplateView.as_view(template_name='generic_views/robots.txt', content_type='text/plain')),
(r'^template/cached/(?P<foo>\w+)/$',
cache_page(2.0)(TemplateView.as_view(template_name='generic_views/about.html'))),
# DetailView
(r'^detail/obj/$',
views.ObjectDetail.as_view()),
url(r'^detail/artist/(?P<pk>\d+)/$',
views.ArtistDetail.as_view(),
name="artist_detail"),
url(r'^detail/author/(?P<pk>\d+)/$',
views.AuthorDetail.as_view(),
name="author_detail"),
(r'^detail/author/bycustompk/(?P<foo>\d+)/$',
views.AuthorDetail.as_view(pk_url_kwarg='foo')),
(r'^detail/author/byslug/(?P<slug>[\w-]+)/$',
views.AuthorDetail.as_view()),
(r'^detail/author/bycustomslug/(?P<foo>[\w-]+)/$',
views.AuthorDetail.as_view(slug_url_kwarg='foo')),
(r'^detail/author/(?P<pk>\d+)/template_name_suffix/$',
views.AuthorDetail.as_view(template_name_suffix='_view')),
(r'^detail/author/(?P<pk>\d+)/template_name/$',
views.AuthorDetail.as_view(template_name='generic_views/about.html')),
(r'^detail/author/(?P<pk>\d+)/context_object_name/$',
views.AuthorDetail.as_view(context_object_name='thingy')),
(r'^detail/author/(?P<pk>\d+)/dupe_context_object_name/$',
views.AuthorDetail.as_view(context_object_name='object')),
(r'^detail/page/(?P<pk>\d+)/field/$',
views.PageDetail.as_view()),
(r'^detail/author/invalid/url/$',
views.AuthorDetail.as_view()),
(r'^detail/author/invalid/qs/$',
views.AuthorDetail.as_view(queryset=None)),
(r'^detail/nonmodel/1/$',
views.NonModelDetail.as_view()),
# FormView
(r'^contact/$',
views.ContactView.as_view()),
# Create/UpdateView
(r'^edit/artists/create/$',
views.ArtistCreate.as_view()),
(r'^edit/artists/(?P<pk>\d+)/update/$',
views.ArtistUpdate.as_view()),
(r'^edit/authors/create/naive/$',
views.NaiveAuthorCreate.as_view()),
(r'^edit/authors/create/redirect/$',
views.NaiveAuthorCreate.as_view(success_url='/edit/authors/create/')),
(r'^edit/authors/create/interpolate_redirect/$',
views.NaiveAuthorCreate.as_view(success_url='/edit/author/%(id)d/update/')),
(r'^edit/authors/create/restricted/$',
views.AuthorCreateRestricted.as_view()),
(r'^edit/authors/create/$',
views.AuthorCreate.as_view()),
(r'^edit/authors/create/special/$',
views.SpecializedAuthorCreate.as_view()),
(r'^edit/author/(?P<pk>\d+)/update/naive/$',
views.NaiveAuthorUpdate.as_view()),
(r'^edit/author/(?P<pk>\d+)/update/redirect/$',
views.NaiveAuthorUpdate.as_view(success_url='/edit/authors/create/')),
(r'^edit/author/(?P<pk>\d+)/update/interpolate_redirect/$',
views.NaiveAuthorUpdate.as_view(success_url='/edit/author/%(id)d/update/')),
(r'^edit/author/(?P<pk>\d+)/update/$',
views.AuthorUpdate.as_view()),
(r'^edit/author/update/$',
views.OneAuthorUpdate.as_view()),
(r'^edit/author/(?P<pk>\d+)/update/special/$',
views.SpecializedAuthorUpdate.as_view()),
(r'^edit/author/(?P<pk>\d+)/delete/naive/$',
views.NaiveAuthorDelete.as_view()),
(r'^edit/author/(?P<pk>\d+)/delete/redirect/$',
views.NaiveAuthorDelete.as_view(success_url='/edit/authors/create/')),
(r'^edit/author/(?P<pk>\d+)/delete/$',
views.AuthorDelete.as_view()),
(r'^edit/author/(?P<pk>\d+)/delete/special/$',
views.SpecializedAuthorDelete.as_view()),
# ArchiveIndexView
(r'^dates/books/$',
views.BookArchive.as_view()),
(r'^dates/books/context_object_name/$',
views.BookArchive.as_view(context_object_name='thingies')),
(r'^dates/books/allow_empty/$',
views.BookArchive.as_view(allow_empty=True)),
(r'^dates/books/template_name/$',
views.BookArchive.as_view(template_name='generic_views/list.html')),
(r'^dates/books/template_name_suffix/$',
views.BookArchive.as_view(template_name_suffix='_detail')),
(r'^dates/books/invalid/$',
views.BookArchive.as_view(queryset=None)),
(r'^dates/books/paginated/$',
views.BookArchive.as_view(paginate_by=10)),
(r'^dates/books/reverse/$',
views.BookArchive.as_view(queryset=models.Book.objects.order_by('pubdate'))),
(r'^dates/books/by_month/$',
views.BookArchive.as_view(date_list_period='month')),
(r'^dates/booksignings/$',
views.BookSigningArchive.as_view()),
# ListView
(r'^list/dict/$',
views.DictList.as_view()),
(r'^list/dict/paginated/$',
views.DictList.as_view(paginate_by=1)),
url(r'^list/artists/$',
views.ArtistList.as_view(),
name="artists_list"),
url(r'^list/authors/$',
views.AuthorList.as_view(),
name="authors_list"),
(r'^list/authors/paginated/$',
views.AuthorList.as_view(paginate_by=30)),
(r'^list/authors/paginated/(?P<page>\d+)/$',
views.AuthorList.as_view(paginate_by=30)),
(r'^list/authors/notempty/$',
views.AuthorList.as_view(allow_empty=False)),
(r'^list/authors/notempty/paginated/$',
views.AuthorList.as_view(allow_empty=False, paginate_by=2)),
(r'^list/authors/template_name/$',
views.AuthorList.as_view(template_name='generic_views/list.html')),
(r'^list/authors/template_name_suffix/$',
views.AuthorList.as_view(template_name_suffix='_objects')),
(r'^list/authors/context_object_name/$',
views.AuthorList.as_view(context_object_name='author_list')),
(r'^list/authors/dupe_context_object_name/$',
views.AuthorList.as_view(context_object_name='object_list')),
(r'^list/authors/invalid/$',
views.AuthorList.as_view(queryset=None)),
(r'^list/authors/paginated/custom_class/$',
views.AuthorList.as_view(paginate_by=5, paginator_class=views.CustomPaginator)),
(r'^list/authors/paginated/custom_page_kwarg/$',
views.AuthorList.as_view(paginate_by=30, page_kwarg='pagina')),
(r'^list/authors/paginated/custom_constructor/$',
views.AuthorListCustomPaginator.as_view()),
# YearArchiveView
    # Mixing keyword and positional captures below is intentional; the views
# ought to be able to accept either.
(r'^dates/books/(?P<year>\d{4})/$',
views.BookYearArchive.as_view()),
(r'^dates/books/(?P<year>\d{4})/make_object_list/$',
views.BookYearArchive.as_view(make_object_list=True)),
(r'^dates/books/(?P<year>\d{4})/allow_empty/$',
views.BookYearArchive.as_view(allow_empty=True)),
(r'^dates/books/(?P<year>\d{4})/allow_future/$',
views.BookYearArchive.as_view(allow_future=True)),
(r'^dates/books/(?P<year>\d{4})/paginated/$',
views.BookYearArchive.as_view(make_object_list=True, paginate_by=30)),
(r'^dates/books/no_year/$',
views.BookYearArchive.as_view()),
(r'^dates/books/(?P<year>\d{4})/reverse/$',
views.BookYearArchive.as_view(queryset=models.Book.objects.order_by('pubdate'))),
(r'^dates/booksignings/(?P<year>\d{4})/$',
views.BookSigningYearArchive.as_view()),
# MonthArchiveView
(r'^dates/books/(?P<year>\d{4})/(?P<month>[a-z]{3})/$',
views.BookMonthArchive.as_view()),
(r'^dates/books/(?P<year>\d{4})/(?P<month>\d{1,2})/$',
views.BookMonthArchive.as_view(month_format='%m')),
(r'^dates/books/(?P<year>\d{4})/(?P<month>[a-z]{3})/allow_empty/$',
views.BookMonthArchive.as_view(allow_empty=True)),
(r'^dates/books/(?P<year>\d{4})/(?P<month>[a-z]{3})/allow_future/$',
views.BookMonthArchive.as_view(allow_future=True)),
(r'^dates/books/(?P<year>\d{4})/(?P<month>[a-z]{3})/paginated/$',
views.BookMonthArchive.as_view(paginate_by=30)),
(r'^dates/books/(?P<year>\d{4})/no_month/$',
views.BookMonthArchive.as_view()),
(r'^dates/booksignings/(?P<year>\d{4})/(?P<month>[a-z]{3})/$',
views.BookSigningMonthArchive.as_view()),
# WeekArchiveView
(r'^dates/books/(?P<year>\d{4})/week/(?P<week>\d{1,2})/$',
views.BookWeekArchive.as_view()),
(r'^dates/books/(?P<year>\d{4})/week/(?P<week>\d{1,2})/allow_empty/$',
views.BookWeekArchive.as_view(allow_empty=True)),
(r'^dates/books/(?P<year>\d{4})/week/(?P<week>\d{1,2})/allow_future/$',
views.BookWeekArchive.as_view(allow_future=True)),
(r'^dates/books/(?P<year>\d{4})/week/(?P<week>\d{1,2})/paginated/$',
views.BookWeekArchive.as_view(paginate_by=30)),
(r'^dates/books/(?P<year>\d{4})/week/no_week/$',
views.BookWeekArchive.as_view()),
(r'^dates/books/(?P<year>\d{4})/week/(?P<week>\d{1,2})/monday/$',
views.BookWeekArchive.as_view(week_format='%W')),
(r'^dates/booksignings/(?P<year>\d{4})/week/(?P<week>\d{1,2})/$',
views.BookSigningWeekArchive.as_view()),
# DayArchiveView
(r'^dates/books/(?P<year>\d{4})/(?P<month>[a-z]{3})/(?P<day>\d{1,2})/$',
views.BookDayArchive.as_view()),
(r'^dates/books/(?P<year>\d{4})/(?P<month>\d{1,2})/(?P<day>\d{1,2})/$',
views.BookDayArchive.as_view(month_format='%m')),
(r'^dates/books/(?P<year>\d{4})/(?P<month>[a-z]{3})/(?P<day>\d{1,2})/allow_empty/$',
views.BookDayArchive.as_view(allow_empty=True)),
(r'^dates/books/(?P<year>\d{4})/(?P<month>[a-z]{3})/(?P<day>\d{1,2})/allow_future/$',
views.BookDayArchive.as_view(allow_future=True)),
(r'^dates/books/(?P<year>\d{4})/(?P<month>[a-z]{3})/(?P<day>\d{1,2})/allow_empty_and_future/$',
views.BookDayArchive.as_view(allow_empty=True, allow_future=True)),
(r'^dates/books/(?P<year>\d{4})/(?P<month>[a-z]{3})/(?P<day>\d{1,2})/paginated/$',
views.BookDayArchive.as_view(paginate_by=True)),
(r'^dates/books/(?P<year>\d{4})/(?P<month>[a-z]{3})/no_day/$',
views.BookDayArchive.as_view()),
(r'^dates/booksignings/(?P<year>\d{4})/(?P<month>[a-z]{3})/(?P<day>\d{1,2})/$',
views.BookSigningDayArchive.as_view()),
# TodayArchiveView
(r'^dates/books/today/$',
views.BookTodayArchive.as_view()),
(r'^dates/books/today/allow_empty/$',
views.BookTodayArchive.as_view(allow_empty=True)),
(r'^dates/booksignings/today/$',
views.BookSigningTodayArchive.as_view()),
# DateDetailView
(r'^dates/books/(?P<year>\d{4})/(?P<month>[a-z]{3})/(?P<day>\d{1,2})/(?P<pk>\d+)/$',
views.BookDetail.as_view()),
(r'^dates/books/(?P<year>\d{4})/(?P<month>\d{1,2})/(?P<day>\d{1,2})/(?P<pk>\d+)/$',
views.BookDetail.as_view(month_format='%m')),
(r'^dates/books/(?P<year>\d{4})/(?P<month>[a-z]{3})/(?P<day>\d{1,2})/(?P<pk>\d+)/allow_future/$',
views.BookDetail.as_view(allow_future=True)),
(r'^dates/books/(?P<year>\d{4})/(?P<month>[a-z]{3})/(?P<day>\d{1,2})/nopk/$',
views.BookDetail.as_view()),
(r'^dates/books/(?P<year>\d{4})/(?P<month>[a-z]{3})/(?P<day>\d{1,2})/byslug/(?P<slug>[\w-]+)/$',
views.BookDetail.as_view()),
(r'^dates/books/get_object_custom_queryset/(?P<year>\d{4})/(?P<month>[a-z]{3})/(?P<day>\d{1,2})/(?P<pk>\d+)/$',
views.BookDetailGetObjectCustomQueryset.as_view()),
(r'^dates/booksignings/(?P<year>\d{4})/(?P<month>[a-z]{3})/(?P<day>\d{1,2})/(?P<pk>\d+)/$',
views.BookSigningDetail.as_view()),
# Useful for testing redirects
(r'^accounts/login/$', 'django.contrib.auth.views.login')
)
| agpl-3.0 |