repo_name stringlengths 5 100 | path stringlengths 4 294 | copies stringclasses 990
values | size stringlengths 4 7 | content stringlengths 666 1M | license stringclasses 15
values |
|---|---|---|---|---|---|
dlozeve/reveal_CommunityDetection | node_modules/node-gyp/gyp/pylib/gyp/MSVSUserFile.py | 2710 | 5094 | # Copyright (c) 2012 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Visual Studio user preferences file writer."""
import os
import re
import socket # for gethostname
import gyp.common
import gyp.easy_xml as easy_xml
#------------------------------------------------------------------------------
def _FindCommandInPath(command):
"""If there are no slashes in the command given, this function
searches the PATH env to find the given command, and converts it
to an absolute path. We have to do this because MSVS is looking
for an actual file to launch a debugger on, not just a command
line. Note that this happens at GYP time, so anything needing to
be built needs to have a full path."""
if '/' in command or '\\' in command:
# If the command already has path elements (either relative or
# absolute), then assume it is constructed properly.
return command
else:
# Search through the path list and find an existing file that
# we can access.
paths = os.environ.get('PATH','').split(os.pathsep)
for path in paths:
item = os.path.join(path, command)
if os.path.isfile(item) and os.access(item, os.X_OK):
return item
return command
def _QuoteWin32CommandLineArgs(args):
new_args = []
for arg in args:
# Replace all double-quotes with double-double-quotes to escape
# them for cmd shell, and then quote the whole thing if there
# are any.
if arg.find('"') != -1:
arg = '""'.join(arg.split('"'))
arg = '"%s"' % arg
# Otherwise, if there are any spaces, quote the whole arg.
elif re.search(r'[ \t\n]', arg):
arg = '"%s"' % arg
new_args.append(arg)
return new_args
class Writer(object):
  """Visual Studio XML user file writer."""

  def __init__(self, user_file_path, version, name):
    """Initializes the user file.

    Args:
      user_file_path: Path to the user file.
      version: Version info.
      name: Name of the user file.
    """
    self.user_file_path = user_file_path
    self.version = version
    self.name = name
    # Maps configuration name -> easy_xml specification list.
    self.configurations = {}

  def AddConfig(self, name):
    """Adds a configuration to the project.

    Args:
      name: Configuration name.
    """
    self.configurations[name] = ['Configuration', {'Name': name}]

  def AddDebugSettings(self, config_name, command, environment=None,
                       working_directory=""):
    """Adds a DebugSettings node to the user file for a particular config.

    Args:
      config_name: Name of the configuration the settings apply to.
      command: command line to run.  First element in the list is the
        executable.  All elements of the command will be quoted if
        necessary.
      environment: dict of environment variables for the debuggee
        (optional).  Defaults to None instead of a mutable {} so one
        default dict is never shared across calls.
      working_directory: working directory for the debuggee (optional).
    """
    command = _QuoteWin32CommandLineArgs(command)

    abs_command = _FindCommandInPath(command[0])

    if environment and isinstance(environment, dict):
      # items() instead of iteritems(): same result, works on both
      # Python 2 and Python 3.
      env_list = ['%s="%s"' % (key, val)
                  for (key, val) in environment.items()]
      environment = ' '.join(env_list)
    else:
      environment = ''

    n_cmd = ['DebugSettings',
             {'Command': abs_command,
              'WorkingDirectory': working_directory,
              'CommandArguments': " ".join(command[1:]),
              'RemoteMachine': socket.gethostname(),
              'Environment': environment,
              'EnvironmentMerge': 'true',
              # Currently these are all "dummy" values that we're just setting
              # in the default manner that MSVS does it.  We could use some of
              # these to add additional capabilities, I suppose, but they might
              # not have parity with other platforms then.
              'Attach': 'false',
              'DebuggerType': '3',  # 'auto' debugger
              'Remote': '1',
              'RemoteCommand': '',
              'HttpUrl': '',
              'PDBPath': '',
              'SQLDebugging': '',
              'DebuggerFlavor': '0',
              'MPIRunCommand': '',
              'MPIRunArguments': '',
              'MPIRunWorkingDirectory': '',
              'ApplicationCommand': '',
              'ApplicationArguments': '',
              'ShimCommand': '',
              'MPIAcceptMode': '',
              'MPIAcceptFilter': ''
              }]

    # Find the config, and add it if it doesn't exist.
    if config_name not in self.configurations:
      self.AddConfig(config_name)

    # Add the DebugSettings onto the appropriate config.
    self.configurations[config_name].append(n_cmd)

  def WriteIfChanged(self):
    """Writes the user file."""
    configs = ['Configurations']
    for config, spec in sorted(self.configurations.items()):
      configs.append(spec)

    content = ['VisualStudioUserFile',
               {'Version': self.version.ProjectVersion(),
                'Name': self.name
                },
               configs]
    easy_xml.WriteXmlIfChanged(content, self.user_file_path,
                               encoding="Windows-1252")
| mit |
ifesdjeen/cassandra | pylib/cqlshlib/wcwidth.py | 20 | 15875 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# adapted from http://www.cl.cam.ac.uk/~mgk25/ucs/wcwidth.c
# -thepaul
# This is an implementation of wcwidth() and wcswidth() (defined in
# IEEE Std 1002.1-2001) for Unicode.
#
# http://www.opengroup.org/onlinepubs/007904975/functions/wcwidth.html
# http://www.opengroup.org/onlinepubs/007904975/functions/wcswidth.html
#
# In fixed-width output devices, Latin characters all occupy a single
# "cell" position of equal width, whereas ideographic CJK characters
# occupy two such cells. Interoperability between terminal-line
# applications and (teletype-style) character terminals using the
# UTF-8 encoding requires agreement on which character should advance
# the cursor by how many cell positions. No established formal
# standards exist at present on which Unicode character shall occupy
# how many cell positions on character terminals. These routines are
# a first attempt of defining such behavior based on simple rules
# applied to data provided by the Unicode Consortium.
#
# For some graphical characters, the Unicode standard explicitly
# defines a character-cell width via the definition of the East Asian
# FullWidth (F), Wide (W), Half-width (H), and Narrow (Na) classes.
# In all these cases, there is no ambiguity about which width a
# terminal shall use. For characters in the East Asian Ambiguous (A)
# class, the width choice depends purely on a preference of backward
# compatibility with either historic CJK or Western practice.
# Choosing single-width for these characters is easy to justify as
# the appropriate long-term solution, as the CJK practice of
# displaying these characters as double-width comes from historic
# implementation simplicity (8-bit encoded characters were displayed
# single-width and 16-bit ones double-width, even for Greek,
# Cyrillic, etc.) and not any typographic considerations.
#
# Much less clear is the choice of width for the Not East Asian
# (Neutral) class. Existing practice does not dictate a width for any
# of these characters. It would nevertheless make sense
# typographically to allocate two character cells to characters such
# as for instance EM SPACE or VOLUME INTEGRAL, which cannot be
# represented adequately with a single-width glyph. The following
# routines at present merely assign a single-cell width to all
# neutral characters, in the interest of simplicity. This is not
# entirely satisfactory and should be reconsidered before
# establishing a formal standard in this area. At the moment, the
# decision which Not East Asian (Neutral) characters should be
# represented by double-width glyphs cannot yet be answered by
# applying a simple rule from the Unicode database content. Setting
# up a proper standard for the behavior of UTF-8 character terminals
# will require a careful analysis not only of each Unicode character,
# but also of each presentation form, something the author of these
# routines has avoided to do so far.
#
# http://www.unicode.org/unicode/reports/tr11/
#
# Markus Kuhn -- 2007-05-26 (Unicode 5.0)
#
# Permission to use, copy, modify, and distribute this software
# for any purpose and without fee is hereby granted. The author
# disclaims all warranties with regard to this software.
#
# Latest C version: http://www.cl.cam.ac.uk/~mgk25/ucs/wcwidth.c
# auxiliary function for binary search in interval table
def bisearch(ucs, table):
    """Binary-search *table* for the code point *ucs*.

    *table* is a sorted tuple of non-overlapping (first, last) inclusive
    intervals.  Returns 1 if ucs falls inside any interval, 0 otherwise.
    """
    # lo/hi instead of min/max: don't shadow the builtins.
    lo = 0
    hi = len(table) - 1
    # Fast reject: outside the table's total range.
    if ucs < table[0][0] or ucs > table[hi][1]:
        return 0
    while hi >= lo:
        # // keeps the midpoint an int under Python 3 as well;
        # identical to / on Python 2 ints.
        mid = (lo + hi) // 2
        if ucs > table[mid][1]:
            lo = mid + 1
        elif ucs < table[mid][0]:
            hi = mid - 1
        else:
            return 1
    return 0
# The following two functions define the column width of an ISO 10646
# character as follows:
#
# - The null character (U+0000) has a column width of 0.
#
# - Other C0/C1 control characters and DEL will lead to a return
# value of -1.
#
# - Non-spacing and enclosing combining characters (general
# category code Mn or Me in the Unicode database) have a
# column width of 0.
#
# - SOFT HYPHEN (U+00AD) has a column width of 1.
#
# - Other format characters (general category code Cf in the Unicode
# database) and ZERO WIDTH SPACE (U+200B) have a column width of 0.
#
# - Hangul Jamo medial vowels and final consonants (U+1160-U+11FF)
# have a column width of 0.
#
# - Spacing characters in the East Asian Wide (W) or East Asian
# Full-width (F) category as defined in Unicode Technical
# Report #11 have a column width of 2.
#
# - All remaining characters (including all printable
# ISO 8859-1 and WGL4 characters, Unicode control characters,
# etc.) have a column width of 1.
#
# This implementation assumes that wchar_t characters are encoded
# in ISO 10646.
# sorted list of non-overlapping intervals of non-spacing characters
# generated by "uniset +cat=Me +cat=Mn +cat=Cf -00AD +1160-11FF +200B c"
combining = (
(0x0300, 0x036F), (0x0483, 0x0486), (0x0488, 0x0489),
(0x0591, 0x05BD), (0x05BF, 0x05BF), (0x05C1, 0x05C2),
(0x05C4, 0x05C5), (0x05C7, 0x05C7), (0x0600, 0x0603),
(0x0610, 0x0615), (0x064B, 0x065E), (0x0670, 0x0670),
(0x06D6, 0x06E4), (0x06E7, 0x06E8), (0x06EA, 0x06ED),
(0x070F, 0x070F), (0x0711, 0x0711), (0x0730, 0x074A),
(0x07A6, 0x07B0), (0x07EB, 0x07F3), (0x0901, 0x0902),
(0x093C, 0x093C), (0x0941, 0x0948), (0x094D, 0x094D),
(0x0951, 0x0954), (0x0962, 0x0963), (0x0981, 0x0981),
(0x09BC, 0x09BC), (0x09C1, 0x09C4), (0x09CD, 0x09CD),
(0x09E2, 0x09E3), (0x0A01, 0x0A02), (0x0A3C, 0x0A3C),
(0x0A41, 0x0A42), (0x0A47, 0x0A48), (0x0A4B, 0x0A4D),
(0x0A70, 0x0A71), (0x0A81, 0x0A82), (0x0ABC, 0x0ABC),
(0x0AC1, 0x0AC5), (0x0AC7, 0x0AC8), (0x0ACD, 0x0ACD),
(0x0AE2, 0x0AE3), (0x0B01, 0x0B01), (0x0B3C, 0x0B3C),
(0x0B3F, 0x0B3F), (0x0B41, 0x0B43), (0x0B4D, 0x0B4D),
(0x0B56, 0x0B56), (0x0B82, 0x0B82), (0x0BC0, 0x0BC0),
(0x0BCD, 0x0BCD), (0x0C3E, 0x0C40), (0x0C46, 0x0C48),
(0x0C4A, 0x0C4D), (0x0C55, 0x0C56), (0x0CBC, 0x0CBC),
(0x0CBF, 0x0CBF), (0x0CC6, 0x0CC6), (0x0CCC, 0x0CCD),
(0x0CE2, 0x0CE3), (0x0D41, 0x0D43), (0x0D4D, 0x0D4D),
(0x0DCA, 0x0DCA), (0x0DD2, 0x0DD4), (0x0DD6, 0x0DD6),
(0x0E31, 0x0E31), (0x0E34, 0x0E3A), (0x0E47, 0x0E4E),
(0x0EB1, 0x0EB1), (0x0EB4, 0x0EB9), (0x0EBB, 0x0EBC),
(0x0EC8, 0x0ECD), (0x0F18, 0x0F19), (0x0F35, 0x0F35),
(0x0F37, 0x0F37), (0x0F39, 0x0F39), (0x0F71, 0x0F7E),
(0x0F80, 0x0F84), (0x0F86, 0x0F87), (0x0F90, 0x0F97),
(0x0F99, 0x0FBC), (0x0FC6, 0x0FC6), (0x102D, 0x1030),
(0x1032, 0x1032), (0x1036, 0x1037), (0x1039, 0x1039),
(0x1058, 0x1059), (0x1160, 0x11FF), (0x135F, 0x135F),
(0x1712, 0x1714), (0x1732, 0x1734), (0x1752, 0x1753),
(0x1772, 0x1773), (0x17B4, 0x17B5), (0x17B7, 0x17BD),
(0x17C6, 0x17C6), (0x17C9, 0x17D3), (0x17DD, 0x17DD),
(0x180B, 0x180D), (0x18A9, 0x18A9), (0x1920, 0x1922),
(0x1927, 0x1928), (0x1932, 0x1932), (0x1939, 0x193B),
(0x1A17, 0x1A18), (0x1B00, 0x1B03), (0x1B34, 0x1B34),
(0x1B36, 0x1B3A), (0x1B3C, 0x1B3C), (0x1B42, 0x1B42),
(0x1B6B, 0x1B73), (0x1DC0, 0x1DCA), (0x1DFE, 0x1DFF),
(0x200B, 0x200F), (0x202A, 0x202E), (0x2060, 0x2063),
(0x206A, 0x206F), (0x20D0, 0x20EF), (0x302A, 0x302F),
(0x3099, 0x309A), (0xA806, 0xA806), (0xA80B, 0xA80B),
(0xA825, 0xA826), (0xFB1E, 0xFB1E), (0xFE00, 0xFE0F),
(0xFE20, 0xFE23), (0xFEFF, 0xFEFF), (0xFFF9, 0xFFFB),
(0x10A01, 0x10A03), (0x10A05, 0x10A06), (0x10A0C, 0x10A0F),
(0x10A38, 0x10A3A), (0x10A3F, 0x10A3F), (0x1D167, 0x1D169),
(0x1D173, 0x1D182), (0x1D185, 0x1D18B), (0x1D1AA, 0x1D1AD),
(0x1D242, 0x1D244), (0xE0001, 0xE0001), (0xE0020, 0xE007F),
(0xE0100, 0xE01EF)
)
# sorted list of non-overlapping intervals of East Asian Ambiguous
# characters, generated by "uniset +WIDTH-A -cat=Me -cat=Mn -cat=Cf c"
ambiguous = (
(0x00A1, 0x00A1), (0x00A4, 0x00A4), (0x00A7, 0x00A8),
(0x00AA, 0x00AA), (0x00AE, 0x00AE), (0x00B0, 0x00B4),
(0x00B6, 0x00BA), (0x00BC, 0x00BF), (0x00C6, 0x00C6),
(0x00D0, 0x00D0), (0x00D7, 0x00D8), (0x00DE, 0x00E1),
(0x00E6, 0x00E6), (0x00E8, 0x00EA), (0x00EC, 0x00ED),
(0x00F0, 0x00F0), (0x00F2, 0x00F3), (0x00F7, 0x00FA),
(0x00FC, 0x00FC), (0x00FE, 0x00FE), (0x0101, 0x0101),
(0x0111, 0x0111), (0x0113, 0x0113), (0x011B, 0x011B),
(0x0126, 0x0127), (0x012B, 0x012B), (0x0131, 0x0133),
(0x0138, 0x0138), (0x013F, 0x0142), (0x0144, 0x0144),
(0x0148, 0x014B), (0x014D, 0x014D), (0x0152, 0x0153),
(0x0166, 0x0167), (0x016B, 0x016B), (0x01CE, 0x01CE),
(0x01D0, 0x01D0), (0x01D2, 0x01D2), (0x01D4, 0x01D4),
(0x01D6, 0x01D6), (0x01D8, 0x01D8), (0x01DA, 0x01DA),
(0x01DC, 0x01DC), (0x0251, 0x0251), (0x0261, 0x0261),
(0x02C4, 0x02C4), (0x02C7, 0x02C7), (0x02C9, 0x02CB),
(0x02CD, 0x02CD), (0x02D0, 0x02D0), (0x02D8, 0x02DB),
(0x02DD, 0x02DD), (0x02DF, 0x02DF), (0x0391, 0x03A1),
(0x03A3, 0x03A9), (0x03B1, 0x03C1), (0x03C3, 0x03C9),
(0x0401, 0x0401), (0x0410, 0x044F), (0x0451, 0x0451),
(0x2010, 0x2010), (0x2013, 0x2016), (0x2018, 0x2019),
(0x201C, 0x201D), (0x2020, 0x2022), (0x2024, 0x2027),
(0x2030, 0x2030), (0x2032, 0x2033), (0x2035, 0x2035),
(0x203B, 0x203B), (0x203E, 0x203E), (0x2074, 0x2074),
(0x207F, 0x207F), (0x2081, 0x2084), (0x20AC, 0x20AC),
(0x2103, 0x2103), (0x2105, 0x2105), (0x2109, 0x2109),
(0x2113, 0x2113), (0x2116, 0x2116), (0x2121, 0x2122),
(0x2126, 0x2126), (0x212B, 0x212B), (0x2153, 0x2154),
(0x215B, 0x215E), (0x2160, 0x216B), (0x2170, 0x2179),
(0x2190, 0x2199), (0x21B8, 0x21B9), (0x21D2, 0x21D2),
(0x21D4, 0x21D4), (0x21E7, 0x21E7), (0x2200, 0x2200),
(0x2202, 0x2203), (0x2207, 0x2208), (0x220B, 0x220B),
(0x220F, 0x220F), (0x2211, 0x2211), (0x2215, 0x2215),
(0x221A, 0x221A), (0x221D, 0x2220), (0x2223, 0x2223),
(0x2225, 0x2225), (0x2227, 0x222C), (0x222E, 0x222E),
(0x2234, 0x2237), (0x223C, 0x223D), (0x2248, 0x2248),
(0x224C, 0x224C), (0x2252, 0x2252), (0x2260, 0x2261),
(0x2264, 0x2267), (0x226A, 0x226B), (0x226E, 0x226F),
(0x2282, 0x2283), (0x2286, 0x2287), (0x2295, 0x2295),
(0x2299, 0x2299), (0x22A5, 0x22A5), (0x22BF, 0x22BF),
(0x2312, 0x2312), (0x2460, 0x24E9), (0x24EB, 0x254B),
(0x2550, 0x2573), (0x2580, 0x258F), (0x2592, 0x2595),
(0x25A0, 0x25A1), (0x25A3, 0x25A9), (0x25B2, 0x25B3),
(0x25B6, 0x25B7), (0x25BC, 0x25BD), (0x25C0, 0x25C1),
(0x25C6, 0x25C8), (0x25CB, 0x25CB), (0x25CE, 0x25D1),
(0x25E2, 0x25E5), (0x25EF, 0x25EF), (0x2605, 0x2606),
(0x2609, 0x2609), (0x260E, 0x260F), (0x2614, 0x2615),
(0x261C, 0x261C), (0x261E, 0x261E), (0x2640, 0x2640),
(0x2642, 0x2642), (0x2660, 0x2661), (0x2663, 0x2665),
(0x2667, 0x266A), (0x266C, 0x266D), (0x266F, 0x266F),
(0x273D, 0x273D), (0x2776, 0x277F), (0xE000, 0xF8FF),
(0xFFFD, 0xFFFD), (0xF0000, 0xFFFFD), (0x100000, 0x10FFFD)
)
def mk_wcwidth(ucs):
    """Return the column width of code point *ucs*.

    0 for NUL and combining (non-spacing) characters, -1 for other
    C0/C1 control characters and DEL, 2 for East Asian Wide/Fullwidth
    characters, and 1 for everything else.
    """
    if ucs == 0:
        return 0
    # C0/C1 control characters and DEL are non-printable.
    if ucs < 32 or 0x7f <= ucs < 0xa0:
        return -1
    # Non-spacing characters occupy no cells.
    if bisearch(ucs, combining):
        return 0
    # ucs is neither combining nor a control character; decide between
    # single and double width.
    is_wide = (ucs >= 0x1100 and
               (ucs <= 0x115f or                    # Hangul Jamo init. consonants
                ucs == 0x2329 or ucs == 0x232a or
                (0x2e80 <= ucs <= 0xa4cf and
                 ucs != 0x303f) or                  # CJK ... Yi
                0xac00 <= ucs <= 0xd7a3 or          # Hangul Syllables
                0xf900 <= ucs <= 0xfaff or          # CJK Compatibility Ideographs
                0xfe10 <= ucs <= 0xfe19 or          # Vertical forms
                0xfe30 <= ucs <= 0xfe6f or          # CJK Compatibility Forms
                0xff00 <= ucs <= 0xff60 or          # Fullwidth Forms
                0xffe0 <= ucs <= 0xffe6 or
                0x20000 <= ucs <= 0x2fffd or
                0x30000 <= ucs <= 0x3fffd))
    return 2 if is_wide else 1
def mk_wcswidth(pwcs):
    """Return the total column width of the code-point sequence *pwcs*,
    or -1 if any character in it is non-printable."""
    total = 0
    for ucs in pwcs:
        cells = mk_wcwidth(ucs)
        if cells < 0:
            return -1
        total += cells
    return total
# The following functions are the same as mk_wcwidth() and
# mk_wcswidth(), except that spacing characters in the East Asian
# Ambiguous (A) category as defined in Unicode Technical Report #11
# have a column width of 2. This variant might be useful for users of
# CJK legacy encodings who want to migrate to UCS without changing
# the traditional terminal character-width behaviour. It is not
# otherwise recommended for general use.
def mk_wcwidth_cjk(ucs):
    """Like mk_wcwidth(), but East Asian Ambiguous characters count as
    two columns (legacy-CJK-friendly behaviour)."""
    return 2 if bisearch(ucs, ambiguous) else mk_wcwidth(ucs)
def mk_wcswidth_cjk(pwcs):
    """CJK-legacy variant of mk_wcswidth(): total column width of *pwcs*
    with East Asian Ambiguous characters counted as wide, or -1 if any
    character is non-printable."""
    total = 0
    for ucs in pwcs:
        cells = mk_wcwidth_cjk(ucs)
        if cells < 0:
            return -1
        total += cells
    return total
# python-y versions, dealing with unicode objects
def wcwidth(c):
    # Column width of a single one-character unicode string.
    return mk_wcwidth(ord(c))

def wcswidth(s):
    # Total column width of a unicode string (-1 if any char unprintable).
    return mk_wcswidth(map(ord, s))

def wcwidth_cjk(c):
    # As wcwidth(), but East Asian Ambiguous characters count as wide.
    return mk_wcwidth_cjk(ord(c))

def wcswidth_cjk(s):
    # As wcswidth(), but East Asian Ambiguous characters count as wide.
    return mk_wcswidth_cjk(map(ord, s))
if __name__ == "__main__":
    # Self-test: each (unicode character name, expected column width)
    # pair below is checked against wcwidth().
    samples = (
        ('MUSIC SHARP SIGN', 1),
        ('FULLWIDTH POUND SIGN', 2),
        ('FULLWIDTH LATIN CAPITAL LETTER P', 2),
        ('CJK RADICAL BOLT OF CLOTH', 2),
        ('LATIN SMALL LETTER A', 1),
        ('LATIN SMALL LETTER AE', 1),
        ('SPACE', 1),
        ('NO-BREAK SPACE', 1),
        ('CJK COMPATIBILITY IDEOGRAPH-F920', 2),
        ('MALAYALAM VOWEL SIGN UU', 0),
        ('ZERO WIDTH SPACE', 0),
        ('ZERO WIDTH NO-BREAK SPACE', 0),
        ('COMBINING PALATALIZED HOOK BELOW', 0),
        ('COMBINING GRAVE ACCENT', 0),
    )
    # Control characters must report a negative (non-printable) width.
    nonprinting = u'\r\n\t\a\b\f\v\x7f'
    import unicodedata
    for name, printwidth in samples:
        uchr = unicodedata.lookup(name)
        calculatedwidth = wcwidth(uchr)
        assert calculatedwidth == printwidth, \
            'width for %r should be %d, but is %d?' % (uchr, printwidth, calculatedwidth)
    for c in nonprinting:
        calculatedwidth = wcwidth(c)
        assert calculatedwidth < 0, \
            '%r is a control character, but wcwidth gives %d' % (c, calculatedwidth)
    assert wcwidth('\0') == 0  # special case
    # depending on how python is compiled, code points above U+FFFF may not be
    # treated as single characters, so ord() won't work. test a few of these
    # manually.
    assert mk_wcwidth(0xe01ef) == 0
    assert mk_wcwidth(0x10ffff) == 1
    assert mk_wcwidth(0x3fffd) == 2
    teststr = u'B\0ig br\u00f8wn moose\ub143\u200b'
    calculatedwidth = wcswidth(teststr)
    assert calculatedwidth == 17, 'expected 17, got %d' % calculatedwidth
    calculatedwidth = wcswidth_cjk(teststr)
    assert calculatedwidth == 18, 'expected 18, got %d' % calculatedwidth
    assert wcswidth(u'foobar\u200b\a') < 0
    print 'tests pass.'
| apache-2.0 |
Abi1ity/uniclust2.0 | flask/lib/python2.7/site-packages/sqlalchemy/dialects/access/base.py | 35 | 16054 | # access/base.py
# Copyright (C) 2007-2011 the SQLAlchemy authors and contributors <see AUTHORS file>
# Copyright (C) 2007 Paul Johnston, paj@pajhome.org.uk
# Portions derived from jet2sql.py by Matt Keranen, mksql@yahoo.com
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
"""
Support for the Microsoft Access database.
.. note::
The Access dialect is **non-functional as of SQLAlchemy 0.6**,
pending development efforts to bring it up-to-date.
"""
from sqlalchemy import sql, schema, types, exc, pool
from sqlalchemy.sql import compiler, expression
from sqlalchemy.engine import default, base, reflection
from sqlalchemy import processors
class AcNumeric(types.Numeric):
    # Access NUMERIC column type.
    def get_col_spec(self):
        return "NUMERIC"

    def bind_processor(self, dialect):
        # Outgoing values are bound as strings (see AcFloat: the string
        # form lets Decimal values round-trip).
        return processors.to_str

    def result_processor(self, dialect, coltype):
        # No conversion needed on the way out.
        return None
class AcFloat(types.Float):
    # Access FLOAT column type.
    def get_col_spec(self):
        return "FLOAT"

    def bind_processor(self, dialect):
        """By converting to string, we can use Decimal types round-trip."""
        return processors.to_str
class AcInteger(types.Integer):
    # Access INTEGER column type.
    def get_col_spec(self):
        return "INTEGER"
class AcTinyInteger(types.Integer):
    # Access TINYINT column type.
    def get_col_spec(self):
        return "TINYINT"
class AcSmallInteger(types.SmallInteger):
    # Access SMALLINT column type.
    def get_col_spec(self):
        return "SMALLINT"
class AcDateTime(types.DateTime):
    # Access DATETIME column type.
    def get_col_spec(self):
        return "DATETIME"
class AcDate(types.Date):
    # Date columns are emitted as DATETIME as well (Access apparently has
    # no separate DATE column type -- note for review).
    def get_col_spec(self):
        return "DATETIME"
class AcText(types.Text):
    # Large text maps to Access MEMO.
    def get_col_spec(self):
        return "MEMO"
class AcString(types.String):
    def get_col_spec(self):
        # TEXT(n) when an explicit length was given, bare TEXT otherwise.
        # Conditional expression replaces the fragile `x and a or b` idiom.
        return "TEXT" + ("(%d)" % self.length if self.length else "")
class AcUnicode(types.Unicode):
    def get_col_spec(self):
        # TEXT(n) when an explicit length was given, bare TEXT otherwise.
        # Conditional expression replaces the fragile `x and a or b` idiom.
        return "TEXT" + ("(%d)" % self.length if self.length else "")

    def bind_processor(self, dialect):
        # No conversion on the way in.
        return None

    def result_processor(self, dialect, coltype):
        # No conversion on the way out.
        return None
class AcChar(types.CHAR):
    def get_col_spec(self):
        # TEXT(n) when an explicit length was given, bare TEXT otherwise.
        # Conditional expression replaces the fragile `x and a or b` idiom.
        return "TEXT" + ("(%d)" % self.length if self.length else "")
class AcBinary(types.LargeBinary):
    # Access BINARY column type.
    def get_col_spec(self):
        return "BINARY"
class AcBoolean(types.Boolean):
    # Booleans map to Access's YESNO column type.
    def get_col_spec(self):
        return "YESNO"
class AcTimeStamp(types.TIMESTAMP):
    # Access TIMESTAMP column type.
    def get_col_spec(self):
        return "TIMESTAMP"
class AccessExecutionContext(default.DefaultExecutionContext):
    def _has_implicit_sequence(self, column):
        # True if this column behaves as an implicit COUNTER (identity):
        # an autoincrementing integer primary key with no foreign keys and
        # either no default or an optional Sequence default.
        if column.primary_key and column.autoincrement:
            if isinstance(column.type, types.Integer) and \
                    not column.foreign_keys:
                if column.default is None or \
                        (isinstance(column.default, schema.Sequence) and \
                        column.default.optional):
                    return True
        return False

    def post_exec(self):
        """If we inserted into a row with a COUNTER column, fetch the ID"""
        if self.compiled.isinsert:
            tbl = self.compiled.statement.table
            if not hasattr(tbl, 'has_sequence'):
                # Cache the identity column (or None) on the table the
                # first time we insert into it.
                tbl.has_sequence = None
                for column in tbl.c:
                    if getattr(column, 'sequence', False) or \
                            self._has_implicit_sequence(column):
                        tbl.has_sequence = column
                        break

            if bool(tbl.has_sequence):
                # TBD: for some reason _last_inserted_ids doesn't exist here
                # (but it does at corresponding point in mssql???)
                #if not len(self._last_inserted_ids) or
                #        self._last_inserted_ids[0] is None:
                # @@identity returns the last COUNTER value generated on
                # this connection.
                self.cursor.execute("SELECT @@identity AS lastrowid")
                row = self.cursor.fetchone()
                self._last_inserted_ids = [int(row[0])]
                #+ self._last_inserted_ids[1:]
                # print "LAST ROW ID", self._last_inserted_ids

        super(AccessExecutionContext, self).post_exec()
# Module-level handles to the win32com constants and the DAO engine.
# Both are populated lazily by AccessDialect.dbapi(), which performs the
# win32com imports.
const, daoEngine = None, None
class AccessDialect(default.DefaultDialect):
colspecs = {
types.Unicode : AcUnicode,
types.Integer : AcInteger,
types.SmallInteger: AcSmallInteger,
types.Numeric : AcNumeric,
types.Float : AcFloat,
types.DateTime : AcDateTime,
types.Date : AcDate,
types.String : AcString,
types.LargeBinary : AcBinary,
types.Boolean : AcBoolean,
types.Text : AcText,
types.CHAR: AcChar,
types.TIMESTAMP: AcTimeStamp,
}
name = 'access'
supports_sane_rowcount = False
supports_sane_multi_rowcount = False
ported_sqla_06 = False
def type_descriptor(self, typeobj):
newobj = types.adapt_type(typeobj, self.colspecs)
return newobj
def __init__(self, **params):
super(AccessDialect, self).__init__(**params)
self.text_as_varchar = False
self._dtbs = None
@classmethod
def dbapi(cls):
import win32com.client, pythoncom
global const, daoEngine
if const is None:
const = win32com.client.constants
for suffix in (".36", ".35", ".30"):
try:
daoEngine = win32com.client.\
gencache.\
EnsureDispatch("DAO.DBEngine" + suffix)
break
except pythoncom.com_error:
pass
else:
raise exc.InvalidRequestError(
"Can't find a DB engine. Check "
"http://support.microsoft.com/kb/239114 for details.")
import pyodbc as module
return module
def create_connect_args(self, url):
opts = url.translate_connect_args()
connectors = ["Driver={Microsoft Access Driver (*.mdb)}"]
connectors.append("Dbq=%s" % opts["database"])
user = opts.get("username", None)
if user:
connectors.append("UID=%s" % user)
connectors.append("PWD=%s" % opts.get("password", ""))
return [[";".join(connectors)], {}]
def last_inserted_ids(self):
return self.context.last_inserted_ids
def do_execute(self, cursor, statement, params, context=None):
if params == {}:
params = ()
super(AccessDialect, self).\
do_execute(cursor, statement, params, **kwargs)
def _execute(self, c, statement, parameters):
try:
if parameters == {}:
parameters = ()
c.execute(statement, parameters)
self.context.rowcount = c.rowcount
except Exception, e:
raise exc.DBAPIError.instance(statement, parameters, e)
def has_table(self, connection, tablename, schema=None):
# This approach seems to be more reliable that using DAO
try:
connection.execute('select top 1 * from [%s]' % tablename)
return True
except Exception, e:
return False
def reflecttable(self, connection, table, include_columns):
# This is defined in the function, as it relies on win32com constants,
# that aren't imported until dbapi method is called
if not hasattr(self, 'ischema_names'):
self.ischema_names = {
const.dbByte: AcBinary,
const.dbInteger: AcInteger,
const.dbLong: AcInteger,
const.dbSingle: AcFloat,
const.dbDouble: AcFloat,
const.dbDate: AcDateTime,
const.dbLongBinary: AcBinary,
const.dbMemo: AcText,
const.dbBoolean: AcBoolean,
const.dbText: AcUnicode, # All Access strings are
# unicode
const.dbCurrency: AcNumeric,
}
# A fresh DAO connection is opened for each reflection
# This is necessary, so we get the latest updates
dtbs = daoEngine.OpenDatabase(connection.engine.url.database)
try:
for tbl in dtbs.TableDefs:
if tbl.Name.lower() == table.name.lower():
break
else:
raise exc.NoSuchTableError(table.name)
for col in tbl.Fields:
coltype = self.ischema_names[col.Type]
if col.Type == const.dbText:
coltype = coltype(col.Size)
colargs = \
{
'nullable': not(col.Required or
col.Attributes & const.dbAutoIncrField),
}
default = col.DefaultValue
if col.Attributes & const.dbAutoIncrField:
colargs['default'] = schema.Sequence(col.Name + '_seq')
elif default:
if col.Type == const.dbBoolean:
default = default == 'Yes' and '1' or '0'
colargs['server_default'] = \
schema.DefaultClause(sql.text(default))
table.append_column(
schema.Column(col.Name, coltype, **colargs))
# TBD: check constraints
# Find primary key columns first
for idx in tbl.Indexes:
if idx.Primary:
for col in idx.Fields:
thecol = table.c[col.Name]
table.primary_key.add(thecol)
if isinstance(thecol.type, AcInteger) and \
not (thecol.default and
isinstance(
thecol.default.arg,
schema.Sequence
)):
thecol.autoincrement = False
# Then add other indexes
for idx in tbl.Indexes:
if not idx.Primary:
if len(idx.Fields) == 1:
col = table.c[idx.Fields[0].Name]
if not col.primary_key:
col.index = True
col.unique = idx.Unique
else:
pass # TBD: multi-column indexes
for fk in dtbs.Relations:
if fk.ForeignTable != table.name:
continue
scols = [c.ForeignName for c in fk.Fields]
rcols = ['%s.%s' % (fk.Table, c.Name) for c in fk.Fields]
table.append_constraint(
schema.ForeignKeyConstraint(scols, rcols,\
link_to_name=True))
finally:
dtbs.Close()
@reflection.cache
def get_table_names(self, connection, schema=None, **kw):
# A fresh DAO connection is opened for each reflection
# This is necessary, so we get the latest updates
dtbs = daoEngine.OpenDatabase(connection.engine.url.database)
names = [t.Name for t in dtbs.TableDefs
if t.Name[:4] != "MSys" and t.Name[:4] != "~TMP"]
dtbs.Close()
return names
class AccessCompiler(compiler.SQLCompiler):
    """Statement compiler with Access-specific SQL rendering."""

    # Map generic EXTRACT field names to Access DATEPART codes.
    extract_map = compiler.SQLCompiler.extract_map.copy()
    extract_map.update({
        'month': 'm',
        'day': 'd',
        'year': 'yyyy',
        'second': 's',
        'hour': 'h',
        'doy': 'y',
        'minute': 'n',
        'quarter': 'q',
        'dow': 'w',
        'week': 'ww'
    })

    def visit_select_precolumns(self, select):
        """Access puts TOP, it's version of LIMIT here """
        # Conditional expression replaces the fragile `and/or` idiom.
        s = "DISTINCT " if select.distinct else ""
        if select.limit:
            s += "TOP %s " % (select.limit)
        if select.offset:
            raise exc.InvalidRequestError(
                'Access does not support LIMIT with an offset')
        return s

    def limit_clause(self, select):
        """Limit in access is after the select keyword"""
        return ""

    def binary_operator_string(self, binary):
        """Access uses "mod" instead of "%" """
        # Conditional expression replaces the fragile `and/or` idiom.
        return 'mod' if binary.operator == '%' else binary.operator

    def label_select_column(self, select, column, asfrom):
        if isinstance(column, expression.Function):
            return column.label()
        else:
            return super(AccessCompiler, self).\
                label_select_column(select, column, asfrom)

    function_rewrites = {'current_date': 'now',
                         'current_timestamp': 'now',
                         'length': 'len',
                         }

    def visit_function(self, func):
        """Access function names differ from the ANSI SQL names;
        rewrite common ones"""
        func.name = self.function_rewrites.get(func.name, func.name)
        return super(AccessCompiler, self).visit_function(func)

    def for_update_clause(self, select):
        """FOR UPDATE is not supported by Access; silently ignore"""
        return ''

    # Strip schema
    def visit_table(self, table, asfrom=False, **kwargs):
        if asfrom:
            return self.preparer.quote(table.name, table.quote)
        else:
            return ""

    def visit_join(self, join, asfrom=False, **kwargs):
        # Conditional expression replaces the fragile `and/or` idiom.
        return (self.process(join.left, asfrom=True) +
                (" LEFT OUTER JOIN " if join.isouter else " INNER JOIN ") +
                self.process(join.right, asfrom=True) + " ON " +
                self.process(join.onclause))

    def visit_extract(self, extract, **kw):
        field = self.extract_map.get(extract.field, extract.field)
        return 'DATEPART("%s", %s)' % \
            (field, self.process(extract.expr, **kw))
class AccessDDLCompiler(compiler.DDLCompiler):
    def get_column_specification(self, column, **kwargs):
        # Start with "<name> <type>" rendered by the Access type impl.
        colspec = self.preparer.format_column(column) + " " + \
            column.type.dialect_impl(self.dialect).get_col_spec()

        # install a sequence if we have an implicit IDENTITY column
        # (first such column per table: integer autoincrement PK without
        # foreign keys and without a mandatory default)
        if (not getattr(column.table, 'has_sequence', False)) and \
                column.primary_key and \
                column.autoincrement and \
                isinstance(column.type, types.Integer) and \
                not column.foreign_keys:
            if column.default is None or \
                    (isinstance(column.default, schema.Sequence) and
                     column.default.optional):
                column.sequence = schema.Sequence(column.name + '_seq')

        if not column.nullable:
            colspec += " NOT NULL"

        if hasattr(column, 'sequence'):
            column.table.has_sequence = column
            # "counter" is Access's identity column type; it replaces the
            # normal type specification entirely.
            colspec = self.preparer.format_column(column) + " counter"
        else:
            default = self.get_column_default_string(column)
            if default is not None:
                colspec += " DEFAULT " + default

        return colspec

    def visit_drop_index(self, drop):
        # Access drops an index with "DROP INDEX [table].[index]".
        index = drop.element
        self.append("\nDROP INDEX [%s].[%s]" % \
            (index.table.name,
             self._index_identifier(index.name)))
class AccessIdentifierPreparer(compiler.IdentifierPreparer):
    # Access reserves these words in addition to the standard set.
    reserved_words = compiler.RESERVED_WORDS.copy()
    reserved_words.update(['value', 'text'])

    def __init__(self, dialect):
        # Access quotes identifiers with square brackets: [name].
        super(AccessIdentifierPreparer, self).\
            __init__(dialect, initial_quote='[', final_quote=']')
# Wire the Access-specific implementations into the dialect class; the
# `dialect` name is the module-level entry point SQLAlchemy looks up.
dialect = AccessDialect
dialect.poolclass = pool.SingletonThreadPool
dialect.statement_compiler = AccessCompiler
dialect.ddlcompiler = AccessDDLCompiler
dialect.preparer = AccessIdentifierPreparer
dialect.execution_ctx_cls = AccessExecutionContext
| bsd-3-clause |
leshchevds/ganeti | test/py/ganeti.utils.lvm_unittest.py | 10 | 4711 | #!/usr/bin/python
#
# Copyright (C) 2013 Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
# TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Script for testing ganeti.utils.lvm"""
import unittest
from ganeti import constants
from ganeti import utils
from ganeti.objects import LvmPvInfo
import testutils
class TestLvmExclusiveCheckNodePvs(unittest.TestCase):
  """Test cases for LvmExclusiveCheckNodePvs()"""
  _VG = "vg"
  # Fixture PVs: the sizes are chosen so SMALL/MED and MED/BIG differ by
  # far more than the allowed margin, while copies of the same PV match.
  _SMALL_PV = LvmPvInfo(name="small", vg_name=_VG, size=100e3, free=40e3,
                        attributes="a-")
  _MED_PV = LvmPvInfo(name="medium", vg_name=_VG, size=400e3, free=40e3,
                      attributes="a-")
  _BIG_PV = LvmPvInfo(name="big", vg_name=_VG, size=1e6, free=400e3,
                      attributes="a-")
  # Allowance for rounding
  _EPS = 1e-4
  def testOnePv(self):
    # A single PV is trivially homogeneous: no errors, min == max == size.
    (errmsgs, (small, big)) = utils.LvmExclusiveCheckNodePvs([self._MED_PV])
    self.assertFalse(errmsgs)
    self.assertEqual(small, self._MED_PV.size)
    self.assertEqual(big, self._MED_PV.size)
  def testEqualPvs(self):
    # Multiple identical PVs must never trigger a size-difference error.
    (errmsgs, (small, big)) = utils.LvmExclusiveCheckNodePvs(
      [self._MED_PV] * 2)
    self.assertFalse(errmsgs)
    self.assertEqual(small, self._MED_PV.size)
    self.assertEqual(big, self._MED_PV.size)
    (errmsgs, (small, big)) = utils.LvmExclusiveCheckNodePvs(
      [self._SMALL_PV] * 3)
    self.assertFalse(errmsgs)
    self.assertEqual(small, self._SMALL_PV.size)
    self.assertEqual(big, self._SMALL_PV.size)
  def testTooDifferentPvs(self):
    # PVs whose sizes differ beyond the margin produce exactly one error,
    # regardless of which of the two is the bigger one.
    (errmsgs, (small, big)) = utils.LvmExclusiveCheckNodePvs(
      [self._MED_PV, self._BIG_PV])
    self.assertEqual(len(errmsgs), 1)
    self.assertEqual(small, self._MED_PV.size)
    self.assertEqual(big, self._BIG_PV.size)
    (errmsgs, (small, big)) = utils.LvmExclusiveCheckNodePvs(
      [self._MED_PV, self._SMALL_PV])
    self.assertEqual(len(errmsgs), 1)
    self.assertEqual(small, self._SMALL_PV.size)
    self.assertEqual(big, self._MED_PV.size)
  def testBoundarySizeCases(self):
    # Exercise sizes just inside and just outside constants.PART_MARGIN,
    # using _EPS to nudge across the boundary.
    medpv1 = self._MED_PV.Copy()
    medpv2 = self._MED_PV.Copy()
    (errmsgs, (small, big)) = utils.LvmExclusiveCheckNodePvs(
      [medpv1, medpv2, self._MED_PV])
    self.assertFalse(errmsgs)
    self.assertEqual(small, self._MED_PV.size)
    self.assertEqual(big, self._MED_PV.size)
    # Just within the margins
    medpv1.size = self._MED_PV.size * (1 - constants.PART_MARGIN + self._EPS)
    medpv2.size = self._MED_PV.size * (1 + constants.PART_MARGIN - self._EPS)
    (errmsgs, (small, big)) = utils.LvmExclusiveCheckNodePvs(
      [medpv1, medpv2, self._MED_PV])
    self.assertFalse(errmsgs)
    self.assertEqual(small, medpv1.size)
    self.assertEqual(big, medpv2.size)
    # Just outside the margins
    medpv1.size = self._MED_PV.size * (1 - constants.PART_MARGIN - self._EPS)
    medpv2.size = self._MED_PV.size * (1 + constants.PART_MARGIN)
    (errmsgs, (small, big)) = utils.LvmExclusiveCheckNodePvs(
      [medpv1, medpv2, self._MED_PV])
    self.assertTrue(errmsgs)
    self.assertEqual(small, medpv1.size)
    self.assertEqual(big, medpv2.size)
    medpv1.size = self._MED_PV.size * (1 - constants.PART_MARGIN)
    medpv2.size = self._MED_PV.size * (1 + constants.PART_MARGIN + self._EPS)
    (errmsgs, (small, big)) = utils.LvmExclusiveCheckNodePvs(
      [medpv1, medpv2, self._MED_PV])
    self.assertTrue(errmsgs)
    self.assertEqual(small, medpv1.size)
    self.assertEqual(big, medpv2.size)
if __name__ == "__main__":
  # Run all test cases in this module through ganeti's test runner.
  testutils.GanetiTestProgram()
| bsd-2-clause |
yeraydiazdiaz/nonrel-blog | django/contrib/localflavor/ru/ru_regions.py | 199 | 4156 | # -*- encoding: utf-8 -*-
"""
Sources:
http://ru.wikipedia.org/wiki/Коды_субъектов_Российской_Федерации
http://ru.wikipedia.org/wiki/Федеральные_округа_Российской_Федерации
"""
from django.utils.translation import ugettext_lazy as _
# Russian federal counties (federal districts) as Django form/field choices:
# (stored value, localized display name).
RU_COUNTY_CHOICES = (
    ("Central Federal County", _("Central Federal County")),
    ("South Federal County", _("South Federal County")),
    ("North-West Federal County", _("North-West Federal County")),
    ("Far-East Federal County", _("Far-East Federal County")),
    ("Siberian Federal County", _("Siberian Federal County")),
    ("Ural Federal County", _("Ural Federal County")),
    ("Privolzhsky Federal County", _("Privolzhsky Federal County")),
    ("North-Caucasian Federal County", _("North-Caucasian Federal County"))
)
# Subjects of the Russian Federation as choices: (official numeric region
# code as a string, localized transliterated name). Codes follow the
# constitutional subject codes (see the Wikipedia sources in the module
# docstring); they are not strictly sequential.
RU_REGIONS_CHOICES = (
    ("77", _("Moskva")),
    ("78", _("Saint-Peterburg")),
    ("50", _("Moskovskaya oblast'")),
    ("01", _("Adygeya, Respublika")),
    ("02", _("Bashkortostan, Respublika")),
    ("03", _("Buryatia, Respublika")),
    ("04", _("Altay, Respublika")),
    ("05", _("Dagestan, Respublika")),
    ("06", _("Ingushskaya Respublika")),
    ("07", _("Kabardino-Balkarskaya Respublika")),
    ("08", _("Kalmykia, Respublika")),
    ("09", _("Karachaevo-Cherkesskaya Respublika")),
    ("10", _("Karelia, Respublika")),
    ("11", _("Komi, Respublika")),
    ("12", _("Mariy Ehl, Respublika")),
    ("13", _("Mordovia, Respublika")),
    ("14", _("Sakha, Respublika (Yakutiya)")),
    ("15", _("Severnaya Osetia, Respublika (Alania)")),
    ("16", _("Tatarstan, Respublika")),
    ("17", _("Tyva, Respublika (Tuva)")),
    ("18", _("Udmurtskaya Respublika")),
    ("19", _("Khakassiya, Respublika")),
    ("95", _("Chechenskaya Respublika")),
    ("21", _("Chuvashskaya Respublika")),
    ("22", _("Altayskiy Kray")),
    ("80", _("Zabaykalskiy Kray")),
    ("82", _("Kamchatskiy Kray")),
    ("23", _("Krasnodarskiy Kray")),
    ("24", _("Krasnoyarskiy Kray")),
    ("81", _("Permskiy Kray")),
    ("25", _("Primorskiy Kray")),
    ("26", _("Stavropol'siyy Kray")),
    ("27", _("Khabarovskiy Kray")),
    ("28", _("Amurskaya oblast'")),
    ("29", _("Arkhangel'skaya oblast'")),
    ("30", _("Astrakhanskaya oblast'")),
    ("31", _("Belgorodskaya oblast'")),
    ("32", _("Bryanskaya oblast'")),
    ("33", _("Vladimirskaya oblast'")),
    ("34", _("Volgogradskaya oblast'")),
    ("35", _("Vologodskaya oblast'")),
    ("36", _("Voronezhskaya oblast'")),
    ("37", _("Ivanovskaya oblast'")),
    ("38", _("Irkutskaya oblast'")),
    ("39", _("Kaliningradskaya oblast'")),
    ("40", _("Kaluzhskaya oblast'")),
    ("42", _("Kemerovskaya oblast'")),
    ("43", _("Kirovskaya oblast'")),
    ("44", _("Kostromskaya oblast'")),
    ("45", _("Kurganskaya oblast'")),
    ("46", _("Kurskaya oblast'")),
    ("47", _("Leningradskaya oblast'")),
    ("48", _("Lipeckaya oblast'")),
    ("49", _("Magadanskaya oblast'")),
    ("51", _("Murmanskaya oblast'")),
    ("52", _("Nizhegorodskaja oblast'")),
    ("53", _("Novgorodskaya oblast'")),
    ("54", _("Novosibirskaya oblast'")),
    ("55", _("Omskaya oblast'")),
    ("56", _("Orenburgskaya oblast'")),
    ("57", _("Orlovskaya oblast'")),
    ("58", _("Penzenskaya oblast'")),
    ("60", _("Pskovskaya oblast'")),
    ("61", _("Rostovskaya oblast'")),
    ("62", _("Rjazanskaya oblast'")),
    ("63", _("Samarskaya oblast'")),
    ("64", _("Saratovskaya oblast'")),
    ("65", _("Sakhalinskaya oblast'")),
    ("66", _("Sverdlovskaya oblast'")),
    ("67", _("Smolenskaya oblast'")),
    ("68", _("Tambovskaya oblast'")),
    ("69", _("Tverskaya oblast'")),
    ("70", _("Tomskaya oblast'")),
    ("71", _("Tul'skaya oblast'")),
    ("72", _("Tyumenskaya oblast'")),
    ("73", _("Ul'ianovskaya oblast'")),
    ("74", _("Chelyabinskaya oblast'")),
    ("76", _("Yaroslavskaya oblast'")),
    ("79", _("Evreyskaya avtonomnaja oblast'")),
    ("83", _("Neneckiy autonomnyy okrug")),
    ("86", _("Khanty-Mansiyskiy avtonomnyy okrug - Yugra")),
    ("87", _("Chukotskiy avtonomnyy okrug")),
    ("89", _("Yamalo-Neneckiy avtonomnyy okrug"))
)
| bsd-3-clause |
kivy/kivy | kivy/uix/gesturesurface.py | 3 | 23287 | '''
Gesture Surface
===============
.. versionadded::
1.9.0
.. warning::
This is experimental and subject to change as long as this warning notice
is present.
See :file:`kivy/examples/demo/multistroke/main.py` for a complete application
example.
'''
__all__ = ('GestureSurface', 'GestureContainer')
from random import random
from kivy.event import EventDispatcher
from kivy.clock import Clock
from kivy.vector import Vector
from kivy.uix.floatlayout import FloatLayout
from kivy.graphics import Color, Line, Rectangle
from kivy.properties import (NumericProperty, BooleanProperty,
DictProperty, ColorProperty)
from colorsys import hsv_to_rgb
# Clock undershoot margin (seconds): scheduled callbacks can fire slightly
# before their nominal deadline, so timeout comparisons add this slack.
# FIXME: this is probably too high?
UNDERSHOOT_MARGIN = 0.1
class GestureContainer(EventDispatcher):
    '''Container object that stores information about a gesture. It has
    various properties that are updated by `GestureSurface` as drawing
    progresses.
    :Arguments:
        `touch`
            Touch object (as received by on_touch_down) used to initialize
            the gesture container. Required.
    :Properties:
        `active`
            Set to False once the gesture is complete (meets
            `max_stroke` setting or `GestureSurface.temporal_window`)
            :attr:`active` is a
            :class:`~kivy.properties.BooleanProperty`
        `active_strokes`
            Number of strokes currently active in the gesture, ie
            concurrent touches associated with this gesture.
            :attr:`active_strokes` is a
            :class:`~kivy.properties.NumericProperty`
        `max_strokes`
            Max number of strokes allowed in the gesture. This
            is set by `GestureSurface.max_strokes` but can
            be overridden for example from `on_gesture_start`.
            :attr:`max_strokes` is a
            :class:`~kivy.properties.NumericProperty`
        `was_merged`
            Indicates that this gesture has been merged with another
            gesture and should be considered discarded.
            :attr:`was_merged` is a
            :class:`~kivy.properties.BooleanProperty`
        `bbox`
            Dictionary with keys minx, miny, maxx, maxy. Represents the size
            of the gesture bounding box.
            :attr:`bbox` is a
            :class:`~kivy.properties.DictProperty`
        `width`
            Represents the width of the gesture.
            :attr:`width` is a
            :class:`~kivy.properties.NumericProperty`
        `height`
            Represents the height of the gesture.
            :attr:`height` is a
            :class:`~kivy.properties.NumericProperty`
    '''
    active = BooleanProperty(True)
    active_strokes = NumericProperty(0)
    max_strokes = NumericProperty(0)
    was_merged = BooleanProperty(False)
    # bbox starts inverted (min=+inf, max=-inf) so the first update_bbox()
    # call always initializes it from the touch position.
    bbox = DictProperty({'minx': float('inf'), 'miny': float('inf'),
                         'maxx': float('-inf'), 'maxy': float('-inf')})
    width = NumericProperty(0)
    height = NumericProperty(0)
    def __init__(self, touch, **kwargs):
        # The color is applied to all canvas items of this gesture
        self.color = kwargs.pop('color', [1., 1., 1.])
        super(GestureContainer, self).__init__(**kwargs)
        # This is the touch.uid of the oldest touch represented
        self.id = str(touch.uid)
        # Store various timestamps for decision making
        self._create_time = Clock.get_time()
        self._update_time = None
        self._cleanup_time = None
        self._cache_time = 0
        # We can cache the candidate here to save zip()/Vector instantiation
        self._vectors = None
        # Key is touch.uid; value is a kivy.graphics.Line(); it's used even
        # if line_width is 0 (i.e. not actually drawn anywhere)
        self._strokes = {}
        # Make sure the bbox is up to date with the first touch position
        self.update_bbox(touch)
    def get_vectors(self, **kwargs):
        '''Return strokes in a format that is acceptable for
        `kivy.multistroke.Recognizer` as a gesture candidate or template. The
        result is cached automatically; the cache is invalidated at the start
        and end of a stroke and if `update_bbox` is called. If you are going
        to analyze a gesture mid-stroke, you may need to set the `no_cache`
        argument to True.'''
        # Cache hit when nothing touched the gesture since the last call.
        if self._cache_time == self._update_time and \
                not kwargs.get('no_cache'):
            return self._vectors
        vecs = []
        append = vecs.append
        for tuid, l in self._strokes.items():
            # Line.points is a flat [x1, y1, x2, y2, ...] list; pair it up.
            lpts = l.points
            append([Vector(*pts) for pts in zip(lpts[::2], lpts[1::2])])
        self._vectors = vecs
        self._cache_time = self._update_time
        return vecs
    def handles(self, touch):
        '''Returns True if this container handles the given touch'''
        if not self.active:
            return False
        return str(touch.uid) in self._strokes
    def accept_stroke(self, count=1):
        '''Returns True if this container can accept `count` new strokes'''
        # max_strokes == 0 means "unlimited".
        if not self.max_strokes:
            return True
        return len(self._strokes) + count <= self.max_strokes
    def update_bbox(self, touch):
        '''Update gesture bbox from a touch coordinate'''
        x, y = touch.x, touch.y
        bb = self.bbox
        if x < bb['minx']:
            bb['minx'] = x
        if y < bb['miny']:
            bb['miny'] = y
        if x > bb['maxx']:
            bb['maxx'] = x
        if y > bb['maxy']:
            bb['maxy'] = y
        self.width = bb['maxx'] - bb['minx']
        self.height = bb['maxy'] - bb['miny']
        # Touching the timestamp also invalidates the get_vectors() cache.
        self._update_time = Clock.get_time()
    def add_stroke(self, touch, line):
        '''Associate a list of points with a touch.uid; the line itself is
        created by the caller, but subsequent move/up events look it
        up via us. This is done to avoid problems during merge.'''
        self._update_time = Clock.get_time()
        self._strokes[str(touch.uid)] = line
        self.active_strokes += 1
    def complete_stroke(self):
        '''Called on touch up events to keep track of how many strokes
        are active in the gesture (we only want to dispatch event when
        the *last* stroke in the gesture is released)'''
        self._update_time = Clock.get_time()
        self.active_strokes -= 1
    def single_points_test(self):
        '''Returns True if the gesture consists only of single-point strokes,
        we must discard it in this case, or an exception will be raised'''
        # Each point occupies two entries in the flat points list, so any
        # stroke with more than 2 entries has at least two points.
        for tuid, l in self._strokes.items():
            if len(l.points) > 2:
                return False
        return True
class GestureSurface(FloatLayout):
    '''Simple gesture surface to track/draw touch movements. Typically used
    to gather user input suitable for :class:`kivy.multistroke.Recognizer`.
    :Properties:
        `temporal_window`
            Time to wait from the last touch_up event before attempting
            to recognize the gesture. If you set this to 0, the
            `on_gesture_complete` event is not fired unless the
            :attr:`max_strokes` condition is met.
            :attr:`temporal_window` is a
            :class:`~kivy.properties.NumericProperty` and defaults to 2.0
        `max_strokes`
            Max number of strokes in a single gesture; if this is reached,
            recognition will start immediately on the final touch_up event.
            If this is set to 0, the `on_gesture_complete` event is not
            fired unless the :attr:`temporal_window` expires.
            :attr:`max_strokes` is a
            :class:`~kivy.properties.NumericProperty` and defaults to 4
        `bbox_margin`
            Bounding box margin for detecting gesture collisions, in
            pixels.
            :attr:`bbox_margin` is a
            :class:`~kivy.properties.NumericProperty` and defaults to 30
        `draw_timeout`
            Number of seconds to keep lines/bbox on canvas after the
            `on_gesture_complete` event is fired. If this is set to 0,
            gestures are immediately removed from the surface when
            complete.
            :attr:`draw_timeout` is a
            :class:`~kivy.properties.NumericProperty` and defaults to 3.0
        `color`
            Color used to draw the gesture, in RGB. This option does not
            have an effect if :attr:`use_random_color` is True.
            :attr:`color` is a
            :class:`~kivy.properties.ColorProperty` and defaults to
            [1, 1, 1, 1] (white)
            .. versionchanged:: 2.0.0
                Changed from :class:`~kivy.properties.ListProperty` to
                :class:`~kivy.properties.ColorProperty`.
        `use_random_color`
            Set to True to pick a random color for each gesture, if you do
            this then `color` is ignored. Defaults to False.
            :attr:`use_random_color` is a
            :class:`~kivy.properties.BooleanProperty` and defaults to False
        `line_width`
            Line width used for tracing touches on the surface. Set to 0
            if you only want to detect gestures without drawing anything.
            If you use 1.0, OpenGL GL_LINE is used for drawing; values > 1
            will use an internal drawing method based on triangles (less
            efficient), see :mod:`kivy.graphics`.
            :attr:`line_width` is a
            :class:`~kivy.properties.NumericProperty` and defaults to 2
        `draw_bbox`
            Set to True if you want to draw bounding box behind gestures.
            This only works if `line_width` >= 1. Default is False.
            :attr:`draw_bbox` is a
            :class:`~kivy.properties.BooleanProperty` and defaults to False
        `bbox_alpha`
            Opacity for bounding box if `draw_bbox` is True. Default 0.1
            :attr:`bbox_alpha` is a
            :class:`~kivy.properties.NumericProperty` and defaults to 0.1
    :Events:
        `on_gesture_start` :class:`GestureContainer`
            Fired when a new gesture is initiated on the surface, i.e. the
            first on_touch_down that does not collide with an existing
            gesture on the surface.
        `on_gesture_extend` :class:`GestureContainer`
            Fired when a touch_down event occurs within an existing gesture.
        `on_gesture_merge` :class:`GestureContainer`, :class:`GestureContainer`
            Fired when two gestures collide and get merged to one gesture.
            The first argument is the gesture that has been merged (no longer
            valid); the second is the combined (resulting) gesture.
        `on_gesture_complete` :class:`GestureContainer`
            Fired when a set of strokes is considered a complete gesture,
            this happens when `temporal_window` expires or `max_strokes`
            is reached. Typically you will bind to this event and use
            the provided `GestureContainer` get_vectors() method to
            match against your gesture database.
        `on_gesture_cleanup` :class:`GestureContainer`
            Fired `draw_timeout` seconds after `on_gesture_complete`,
            The gesture will be removed from the canvas (if line_width > 0 or
            draw_bbox is True) and the internal gesture list before this.
        `on_gesture_discard` :class:`GestureContainer`
            Fired when a gesture does not meet the minimum size requirements
            for recognition (width/height < 5, or consists only of single-
            point strokes).
    '''
    temporal_window = NumericProperty(2.0)
    draw_timeout = NumericProperty(3.0)
    max_strokes = NumericProperty(4)
    bbox_margin = NumericProperty(30)
    line_width = NumericProperty(2)
    color = ColorProperty([1., 1., 1., 1.])
    use_random_color = BooleanProperty(False)
    draw_bbox = BooleanProperty(False)
    bbox_alpha = NumericProperty(0.1)
    def __init__(self, **kwargs):
        super(GestureSurface, self).__init__(**kwargs)
        # A list of GestureContainer objects (all gestures on the surface)
        self._gestures = []
        self.register_event_type('on_gesture_start')
        self.register_event_type('on_gesture_extend')
        self.register_event_type('on_gesture_merge')
        self.register_event_type('on_gesture_complete')
        self.register_event_type('on_gesture_cleanup')
        self.register_event_type('on_gesture_discard')
    # -----------------------------------------------------------------------------
    # Touch Events
    # -----------------------------------------------------------------------------
    def on_touch_down(self, touch):
        '''When a new touch is registered, the first thing we do is to test if
        it collides with the bounding box of another known gesture. If so, it
        is assumed to be part of that gesture.
        '''
        # If the touch originates outside the surface, ignore it.
        if not self.collide_point(touch.x, touch.y):
            return
        touch.grab(self)
        # Add the stroke to existing gesture, or make a new one
        g = self.find_colliding_gesture(touch)
        new = False
        if g is None:
            g = self.init_gesture(touch)
            new = True
        # We now belong to a gesture (new or old); start a new stroke.
        self.init_stroke(g, touch)
        if new:
            self.dispatch('on_gesture_start', g, touch)
        else:
            self.dispatch('on_gesture_extend', g, touch)
        return True
    def on_touch_move(self, touch):
        '''When a touch moves, we add a point to the line on the canvas so the
        path is updated. We must also check if the new point collides with the
        bounding box of another gesture - if so, they should be merged.'''
        if touch.grab_current is not self:
            return
        if not self.collide_point(touch.x, touch.y):
            return
        # Retrieve the GestureContainer object that handles this touch, and
        # test for colliding gestures. If found, merge them to one.
        g = self.get_gesture(touch)
        collision = self.find_colliding_gesture(touch)
        if collision is not None and g.accept_stroke(len(collision._strokes)):
            merge = self.merge_gestures(g, collision)
            if g.was_merged:
                self.dispatch('on_gesture_merge', g, collision)
            else:
                self.dispatch('on_gesture_merge', collision, g)
            g = merge
        else:
            g.update_bbox(touch)
        # Add the new point to gesture stroke list and update the canvas line
        g._strokes[str(touch.uid)].points += (touch.x, touch.y)
        # Draw the gesture bounding box; if it is a single press that
        # does not trigger a move event, we would miss it otherwise.
        if self.draw_bbox:
            self._update_canvas_bbox(g)
        return True
    def on_touch_up(self, touch):
        if touch.grab_current is not self:
            return
        touch.ungrab(self)
        g = self.get_gesture(touch)
        g.complete_stroke()
        # If this stroke hit the maximum limit, dispatch immediately
        if not g.accept_stroke():
            self._complete_dispatcher(0)
        # dispatch later only if we have a window
        elif self.temporal_window > 0:
            Clock.schedule_once(self._complete_dispatcher,
                                self.temporal_window)
    # -----------------------------------------------------------------------------
    # Gesture related methods
    # -----------------------------------------------------------------------------
    def init_gesture(self, touch):
        '''Create a new gesture from touch, i.e. it's the first on
        surface, or was not close enough to any existing gesture (yet)'''
        col = self.color
        if self.use_random_color:
            col = hsv_to_rgb(random(), 1., 1.)
        g = GestureContainer(touch, max_strokes=self.max_strokes, color=col)
        # Create the bounding box Rectangle for the gesture
        if self.draw_bbox:
            bb = g.bbox
            with self.canvas:
                Color(col[0], col[1], col[2], self.bbox_alpha, mode='rgba',
                      group=g.id)
                g._bbrect = Rectangle(
                    group=g.id,
                    pos=(bb['minx'], bb['miny']),
                    size=(bb['maxx'] - bb['minx'],
                          bb['maxy'] - bb['miny']))
        self._gestures.append(g)
        return g
    def init_stroke(self, g, touch):
        points = [touch.x, touch.y]
        col = g.color
        new_line = Line(
            points=points,
            width=self.line_width,
            group=g.id)
        g._strokes[str(touch.uid)] = new_line
        if self.line_width:
            canvas_add = self.canvas.add
            canvas_add(Color(col[0], col[1], col[2], mode='rgb', group=g.id))
            canvas_add(new_line)
        # Update the bbox in case; this will normally be done in on_touch_move,
        # but we want to update it also for a single press, force that here:
        g.update_bbox(touch)
        if self.draw_bbox:
            self._update_canvas_bbox(g)
        # Register the stroke in GestureContainer so we can look it up later
        g.add_stroke(touch, new_line)
    def get_gesture(self, touch):
        '''Returns GestureContainer associated with given touch'''
        for g in self._gestures:
            if g.active and g.handles(touch):
                return g
        raise Exception('get_gesture() failed to identify ' + str(touch.uid))
    def find_colliding_gesture(self, touch):
        '''Checks if a touch x/y collides with the bounding box of an existing
        gesture. If so, return it (otherwise returns None)
        '''
        touch_x, touch_y = touch.pos
        for g in self._gestures:
            if g.active and not g.handles(touch) and g.accept_stroke():
                bb = g.bbox
                margin = self.bbox_margin
                minx = bb['minx'] - margin
                miny = bb['miny'] - margin
                maxx = bb['maxx'] + margin
                maxy = bb['maxy'] + margin
                if minx <= touch_x <= maxx and miny <= touch_y <= maxy:
                    return g
        return None
    def merge_gestures(self, g, other):
        '''Merges two gestures together, the oldest one is retained and the
        newer one gets the `GestureContainer.was_merged` flag raised.'''
        # Swap order depending on gesture age (the merged gesture gets
        # the color from the oldest one of the two).
        swap = other._create_time < g._create_time
        a = swap and other or g
        b = swap and g or other
        # Apply the outer limits of bbox to the merged gesture
        abbox = a.bbox
        bbbox = b.bbox
        if bbbox['minx'] < abbox['minx']:
            abbox['minx'] = bbbox['minx']
        if bbbox['miny'] < abbox['miny']:
            abbox['miny'] = bbbox['miny']
        if bbbox['maxx'] > abbox['maxx']:
            abbox['maxx'] = bbbox['maxx']
        if bbbox['maxy'] > abbox['maxy']:
            abbox['maxy'] = bbbox['maxy']
        # Now transfer the coordinates from old to new gesture;
        # FIXME: This can probably be copied more efficiently?
        astrokes = a._strokes
        lw = self.line_width
        a_id = a.id
        col = a.color
        self.canvas.remove_group(b.id)
        canv_add = self.canvas.add
        for uid, old in b._strokes.items():
            # FIXME: Can't figure out how to change group= for existing Line()
            new_line = Line(
                points=old.points,
                width=old.width,
                group=a_id)
            astrokes[uid] = new_line
            if lw:
                canv_add(Color(col[0], col[1], col[2], mode='rgb', group=a_id))
                canv_add(new_line)
        b.active = False
        b.was_merged = True
        a.active_strokes += b.active_strokes
        a._update_time = Clock.get_time()
        return a
    def _update_canvas_bbox(self, g):
        # If draw_bbox is changed while two gestures are active,
        # we might not have a bbrect member
        if not hasattr(g, '_bbrect'):
            return
        bb = g.bbox
        g._bbrect.pos = (bb['minx'], bb['miny'])
        g._bbrect.size = (bb['maxx'] - bb['minx'],
                          bb['maxy'] - bb['miny'])
    # -----------------------------------------------------------------------------
    # Timeout callbacks
    # -----------------------------------------------------------------------------
    def _complete_dispatcher(self, dt):
        '''This method is scheduled on all touch up events. It will dispatch
        the `on_gesture_complete` event for all completed gestures, and remove
        merged gestures from the internal gesture list.'''
        need_cleanup = False
        gest = self._gestures
        timeout = self.draw_timeout
        twin = self.temporal_window
        get_time = Clock.get_time
        # Iterate over a shallow copy: we remove merged gestures from the
        # live list as we go, and deleting from the list being iterated
        # would silently skip the element that follows each removal.
        for g in gest[:]:
            # Gesture is part of another gesture, just delete it
            if g.was_merged:
                gest.remove(g)
                continue
            # Not active == already handled, or has active strokes (it cannot
            # possibly be complete). Proceed to next gesture on surface.
            if not g.active or g.active_strokes != 0:
                continue
            t1 = g._update_time + twin
            t2 = get_time() + UNDERSHOOT_MARGIN
            # max_strokes reached, or temporal window has expired. The gesture
            # is complete; need to dispatch _complete or _discard event.
            if not g.accept_stroke() or t1 <= t2:
                discard = False
                if g.width < 5 and g.height < 5:
                    discard = True
                elif g.single_points_test():
                    discard = True
                need_cleanup = True
                g.active = False
                g._cleanup_time = get_time() + timeout
                if discard:
                    self.dispatch('on_gesture_discard', g)
                else:
                    self.dispatch('on_gesture_complete', g)
        if need_cleanup:
            Clock.schedule_once(self._cleanup, timeout)
    def _cleanup(self, dt):
        '''This method is scheduled from _complete_dispatcher to clean up the
        canvas and internal gesture list after a gesture is completed.'''
        m = UNDERSHOOT_MARGIN
        rg = self.canvas.remove_group
        gestures = self._gestures
        # Same pattern as _complete_dispatcher: iterate a copy so removing
        # expired gestures cannot skip their neighbors in the live list.
        for g in gestures[:]:
            if g._cleanup_time is None:
                continue
            if g._cleanup_time <= Clock.get_time() + m:
                rg(g.id)
                gestures.remove(g)
                self.dispatch('on_gesture_cleanup', g)
    def on_gesture_start(self, *l):
        pass
    def on_gesture_extend(self, *l):
        pass
    def on_gesture_merge(self, *l):
        pass
    def on_gesture_complete(self, *l):
        pass
    def on_gesture_discard(self, *l):
        pass
    def on_gesture_cleanup(self, *l):
        pass
| mit |
CSC-ORG/Dynamic-Dashboard-2015 | engine/lib/python2.7/site-packages/pip/backwardcompat/__init__.py | 394 | 3756 | """Stuff that differs in different Python versions and platform
distributions."""
import os
import imp
import sys
import site
# Public API of this compat module.
__all__ = ['WindowsError']
# True on Python 3.2+, where compiled files live in __pycache__ (PEP 3147).
uses_pycache = hasattr(imp, 'cache_from_source')
class NeverUsedException(Exception):
    """this exception should never be raised"""
    # Used below as a stand-in for exception names (WindowsError,
    # PermissionError) that do not exist on this platform/version, so that
    # `except WindowsError:` style code still compiles and simply never fires.
# Bind platform/version-specific exception names; on platforms where they
# do not exist, alias them to NeverUsedException so handlers never match.
try:
    WindowsError = WindowsError
except NameError:
    WindowsError = NeverUsedException
try:
    #new in Python 3.3
    PermissionError = PermissionError
except NameError:
    PermissionError = NeverUsedException
console_encoding = sys.__stdout__.encoding
if sys.version_info >= (3,):
    # Python 3: re-export renamed stdlib modules under their Python 2 names
    # so the rest of pip can use a single set of import names.
    from io import StringIO, BytesIO
    from functools import reduce
    from urllib.error import URLError, HTTPError
    from queue import Queue, Empty
    from urllib.request import url2pathname, urlretrieve, pathname2url
    from email import message as emailmessage
    import urllib.parse as urllib
    import urllib.request as urllib2
    import configparser as ConfigParser
    import xmlrpc.client as xmlrpclib
    import urllib.parse as urlparse
    import http.client as httplib
    def cmp(a, b):
        # Backport of the Python 2 builtin cmp(): -1, 0 or 1.
        return (a > b) - (a < b)
    def b(s):
        # str -> bytes (utf-8); no-op alias on Python 2 below.
        return s.encode('utf-8')
    def u(s):
        # bytes -> str (utf-8); no-op alias on Python 2 below.
        return s.decode('utf-8')
    def console_to_str(s):
        # Decode console output bytes, falling back to utf-8 if the
        # console's declared encoding does not fit.
        try:
            return s.decode(console_encoding)
        except UnicodeDecodeError:
            return s.decode('utf_8')
    def get_http_message_param(http_message, param, default_value):
        return http_message.get_param(param, default_value)
    bytes = bytes
    string_types = (str,)
    raw_input = input
else:
    # Python 2: the names already exist; just import/alias them.
    from cStringIO import StringIO
    from urllib2 import URLError, HTTPError
    from Queue import Queue, Empty
    from urllib import url2pathname, urlretrieve, pathname2url
    from email import Message as emailmessage
    import urllib
    import urllib2
    import urlparse
    import ConfigParser
    import xmlrpclib
    import httplib
    def b(s):
        return s
    def u(s):
        return s
    def console_to_str(s):
        return s
    def get_http_message_param(http_message, param, default_value):
        result = http_message.getparam(param)
        return result or default_value
    bytes = str
    string_types = (basestring,)
    reduce = reduce
    cmp = cmp
    raw_input = raw_input
    BytesIO = StringIO
from distutils.sysconfig import get_python_lib, get_python_version
#site.USER_SITE was created in py2.6
user_site = getattr(site, 'USER_SITE', None)
def product(*args, **kwds):
    """Backport of itertools.product for old interpreters.

    Yields tuples forming the cartesian product of the input iterables,
    in the same order as itertools.product:

        product('ABCD', 'xy') --> Ax Ay Bx By Cx Cy Dx Dy
        product(range(2), repeat=3) --> 000 001 010 011 100 101 110 111
    """
    repeat = kwds.get('repeat', 1)
    # Materialize each input once so generators can be iterated repeatedly.
    pools = [tuple(arg) for arg in args] * repeat
    combos = [()]
    for pool in pools:
        combos = [combo + (item,) for combo in combos for item in pool]
    for combo in combos:
        yield combo
def get_path_uid(path):
    """
    Return path's uid.
    Does not follow symlinks: https://github.com/pypa/pip/pull/935#discussion_r5307003
    Placed this function in backwardcompat due to differences on AIX and Jython,
    that should eventually go away.
    :raises OSError: When path is a symlink or can't be read.
    """
    if hasattr(os, 'O_NOFOLLOW'):
        # Open with O_NOFOLLOW so a symlink raises instead of being followed.
        fd = os.open(path, os.O_RDONLY | os.O_NOFOLLOW)
        # try/finally so the descriptor is not leaked if fstat() raises.
        try:
            file_uid = os.fstat(fd).st_uid
        finally:
            os.close(fd)
    else:  # AIX and Jython
        # WARNING: time of check vulnerabity, but best we can do w/o NOFOLLOW
        if not os.path.islink(path):
            # older versions of Jython don't have `os.fstat`
            file_uid = os.stat(path).st_uid
        else:
            # raise OSError for parity with os.O_NOFOLLOW above
            raise OSError("%s is a symlink; Will not return uid for symlinks" % path)
    return file_uid
| mit |
novirael/lotr-pygame | variables.py | 1 | 6374 | #-------------------------------------------------------------------------------
# Name: variables.py
# Purpose:
#
# Author: novirael
#
# Created: 18-08-2012
# Copyright: (c) novirael 2012
# Licence: <your licence>
#-------------------------------------------------------------------------------
#!/usr/bin/env python
'''
PLAYER:
0 DARK SIDE
1 WHITE SIDE
MODE :
TOUR FIGHT
TOUR MAP
DARK VICTORY
WHITE VICTORY
DRAW
MAIN MENU
GAME RULES
ABOUT
MAP POSITIONING
MAP GAMEPLAY
FIGHT
'''
class MainVariables():
    """Central mutable game state shared across the game's screens.

    Holds the current UI mode, selection/turn bookkeeping, screen-area and
    button geometry (x, y, width, height rectangles), the board layout
    (lands and pawn queues), and both sides' piece and card lists.
    All state is (re)built in __init__; see the module docstring for the
    meaning of the `mode` and `player` values.
    """
    def __init__(self):
        # --- UI mode / turn state -------------------------------------
        self.mode = "MAIN MENU"
        self.last_mode = "MAIN MENU"
        self.player = 1
        self.x = 0
        self.y = 0
        self.sheet = 0
        # Selected character indices (-1 == nothing selected).
        self.sel_black_char_no = -1
        self.sel_white_char_no = -1
        self.sel_char_id = -1
        self.selected = False
        self.ready = True
        self.verify = False
        self.confirm = False
        self.raised_pawn = False
        self.movement = 1
        self.fightt = 1
        self.mment = {}
        self.errors = []
        self.check_confrontation = False
        self.enemy_no = 0
        self.no_pos = 0
        self.no_abroad = 0
        self.loaded = False
        self.saved = False
        self.reset = False
        self.brak_ruchu = False
        self.who = []
        self.where = "none"
        # Gameplay
        self.mouse_on_picture = False
        self.mouse_on_pawn = False
        self.mouse_on_tour_button = False
        self.mouse_on_reset_button = False
        self.mouse_on_continue_button = False
        # Main Menu
        self.mouse_on_menu = False
        self.mouse_on_new_game_button = False
        self.mouse_on_return_button = False
        self.mouse_on_load_game_button = False
        self.mouse_on_save_game_button = False
        self.mouse_on_game_rules_button = False
        self.mouse_on_about_button = False
        self.mouse_on_quit_button = False
        # Screen regions as (x, y, width, height) rectangles.
        self.areas = {
            "menu"          : (600, 300, 270, 440),
            "top bar"       : (  0,   0, 900,  85),
            "bottom bar"    : (  0, 715, 900,  85),
            "map"           : (  0,  85, 630, 630) }
        self.buttons = {
            "tour"      : (640, 595, 250, 50),
            "tour-fight": (325, 660, 250, 50),
            "reset"     : (640, 645, 250, 50),
            "continue"  : (300, 640, 250, 50),
            "new game"  : (610, 320, 250, 50),
            "return"    : (610, 380, 250, 50),
            "load game" : (610, 440, 250, 50),
            "save game" : (610, 500, 250, 50),
            "game rules": (610, 560, 250, 50),
            "about"     : (610, 620, 250, 50),
            "quit"      : (610, 680, 250, 50) }
        # Board lands: names plus their on-map rectangles and per-land
        # pawn queues.
        self.lands_names = ["mordor", "dagorlad", "gondor", "mroczna puszcza",
        "fangorn", "rohan", "wysoka przelecz", "gory mgliste", "moria",
        "wrota rohanu", "rhudaur", "eregion", "enedwaith", "cardolan",
        "arthedain", "shire" ]
        self.land = {
            "mordor"            : (225,600,180,80),
            "dagorlad"          : (315,520,180,80),
            "gondor"            : (135,520,180,80),
            "mroczna puszcza"   : (405,440,180,80),
            "fangorn"           : (225,440,180,80),
            "rohan"             : ( 45,440,180,80),
            "wysoka przelecz"   : (455,360,150,80),
            "gory mgliste"      : (305,360,150,80),
            "moria"             : (155,360,150,80),
            "wrota rohanu"      : (  5,360,150,80),
            "rhudaur"           : (405,280,180,80),
            "eregion"           : (225,280,180,80),
            "enedwaith"         : ( 45,280,180,80),
            "cardolan"          : (135,200,180,80),
            "arthedain"         : (315,200,180,80),
            "shire"             : (225,120,180,80) }
        self.pawns_qu = {
            "mordor"            : [],
            "dagorlad"          : [],
            "gondor"            : [],
            "mroczna puszcza"   : [],
            "fangorn"           : [],
            "rohan"             : [],
            "wysoka przelecz"   : [],
            "gory mgliste"      : [],
            "moria"             : [],
            "wrota rohanu"      : [],
            "rhudaur"           : [],
            "eregion"           : [],
            "enedwaith"         : [],
            "cardolan"          : [],
            "arthedain"         : [],
            "shire"             : [] }
        self.live = [[]]
        self.dead = [[]]
        """
        9: troll0
        5: balrog0, szeloba0, czarnoksieznik0, gandalf1
        4: saruman0, aragorn1
        3: latajacy nazgul0, czarny jezdziec0, legolas1, gimli1
        2: ork0, warg0, sam1, merry1
        1: frodo1, pippin1
        0: boromir1
        """
        # being [ [player, number, character, power, land, position]
        self.b_live = [
            [0, "balrog0",  "none", (0,0), 5 ],
            [1, "szeloba0", "none", (0,0), 5 ],
            [2, "czarnoksieznik0", "none", (0,0), 5 ],
            [3, "latajacy nazgul0", "none", (0,0), 3 ],
            [4, "czarny jezdziec0", "none", (0,0), 3 ],
            [5, "saruman0", "none", (0,0), 4 ],
            [6, "ork0",     "none", (0,0), 2 ],
            [7, "warg0",    "none", (0,0), 2 ],
            [8, "troll0",   "none", (0,0), 9 ] ]
        self.w_live = [
            [0, "frodo1",   "none", (0,0), 1 ],
            [1, "sam1",     "none", (0,0), 2 ],
            [2, "pippin1",  "none", (0,0), 1 ],
            [3, "merry1",   "none", (0,0), 2 ],
            [4, "gandalf1", "none", (0,0), 5 ],
            [5, "aragorn1", "none", (0,0), 4 ],
            [6, "legolas1", "none", (0,0), 3 ],
            [7, "gimli1",   "none", (0,0), 3 ],
            [8, "boromir1", "none", (0,0), 0 ] ]
        self.b_dead = []
        self.w_dead = []
        # Card decks: [card index, value-or-special-action].
        self.cards = []
        self.b_cards = [[0, 1], [1, 2], [2, 3], [3, 4], [4, 5], [5, 6],
            [6, "magia"], [7, "ucieczka"], [8, "oko saurona"] ]
        self.w_cards = [[0, 1], [1, 2], [2, 3], [3, 4], [4, 5], [5, "magia"],
            [6, "odwrot"], [7, "poswiecenie"], [8, "plaszcz elfow"] ]
        self.b_wasted_cards = []
        self.w_wasted_cards = []
        self.live_backup = []
        self.pawns_qu_backup = {}
| gpl-2.0 |
jimmyraywv/cloud-custodian | c7n/resources/asg.py | 1 | 55595 | # Copyright 2015-2017 Capital One Services, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import, division, print_function, unicode_literals
from botocore.client import ClientError
from collections import Counter
from concurrent.futures import as_completed
from datetime import datetime, timedelta
from dateutil.parser import parse
from dateutil.tz import tzutc
import logging
import itertools
import time
from c7n.actions import Action, ActionRegistry
from c7n.filters import (
FilterRegistry, ValueFilter, AgeFilter, Filter, FilterValidationError,
OPERATORS)
from c7n.filters.offhours import OffHour, OnHour
import c7n.filters.vpc as net_filters
from c7n.manager import resources
from c7n import query
from c7n.tags import TagActionFilter, DEFAULT_TAG, TagCountFilter, TagTrim
from c7n.utils import (
local_session, type_schema, chunks, get_retry, worker)
log = logging.getLogger('custodian.asg')
# Registries the asg filters/actions below attach themselves to; they are
# wired into the ASG resource manager via filter_registry/action_registry.
filters = FilterRegistry('asg.filters')
actions = ActionRegistry('asg.actions')
# Register generic, cross-resource filters for asgs.
filters.register('offhour', OffHour)
filters.register('onhour', OnHour)
filters.register('tag-count', TagCountFilter)
filters.register('marked-for-op', TagActionFilter)
@resources.register('asg')
class ASG(query.QueryResourceManager):
    """Resource manager for AWS Auto Scaling Groups."""

    class resource_type(object):
        # Metadata consumed by the generic query machinery.
        service = 'autoscaling'
        type = 'autoScalingGroup'
        id = name = 'AutoScalingGroupName'
        date = 'CreatedTime'
        dimension = 'AutoScalingGroupName'
        # (api method, result key, detail spec)
        enum_spec = ('describe_auto_scaling_groups', 'AutoScalingGroups', None)
        filter_name = 'AutoScalingGroupNames'
        filter_type = 'list'
        config_type = 'AWS::AutoScaling::AutoScalingGroup'
        # Columns emitted by `custodian report`.
        default_report_fields = (
            'AutoScalingGroupName',
            'CreatedTime',
            'LaunchConfigurationName',
            'count:Instances',
            'DesiredCapacity',
            'HealthCheckType',
            'list:LoadBalancerNames',
        )
    filter_registry = filters
    action_registry = actions
    # Retry API calls that fail with these transient error codes.
    retry = staticmethod(get_retry(('ResourceInUse', 'Throttling',)))
class LaunchConfigFilterBase(object):
    """Mixin base class for querying asg launch configs.

    Subclasses call :meth:`initialize` before filtering; it populates
    ``self.configs``, a mapping of launch config name -> launch config.
    """
    permissions = ("autoscaling:DescribeLaunchConfigurations",)
    configs = None

    def initialize(self, asgs):
        """Get launch configs for the set of asgs.

        Asgs without a 'LaunchConfigurationName' key are removed from the
        passed-in list in place.
        Per https://github.com/capitalone/cloud-custodian/issues/143
        """
        # Previously a set of referenced config names was accumulated here
        # but never used (all launch configs are fetched below anyway);
        # that dead code and a redundant `self.configs = {}` are removed.
        skip = [a for a in asgs if 'LaunchConfigurationName' not in a]
        for a in skip:
            asgs.remove(a)
        self.log.debug(
            "Querying launch configs for filter %s",
            self.__class__.__name__)
        configs = self.manager.get_resource_manager(
            'launch-config').resources()
        self.configs = {
            cfg['LaunchConfigurationName']: cfg for cfg in configs}
@filters.register('security-group')
class SecurityGroupFilter(
        net_filters.SecurityGroupFilter, LaunchConfigFilterBase):
    """Filter asgs by the security groups of their launch configs."""

    RelatedIdsExpression = ""

    def get_permissions(self):
        return ("autoscaling:DescribeLaunchConfigurations",
                "ec2:DescribeSecurityGroups",)

    def get_related_ids(self, asgs):
        """Collect security group ids referenced by the asgs' launch configs."""
        group_ids = set()
        for asg in asgs:
            cfg = self.configs.get(asg['LaunchConfigurationName'])
            # A launch config can be deleted while an asg still references
            # it; previously this raised AttributeError on the None lookup.
            if cfg is None:
                continue
            group_ids.update(cfg.get('SecurityGroups', ()))
        return group_ids

    def process(self, asgs, event=None):
        # Populate self.configs before delegating to the generic filter.
        self.initialize(asgs)
        return super(SecurityGroupFilter, self).process(asgs, event)
@filters.register('subnet')
class SubnetFilter(net_filters.SubnetFilter):
    """Filter asgs by the subnets named in their VPCZoneIdentifier."""

    RelatedIdsExpression = ""

    def get_related_ids(self, asgs):
        # VPCZoneIdentifier is a comma separated list of subnet ids.
        return {
            subnet_id.strip()
            for group in asgs
            for subnet_id in group.get('VPCZoneIdentifier', '').split(',')}
# Attach the generic network-location filter to asgs.
filters.register('network-location', net_filters.NetworkLocation)
@filters.register('launch-config')
class LaunchConfigFilter(ValueFilter, LaunchConfigFilterBase):
    """Filter asg by launch config attributes.
    :example:
        .. code-block: yaml
            policies:
              - name: launch-config-public-ip
                resource: asg
                filters:
                  - type: launch-config
                    key: AssociatePublicIpAddress
                    value: true
    """
    schema = type_schema(
        'launch-config', rinherit=ValueFilter.schema)
    permissions = ("autoscaling:DescribeLaunchConfigurations",)

    def process(self, asgs, event=None):
        # Load launch configs once, then value-match per asg.
        self.initialize(asgs)
        return super(LaunchConfigFilter, self).process(asgs, event)

    def __call__(self, asg):
        # Active launch configs can be deleted, so the lookup may be None.
        return self.match(self.configs.get(asg['LaunchConfigurationName']))
class ConfigValidFilter(Filter, LaunchConfigFilterBase):
    """Shared implementation behind the `valid` / `invalid` asg filters.

    Cross references every resource an asg's configuration points at
    (subnets, elbs, alb target groups, launch config, security groups,
    key pairs, amis, ebs snapshots) against the account's live resources.
    """

    def get_permissions(self):
        # chain(*seq) flattens the per-manager permission tuples into a
        # flat list of strings; the previous chain(seq) yielded the
        # tuples themselves, producing a list of tuples.
        return list(itertools.chain(*[
            self.manager.get_resource_manager(m).get_permissions()
            for m in ('subnet', 'security-group', 'key-pair', 'elb',
                      'app-elb-target-group', 'ebs-snapshot', 'ami')]))

    def validate(self):
        # This filter enumerates many resource types; disallow it in
        # lambda/event execution modes.
        if self.manager.data.get('mode'):
            raise FilterValidationError(
                "invalid-config makes too many queries to be run in lambda")
        return self

    def initialize(self, asgs):
        """Snapshot account resource identifiers used by the checks."""
        super(ConfigValidFilter, self).initialize(asgs)
        # pylint: disable=attribute-defined-outside-init
        self.subnets = self.get_subnets()
        self.security_groups = self.get_security_groups()
        self.key_pairs = self.get_key_pairs()
        self.elbs = self.get_elbs()
        self.appelb_target_groups = self.get_appelb_target_groups()
        self.snapshots = self.get_snapshots()
        self.images, self.image_snaps = self.get_images()

    def get_subnets(self):
        manager = self.manager.get_resource_manager('subnet')
        return set([s['SubnetId'] for s in manager.resources()])

    def get_security_groups(self):
        manager = self.manager.get_resource_manager('security-group')
        return set([s['GroupId'] for s in manager.resources()])

    def get_key_pairs(self):
        manager = self.manager.get_resource_manager('key-pair')
        return set([k['KeyName'] for k in manager.resources()])

    def get_elbs(self):
        manager = self.manager.get_resource_manager('elb')
        return set([e['LoadBalancerName'] for e in manager.resources()])

    def get_appelb_target_groups(self):
        manager = self.manager.get_resource_manager('app-elb-target-group')
        return set([a['TargetGroupArn'] for a in manager.resources()])

    def get_images(self):
        """Return (image ids, snapshot ids referenced by those images)."""
        manager = self.manager.get_resource_manager('ami')
        images = set()
        image_snaps = set()
        image_ids = list({lc['ImageId'] for lc in self.configs.values()})
        # Pull account images, we should be able to utilize cached values,
        # drawn down the image population to just images not in the account.
        account_images = [
            i for i in manager.resources() if i['ImageId'] in image_ids]
        account_image_ids = {i['ImageId'] for i in account_images}
        image_ids = [image_id for image_id in image_ids
                     if image_id not in account_image_ids]
        # To pull third party images, we explicitly use a describe
        # source without any cache.
        #
        # Can't use a config source since it won't have state for
        # third party ami, we auto propagate source normally, so we
        # explicitly pull a describe source. Can't use a cache either
        # as they're not in the account.
        while image_ids:
            try:
                amis = manager.get_source('describe').get_resources(
                    image_ids, cache=False)
                account_images.extend(amis)
                break
            except ClientError as e:
                msg = e.response['Error']['Message']
                if e.response['Error']['Code'] != 'InvalidAMIID.NotFound':
                    raise
                # Drop the ids AWS reported missing and retry the rest.
                for n in msg[msg.find('[') + 1: msg.find(']')].split(','):
                    image_ids.remove(n.strip())
        for a in account_images:
            images.add(a['ImageId'])
            # Capture any snapshots, images strongly reference their
            # snapshots, and some of these will be third party in the
            # case of a third party image.
            for bd in a.get('BlockDeviceMappings', ()):
                if 'Ebs' not in bd or 'SnapshotId' not in bd['Ebs']:
                    continue
                image_snaps.add(bd['Ebs']['SnapshotId'].strip())
        return images, image_snaps

    def get_snapshots(self):
        manager = self.manager.get_resource_manager('ebs-snapshot')
        return set([s['SnapshotId'] for s in manager.resources()])

    def process(self, asgs, event=None):
        self.initialize(asgs)
        return super(ConfigValidFilter, self).process(asgs, event)

    def get_asg_errors(self, asg):
        """Return a list of (error-kind, identifier) tuples for the asg."""
        errors = []
        subnets = asg.get('VPCZoneIdentifier', '').split(',')
        for subnet in subnets:
            subnet = subnet.strip()
            if subnet not in self.subnets:
                errors.append(('invalid-subnet', subnet))
        for elb in asg['LoadBalancerNames']:
            elb = elb.strip()
            if elb not in self.elbs:
                errors.append(('invalid-elb', elb))
        for appelb_target in asg.get('TargetGroupARNs', []):
            appelb_target = appelb_target.strip()
            if appelb_target not in self.appelb_target_groups:
                errors.append(('invalid-appelb-target-group', appelb_target))
        cfg_id = asg.get(
            'LaunchConfigurationName', asg['AutoScalingGroupName'])
        cfg_id = cfg_id.strip()
        cfg = self.configs.get(cfg_id)
        if cfg is None:
            errors.append(('invalid-config', cfg_id))
            self.log.debug(
                "asg:%s no launch config found" % asg['AutoScalingGroupName'])
            asg['Invalid'] = errors
            # Return the collected errors; previously the bare boolean
            # True was returned, which lost the error detail and caused
            # InvalidConfigFilter to overwrite asg['Invalid'] with True.
            return errors
        for sg in cfg['SecurityGroups']:
            sg = sg.strip()
            if sg not in self.security_groups:
                errors.append(('invalid-security-group', sg))
        if cfg['KeyName'] and cfg['KeyName'].strip() not in self.key_pairs:
            errors.append(('invalid-key-pair', cfg['KeyName']))
        if cfg['ImageId'].strip() not in self.images:
            errors.append(('invalid-image', cfg['ImageId']))
        for bd in cfg['BlockDeviceMappings']:
            if 'Ebs' not in bd or 'SnapshotId' not in bd['Ebs']:
                continue
            snapshot_id = bd['Ebs']['SnapshotId'].strip()
            # Snapshots strongly referenced by an image are acceptable
            # even when not present in the account (third party amis).
            if snapshot_id in self.image_snaps:
                continue
            if snapshot_id not in self.snapshots:
                errors.append(('invalid-snapshot', bd['Ebs']['SnapshotId']))
        return errors
@filters.register('valid')
class ValidConfigFilter(ConfigValidFilter):
    """Filters autoscale groups to find those that are structurally valid.
    This operates as the inverse of the invalid filter for multi-step
    workflows.
    See details on the invalid filter for a list of checks made.
    :example:
        .. code-base: yaml
            policies:
              - name: asg-valid-config
                resource: asg
                filters:
                  - valid
    """
    schema = type_schema('valid')

    def __call__(self, asg):
        # Valid iff the structural check reports nothing.
        return not self.get_asg_errors(asg)
@filters.register('invalid')
class InvalidConfigFilter(ConfigValidFilter):
    """Filter autoscale groups to find those that are structurally invalid.
    Structurally invalid means that the auto scale group will not be able
    to launch an instance succesfully as the configuration has
    - invalid subnets
    - invalid security groups
    - invalid key pair name
    - invalid launch config volume snapshots
    - invalid amis
    - invalid health check elb (slower)
    Internally this tries to reuse other resource managers for better
    cache utilization.
    :example:
        .. code-base: yaml
            policies:
              - name: asg-invalid-config
                resource: asg
                filters:
                  - invalid
    """
    schema = type_schema('invalid')

    def __call__(self, asg):
        # Annotate the asg with the errors found, if any.
        errors = self.get_asg_errors(asg)
        if not errors:
            return
        asg['Invalid'] = errors
        return True
@filters.register('not-encrypted')
class NotEncryptedFilter(Filter, LaunchConfigFilterBase):
    """Check if an ASG is configured to have unencrypted volumes.
    Checks both the ami snapshots and the launch configuration.
    :example:
        .. code-block: yaml
            policies:
              - name: asg-unencrypted
                resource: asg
                filters:
                  - type: not-encrypted
                    exclude_image: true
    """
    schema = type_schema('not-encrypted', exclude_image={'type': 'boolean'})
    permissions = (
        'ec2:DescribeImages',
        'ec2:DescribeSnapshots',
        'autoscaling:DescribeLaunchConfigurations')
    # Populated by initialize(); class-level defaults for clarity.
    images = unencrypted_configs = unencrypted_images = None
    # TODO: resource-manager, notfound err mgr
    def process(self, asgs, event=None):
        self.initialize(asgs)
        return super(NotEncryptedFilter, self).process(asgs, event)
    def __call__(self, asg):
        # Launch configs can be deleted while still referenced by an asg.
        cfg = self.configs.get(asg['LaunchConfigurationName'])
        if not cfg:
            self.log.warning(
                "ASG %s instances: %d has missing config: %s",
                asg['AutoScalingGroupName'], len(asg['Instances']),
                asg['LaunchConfigurationName'])
            return False
        # Record which source (image and/or launch config) is unencrypted.
        unencrypted = []
        if (not self.data.get('exclude_image') and cfg['ImageId'] in self.unencrypted_images):
            unencrypted.append('Image')
        if cfg['LaunchConfigurationName'] in self.unencrypted_configs:
            unencrypted.append('LaunchConfig')
        if unencrypted:
            asg['Unencrypted'] = unencrypted
        return bool(unencrypted)
    def initialize(self, asgs):
        """Precompute the sets of unencrypted images and launch configs."""
        super(NotEncryptedFilter, self).initialize(asgs)
        ec2 = local_session(self.manager.session_factory).client('ec2')
        self.unencrypted_images = self.get_unencrypted_images(ec2)
        self.unencrypted_configs = self.get_unencrypted_configs(ec2)
    def _fetch_images(self, ec2, image_ids):
        """describe_images, dropping ids AWS reports as not found."""
        while True:
            try:
                return ec2.describe_images(ImageIds=list(image_ids))
            except ClientError as e:
                if e.response['Error']['Code'] == 'InvalidAMIID.NotFound':
                    # Parse the offending ids out of the error message,
                    # remove them, and retry with the remainder.
                    msg = e.response['Error']['Message']
                    e_ami_ids = [
                        e_ami_id.strip() for e_ami_id
                        in msg[msg.find("'[") + 2:msg.rfind("]'")].split(',')]
                    self.log.warning(
                        "asg:not-encrypted filter image not found %s",
                        e_ami_ids)
                    for e_ami_id in e_ami_ids:
                        image_ids.remove(e_ami_id)
                    continue
                raise
    def get_unencrypted_images(self, ec2):
        """retrieve images which have unencrypted snapshots referenced."""
        image_ids = set()
        for cfg in self.configs.values():
            image_ids.add(cfg['ImageId'])
        self.log.debug("querying %d images", len(image_ids))
        results = self._fetch_images(ec2, image_ids)
        self.images = {i['ImageId']: i for i in results['Images']}
        unencrypted_images = set()
        for i in self.images.values():
            # An image is unencrypted if any of its ebs mappings is.
            for bd in i['BlockDeviceMappings']:
                if 'Ebs' in bd and not bd['Ebs'].get('Encrypted'):
                    unencrypted_images.add(i['ImageId'])
                    break
        return unencrypted_images
    def get_unencrypted_configs(self, ec2):
        """retrieve configs that have unencrypted ebs volumes referenced."""
        unencrypted_configs = set()
        # snapshot id -> list of config ids referencing it
        snaps = {}
        for cid, c in self.configs.items():
            image = self.images.get(c['ImageId'])
            # image deregistered/unavailable
            if image is not None:
                image_block_devs = {
                    bd['DeviceName']: bd['Ebs']
                    for bd in image['BlockDeviceMappings'] if 'Ebs' in bd}
            else:
                image_block_devs = {}
            for bd in c['BlockDeviceMappings']:
                if 'Ebs' not in bd:
                    continue
                # Launch configs can shadow image devices, images have
                # precedence.
                if bd['DeviceName'] in image_block_devs:
                    continue
                if 'SnapshotId' in bd['Ebs']:
                    # Defer the encryption check to the snapshot query.
                    snaps.setdefault(
                        bd['Ebs']['SnapshotId'].strip(), []).append(cid)
                elif not bd['Ebs'].get('Encrypted'):
                    unencrypted_configs.add(cid)
        if not snaps:
            return unencrypted_configs
        self.log.debug("querying %d snapshots", len(snaps))
        for s in self.get_snapshots(ec2, list(snaps.keys())):
            if not s.get('Encrypted'):
                unencrypted_configs.update(snaps[s['SnapshotId']])
        return unencrypted_configs
    def get_snapshots(self, ec2, snap_ids):
        """get snapshots corresponding to id, but tolerant of missing."""
        while True:
            try:
                result = ec2.describe_snapshots(SnapshotIds=snap_ids)
            except ClientError as e:
                if e.response['Error']['Code'] == 'InvalidSnapshot.NotFound':
                    # Drop the missing snapshot id and retry the rest.
                    msg = e.response['Error']['Message']
                    e_snap_id = msg[msg.find("'") + 1:msg.rfind("'")]
                    self.log.warning("Snapshot not found %s" % e_snap_id)
                    snap_ids.remove(e_snap_id)
                    continue
                raise
            else:
                return result.get('Snapshots', ())
@filters.register('image-age')
class ImageAgeFilter(AgeFilter, LaunchConfigFilterBase):
    """Filter asg by image age (in days).
    :example:
        .. code-block: yaml
            policies:
              - name: asg-older-image
                resource: asg
                filters:
                  - type: image-age
                    days: 90
                    op: ge
    """
    permissions = (
        "ec2:DescribeImages",
        "autoscaling:DescribeLaunchConfigurations")
    date_attribute = "CreationDate"
    schema = type_schema(
        'image-age',
        op={'type': 'string', 'enum': list(OPERATORS.keys())},
        days={'type': 'number'})

    def process(self, asgs, event=None):
        self.initialize(asgs)
        return super(ImageAgeFilter, self).process(asgs, event)

    def initialize(self, asgs):
        """Index account images by id for age lookups.

        (A previously computed set of config image ids was never used
        and has been removed.)
        """
        super(ImageAgeFilter, self).initialize(asgs)
        results = self.manager.get_resource_manager('ami').resources()
        self.images = {i['ImageId']: i for i in results}

    def get_resource_date(self, i):
        cfg = self.configs[i['LaunchConfigurationName']]
        ami = self.images.get(cfg['ImageId'], {})
        # Unknown images fall back to an ancient timestamp so age
        # comparisons treat them as very old.
        return parse(ami.get(
            self.date_attribute, "2000-01-01T01:01:01.000Z"))
@filters.register('image')
class ImageFilter(ValueFilter, LaunchConfigFilterBase):
    """Filter asg by image
    :example:
        .. code-block: yaml
            policies:
              - name: asg-image-tag
                resource: asg
                filters:
                  - type: image
                    value: "tag:ImageTag"
                    key: "TagValue"
                    op: eq
    """
    permissions = (
        "ec2:DescribeImages",
        "autoscaling:DescribeLaunchConfigurations")
    schema = type_schema('image', rinherit=ValueFilter.schema)

    def process(self, asgs, event=None):
        self.initialize(asgs)
        return super(ImageFilter, self).process(asgs, event)

    def initialize(self, asgs):
        """Index the images referenced by the asgs' launch configs."""
        super(ImageFilter, self).initialize(asgs)
        image_ids = set()
        for cfg in self.configs.values():
            image_ids.add(cfg['ImageId'])
        results = self.manager.get_resource_manager('ami').resources()
        base_image_map = {i['ImageId']: i for i in results}
        resources = {i: base_image_map[i] for i in image_ids if i in base_image_map}
        missing = list(set(image_ids) - set(resources.keys()))
        if missing:
            # Fetch referenced images absent from the account listing
            # (e.g. third party amis), bypassing the cache.
            loaded = self.manager.get_resource_manager('ami').get_resources(missing, False)
            resources.update({image['ImageId']: image for image in loaded})
        self.images = resources

    def __call__(self, i):
        cfg = self.configs[i['LaunchConfigurationName']]
        image = self.images.get(cfg['ImageId'], {})
        # Finally, if we have no image...
        if not image:
            # `i` is an asg record; it has no InstanceId/ImageId keys.
            # The previous message referenced those keys and raised
            # KeyError on exactly the path it was meant to log.
            self.log.warning(
                "Could not locate image for asg:%s ami:%s" % (
                    i['AutoScalingGroupName'], cfg["ImageId"]))
            # Match instead on empty skeleton?
            return False
        return self.match(image)
@filters.register('vpc-id')
class VpcIdFilter(ValueFilter):
    """Filters ASG based on the VpcId
    This filter is available as a ValueFilter as the vpc-id is not natively
    associated to the results from describing the autoscaling groups.
    :example:
        .. code-block: yaml
            policies:
              - name: asg-vpc-xyz
                resource: asg
                filters:
                  - type: vpc-id
                    value: vpc-12ab34cd
    """
    schema = type_schema(
        'vpc-id', rinherit=ValueFilter.schema)
    schema['properties'].pop('key')
    permissions = ('ec2:DescribeSubnets',)

    # TODO: annotation
    def __init__(self, data, manager=None):
        super(VpcIdFilter, self).__init__(data, manager)
        self.data['key'] = 'VpcId'

    def process(self, asgs, event=None):
        # Group asgs by their first subnet; a subnet determines the vpc.
        grouped = {}
        for group in asgs:
            zone_ids = group.get('VPCZoneIdentifier', '')
            if not zone_ids:
                continue
            grouped.setdefault(zone_ids.split(',')[0], []).append(group)
        subnet_manager = self.manager.get_resource_manager('subnet')
        # Invalid subnets on asgs happen, so query all
        known_subnets = {s['SubnetId']: s for s in subnet_manager.resources()}
        for subnet_id, members in grouped.items():
            subnet = known_subnets.get(subnet_id)
            if subnet is None:
                self.log.warning(
                    "invalid subnet %s for asgs: %s",
                    subnet_id, [m['AutoScalingGroupName'] for m in members])
                continue
            # Annotate each asg with its vpc for the value match below.
            for member in members:
                member['VpcId'] = subnet['VpcId']
        return super(VpcIdFilter, self).process(asgs)
@actions.register('tag-trim')
class GroupTagTrim(TagTrim):
    """Action to trim the number of tags to avoid hitting tag limits
    :example:
        .. code-block: yaml
            policies:
              - name: asg-tag-trim
                resource: asg
                filters:
                  - type: tag-count
                    count: 10
                actions:
                  - type: tag-trim
                    space: 1
                    preserve:
                      - OwnerName
                      - OwnerContact
    """
    max_tag_count = 10
    permissions = ('autoscaling:DeleteTags',)

    def process_tag_removal(self, resource, candidates):
        # Delete each candidate tag from the group in a single call.
        client = local_session(
            self.manager.session_factory).client('autoscaling')
        client.delete_tags(Tags=[
            {'Key': key,
             'ResourceType': 'auto-scaling-group',
             'ResourceId': resource['AutoScalingGroupName']}
            for key in candidates])
@filters.register('capacity-delta')
class CapacityDelta(Filter):
    """Filter returns ASG that have less instances than desired or required
    :example:
        .. code-block: yaml
            policies:
              - name: asg-capacity-delta
                resource: asg
                filters:
                  - capacity-delta
    """
    schema = type_schema('capacity-delta')

    def process(self, asgs, event=None):
        def undersized(group):
            # Fewer running instances than either desired or the minimum.
            running = len(group['Instances'])
            return (running < group['DesiredCapacity']
                    or running < group['MinSize'])
        return list(filter(undersized, asgs))
@actions.register('resize')
class Resize(Action):
    """Action to resize the min/max/desired instances in an ASG
    There are several ways to use this action:
    1. set min/desired to current running instances
    .. code-block: yaml
            policies:
              - name: asg-resize
                resource: asg
                filters:
                  - capacity-delta
                actions:
                  - type: resize
                    desired-size: "current"
    2. apply a fixed resize of min, max or desired, optionally saving the
       previous values to a named tag (for restoring later):
    .. code-block: yaml
            policies:
              - name: offhours-asg-off
                resource: asg
                filters:
                  - type: offhour
                    offhour: 19
                    default_tz: bst
                actions:
                  - type: resize
                    min-size: 0
                    desired-size: 0
                    save-options-tag: OffHoursPrevious
    3. restore previous values for min/max/desired from a tag:
    .. code-block: yaml
            policies:
              - name: offhours-asg-on
                resource: asg
                filters:
                  - type: onhour
                    onhour: 8
                    default_tz: bst
                actions:
                  - type: resize
                    restore-options-tag: OffHoursPrevious
    """
    schema = type_schema(
        'resize',
        **{
            'min-size': {'type': 'integer', 'minimum': 0},
            'max-size': {'type': 'integer', 'minimum': 0},
            'desired-size': {
                "anyOf": [
                    {'enum': ["current"]},
                    {'type': 'integer', 'minimum': 0}
                ]
            },
            # support previous key name with underscore
            'desired_size': {
                "anyOf": [
                    {'enum': ["current"]},
                    {'type': 'integer', 'minimum': 0}
                ]
            },
            'save-options-tag': {'type': 'string'},
            'restore-options-tag': {'type': 'string'},
        }
    )
    permissions = (
        'autoscaling:UpdateAutoScalingGroup',
        'autoscaling:CreateOrUpdateTags'
    )
    def validate(self):
        # NOTE(review): historical validation intentionally disabled; the
        # action now accepts fixed sizes and tag-based save/restore.
        # if self.data['desired_size'] != 'current':
        #     raise FilterValidationError(
        #         "only resizing desired/min to current capacity is supported")
        return self
    def process(self, asgs):
        # ASG parameters to save to/restore from a tag
        asg_params = ['MinSize', 'MaxSize', 'DesiredCapacity']
        # support previous param desired_size when desired-size is not present
        if 'desired_size' in self.data and 'desired-size' not in self.data:
            self.data['desired-size'] = self.data['desired_size']
        client = local_session(self.manager.session_factory).client(
            'autoscaling')
        for a in asgs:
            tag_map = {t['Key']: t['Value'] for t in a.get('Tags', [])}
            update = {}
            current_size = len(a['Instances'])
            if 'restore-options-tag' in self.data:
                # we want to restore all ASG size params from saved data
                log.debug('Want to restore ASG %s size from tag %s' %
                          (a['AutoScalingGroupName'], self.data['restore-options-tag']))
                if self.data['restore-options-tag'] in tag_map:
                    # Saved format: "MinSize=1;MaxSize=4;DesiredCapacity=2"
                    for field in tag_map[self.data['restore-options-tag']].split(';'):
                        (param, value) = field.split('=')
                        if param in asg_params:
                            update[param] = int(value)
            else:
                # we want to resize, parse provided params
                if 'min-size' in self.data:
                    update['MinSize'] = self.data['min-size']
                if 'max-size' in self.data:
                    update['MaxSize'] = self.data['max-size']
                if 'desired-size' in self.data:
                    if self.data['desired-size'] == 'current':
                        update['DesiredCapacity'] = min(current_size, a['DesiredCapacity'])
                        if 'MinSize' not in update:
                            # unless we were given a new value for min_size then
                            # ensure it is at least as low as current_size
                            update['MinSize'] = min(current_size, a['MinSize'])
                    elif type(self.data['desired-size']) == int:
                        update['DesiredCapacity'] = self.data['desired-size']
            if update:
                log.debug('ASG %s size: current=%d, min=%d, max=%d, desired=%d'
                          % (a['AutoScalingGroupName'], current_size, a['MinSize'],
                             a['MaxSize'], a['DesiredCapacity']))
                if 'save-options-tag' in self.data:
                    # save existing ASG params to a tag before changing them
                    log.debug('Saving ASG %s size to tag %s' %
                              (a['AutoScalingGroupName'], self.data['save-options-tag']))
                    tags = [dict(
                        Key=self.data['save-options-tag'],
                        PropagateAtLaunch=False,
                        Value=';'.join({'%s=%d' % (param, a[param]) for param in asg_params}),
                        ResourceId=a['AutoScalingGroupName'],
                        ResourceType='auto-scaling-group',
                    )]
                    self.manager.retry(client.create_or_update_tags, Tags=tags)
                log.debug('Resizing ASG %s with %s' % (a['AutoScalingGroupName'],
                                                       str(update)))
                self.manager.retry(
                    client.update_auto_scaling_group,
                    AutoScalingGroupName=a['AutoScalingGroupName'],
                    **update)
            else:
                log.debug('nothing to resize')
@actions.register('remove-tag')
@actions.register('untag')
@actions.register('unmark')
class RemoveTag(Action):
    """Action to remove tag/tags from an ASG
    :example:
        .. code-block: yaml
            policies:
              - name: asg-remove-unnecessary-tags
                resource: asg
                filters:
                  - "tag:UnnecessaryTag": present
                actions:
                  - type: remove-tag
                    key: UnnecessaryTag
    """
    schema = type_schema(
        'remove-tag',
        aliases=('untag', 'unmark'),
        key={'type': 'string'})
    permissions = ('autoscaling:DeleteTags',)
    # Tag API calls are issued one asg per batch.
    batch_size = 1
    def process(self, asgs):
        error = False
        key = self.data.get('key', DEFAULT_TAG)
        with self.executor_factory(max_workers=3) as w:
            futures = {}
            for asg_set in chunks(asgs, self.batch_size):
                futures[w.submit(self.process_asg_set, asg_set, key)] = asg_set
            for f in as_completed(futures):
                asg_set = futures[f]
                if f.exception():
                    # Remember the failure; re-raised once all batches
                    # have completed so the rest still get processed.
                    error = f.exception()
                    self.log.exception(
                        "Exception untagging asg:%s tag:%s error:%s" % (
                            ", ".join([a['AutoScalingGroupName']
                                       for a in asg_set]),
                            self.data.get('key', DEFAULT_TAG),
                            f.exception()))
        if error:
            raise error
    def process_asg_set(self, asgs, key):
        """Delete the tag `key` from each asg in the batch."""
        session = local_session(self.manager.session_factory)
        client = session.client('autoscaling')
        tags = [dict(
            Key=key, ResourceType='auto-scaling-group',
            ResourceId=a['AutoScalingGroupName']) for a in asgs]
        self.manager.retry(client.delete_tags, Tags=tags)
@actions.register('tag')
@actions.register('mark')
class Tag(Action):
    """Action to add a tag to an ASG
    The *propagate* parameter can be used to specify that the tag being added
    will need to be propagated down to each ASG instance associated or simply
    to the ASG itself.
    :example:
        .. code-block: yaml
            policies:
              - name: asg-add-owner-tag
                resource: asg
                filters:
                  - "tag:OwnerName": absent
                actions:
                  - type: tag
                    key: OwnerName
                    value: OwnerName
                    propagate: true
    """
    schema = type_schema(
        'tag',
        key={'type': 'string'},
        value={'type': 'string'},
        # Backwards compatibility
        tag={'type': 'string'},
        msg={'type': 'string'},
        propagate={'type': 'boolean'},
        aliases=('mark',)
    )
    permissions = ('autoscaling:CreateOrUpdateTags',)
    # Tag API calls are issued one asg per batch.
    batch_size = 1

    def process(self, asgs):
        # 'tag'/'msg' are deprecated spellings of 'key'/'value'.
        key = self.data.get('key', self.data.get('tag', DEFAULT_TAG))
        value = self.data.get(
            'value', self.data.get(
                'msg', 'AutoScaleGroup does not meet policy guidelines'))
        return self.tag(asgs, key, value)

    def tag(self, asgs, key, value):
        error = None
        with self.executor_factory(max_workers=3) as w:
            futures = {}
            for asg_set in chunks(asgs, self.batch_size):
                futures[w.submit(
                    self.process_asg_set, asg_set, key, value)] = asg_set
            for f in as_completed(futures):
                asg_set = futures[f]
                if f.exception():
                    # Record the error so it is re-raised after all
                    # batches complete (mirrors RemoveTag.process;
                    # previously failures were logged but swallowed).
                    error = f.exception()
                    self.log.exception(
                        "Exception tagging tag:%s error:%s asg:%s" % (
                            self.data.get('key', DEFAULT_TAG),
                            f.exception(),
                            ", ".join([a['AutoScalingGroupName']
                                       for a in asg_set])))
        if error:
            raise error

    def process_asg_set(self, asgs, key, value):
        session = local_session(self.manager.session_factory)
        client = session.client('autoscaling')
        # The schema documents 'propagate'; honor it, while keeping the
        # legacy 'propagate_launch' key for backwards compatibility.
        propagate = self.data.get(
            'propagate', self.data.get('propagate_launch', True))
        tags = [
            dict(Key=key, ResourceType='auto-scaling-group', Value=value,
                 PropagateAtLaunch=propagate,
                 ResourceId=a['AutoScalingGroupName']) for a in asgs]
        self.manager.retry(client.create_or_update_tags, Tags=tags)
@actions.register('propagate-tags')
class PropagateTags(Action):
    """Propagate tags to an asg instances.
    In AWS changing an asg tag does not propagate to instances.
    This action exists to do that, and can also trim older tags
    not present on the asg anymore that are present on instances.
    :example:
        .. code-block: yaml
            policies:
              - name: asg-propagate-required
                resource: asg
                filters:
                  - "tag:OwnerName": present
                actions:
                  - type: propagate-tags
                    tags:
                      - OwnerName
    """
    schema = type_schema(
        'propagate-tags',
        tags={'type': 'array', 'items': {'type': 'string'}},
        trim={'type': 'boolean'})
    permissions = ('ec2:DeleteTags', 'ec2:CreateTags')
    def validate(self):
        if not isinstance(self.data.get('tags', []), (list, tuple)):
            raise ValueError("No tags specified")
        return self
    def process(self, asgs):
        if not asgs:
            return
        if self.data.get('trim', False):
            # instance id -> instance record, needed for tag pruning.
            self.instance_map = self.get_instance_map(asgs)
        with self.executor_factory(max_workers=10) as w:
            instance_count = sum(list(w.map(self.process_asg, asgs)))
        self.log.info("Applied tags to %d instances" % instance_count)
    def process_asg(self, asg):
        """Copy the asg's propagate-at-launch tags onto its instances.

        Returns the number of instances tagged.
        """
        client = local_session(self.manager.session_factory).client('ec2')
        instance_ids = [i['InstanceId'] for i in asg['Instances']]
        # aws:-prefixed tags are reserved and cannot be set by users.
        tag_map = {t['Key']: t['Value'] for t in asg.get('Tags', [])
                   if t['PropagateAtLaunch'] and not t['Key'].startswith('aws:')}
        if self.data.get('tags'):
            # Restrict propagation to the explicitly requested keys.
            tag_map = {
                k: v for k, v in tag_map.items()
                if k in self.data['tags']}
        tag_set = set(tag_map)
        if self.data.get('trim', False):
            instances = [self.instance_map[i] for i in instance_ids]
            self.prune_instance_tags(client, asg, tag_set, instances)
        if not self.manager.config.dryrun:
            client.create_tags(
                Resources=instance_ids,
                Tags=[{'Key': k, 'Value': v} for k, v in tag_map.items()])
        return len(instance_ids)
    def prune_instance_tags(self, client, asg, tag_set, instances):
        """Remove tags present on all asg instances which are not present
        on the asg.
        """
        instance_tags = Counter()
        instance_count = len(instances)
        remove_tags = []
        extra_tags = []
        for i in instances:
            instance_tags.update([
                t['Key'] for t in i['Tags']
                if not t['Key'].startswith('aws:')])
        for k, v in instance_tags.items():
            # Tags missing from some instances are "extra"; tags on every
            # instance but absent from the asg are candidates for removal.
            if not v >= instance_count:
                extra_tags.append(k)
                continue
            if k not in tag_set:
                remove_tags.append(k)
        if remove_tags:
            log.debug("Pruning asg:%s instances:%d of old tags: %s" % (
                asg['AutoScalingGroupName'], instance_count, remove_tags))
        if extra_tags:
            log.debug("Asg: %s has uneven tags population: %s" % (
                asg['AutoScalingGroupName'], instance_tags))
        # Remove orphan tags
        remove_tags.extend(extra_tags)
        if not self.manager.config.dryrun:
            client.delete_tags(
                Resources=[i['InstanceId'] for i in instances],
                Tags=[{'Key': t} for t in remove_tags])
    def get_instance_map(self, asgs):
        """Return {instance_id: instance} for all instances in the asgs."""
        instance_ids = [
            i['InstanceId'] for i in
            list(itertools.chain(*[
                g['Instances']
                for g in asgs if g['Instances']]))]
        if not instance_ids:
            return {}
        return {i['InstanceId']: i for i in
                self.manager.get_resource_manager(
                    'ec2').get_resources(instance_ids)}
@actions.register('rename-tag')
class RenameTag(Action):
    """Rename a tag on an AutoScaleGroup.
    :example:
        .. code-block: yaml
            policies:
              - name: asg-rename-owner-tag
                resource: asg
                filters:
                  - "tag:OwnerNames": present
                actions:
                  - type: rename-tag
                    propagate: true
                    source: OwnerNames
                    dest: OwnerName
    """
    schema = type_schema(
        'rename-tag', required=['source', 'dest'],
        propagate={'type': 'boolean'},
        source={'type': 'string'},
        dest={'type': 'string'})
    def get_permissions(self):
        permissions = (
            'autoscaling:CreateOrUpdateTags',
            'autoscaling:DeleteTags')
        # Propagating to instances additionally needs ec2 tag permissions.
        if self.data.get('propagate', True):
            permissions += ('ec2:CreateTags', 'ec2:DeleteTags')
        return permissions
    def process(self, asgs):
        source = self.data.get('source')
        dest = self.data.get('dest')
        count = len(asgs)
        # Only operate on asgs that actually carry the source tag.
        filtered = []
        for a in asgs:
            for t in a.get('Tags'):
                if t['Key'] == source:
                    filtered.append(a)
                    break
        asgs = filtered
        self.log.info("Filtered from %d asgs to %d", count, len(asgs))
        self.log.info(
            "Renaming %s to %s on %d asgs", source, dest, len(filtered))
        with self.executor_factory(max_workers=3) as w:
            list(w.map(self.process_asg, asgs))
    def process_asg(self, asg):
        """Move source tag to destination tag.
        Check tag count on asg
        Create new tag tag
        Delete old tag
        Check tag count on instance
        Create new tag
        Delete old tag
        """
        source_tag = self.data.get('source')
        tag_map = {t['Key']: t for t in asg.get('Tags', [])}
        source = tag_map[source_tag]
        destination_tag = self.data.get('dest')
        propagate = self.data.get('propagate', True)
        client = local_session(
            self.manager.session_factory).client('autoscaling')
        # technically safer to create first, but running into
        # max tags constraints, otherwise.
        #
        # delete_first = len([t for t in tag_map if not t.startswith('aws:')])
        client.delete_tags(Tags=[
            {'ResourceId': asg['AutoScalingGroupName'],
             'ResourceType': 'auto-scaling-group',
             'Key': source_tag,
             'Value': source['Value']}])
        client.create_or_update_tags(Tags=[
            {'ResourceId': asg['AutoScalingGroupName'],
             'ResourceType': 'auto-scaling-group',
             'PropagateAtLaunch': propagate,
             'Key': destination_tag,
             'Value': source['Value']}])
        if propagate:
            self.propagate_instance_tag(source, destination_tag, asg)
    def propagate_instance_tag(self, source, destination_tag, asg):
        # Rename on each instance: delete the old key, create the new one
        # with the original value.
        client = local_session(self.manager.session_factory).client('ec2')
        client.delete_tags(
            Resources=[i['InstanceId'] for i in asg['Instances']],
            Tags=[{"Key": source['Key']}])
        client.create_tags(
            Resources=[i['InstanceId'] for i in asg['Instances']],
            Tags=[{'Key': destination_tag, 'Value': source['Value']}])
@actions.register('mark-for-op')
class MarkForOp(Tag):
    """Action to create a delayed action for a later date
    :example:
        .. code-block: yaml
            policies:
              - name: asg-suspend-schedule
                resource: asg
                filters:
                  - type: value
                    key: MinSize
                    value: 2
                actions:
                  - type: mark-for-op
                    tag: custodian_suspend
                    message: "Suspending: {op}@{action_date}"
                    op: suspend
                    days: 7
    """
    schema = type_schema(
        'mark-for-op',
        op={'enum': ['suspend', 'resume', 'delete']},
        key={'type': 'string'},
        tag={'type': 'string'},
        message={'type': 'string'},
        days={'type': 'number', 'minimum': 0})
    default_template = (
        'AutoScaleGroup does not meet org policy: {op}@{action_date}')

    def process(self, asgs):
        template = self.data.get('message', self.default_template)
        key = self.data.get('key', self.data.get('tag', DEFAULT_TAG))
        op = self.data.get('op', 'suspend')
        days = self.data.get('days', 4)
        action_date = (
            datetime.now(tz=tzutc()) + timedelta(days=days)
        ).strftime('%Y/%m/%d')
        try:
            msg = template.format(op=op, action_date=action_date)
        except Exception:
            # Malformed user template: warn and fall back to the default.
            self.log.warning("invalid template %s" % template)
            msg = self.default_template.format(
                op=op, action_date=action_date)
        self.log.info("Tagging %d asgs for %s on %s" % (
            len(asgs), op, action_date))
        self.tag(asgs, key, msg)
@actions.register('suspend')
class Suspend(Action):
    """Action to suspend ASG processes and instances.

    AWS ASG suspend/resume and process docs https://goo.gl/XYtKQ8

    :example:

        .. code-block: yaml

            policies:
              - name: asg-suspend-processes
                resource: asg
                filters:
                  - "tag:SuspendTag": present
                actions:
                  - type: suspend
    """

    permissions = ("autoscaling:SuspendProcesses", "ec2:StopInstances")

    ASG_PROCESSES = [
        "Launch",
        "Terminate",
        "HealthCheck",
        "ReplaceUnhealthy",
        "AZRebalance",
        "AlarmNotification",
        "ScheduledActions",
        "AddToLoadBalancer"]

    schema = type_schema(
        'suspend',
        exclude={
            'type': 'array',
            'title': 'ASG Processes to not suspend',
            'items': {'enum': ASG_PROCESSES}})

    # Rebound as a set only after the schema above has captured the
    # (JSON-serializable) list form; the set is used for difference().
    ASG_PROCESSES = set(ASG_PROCESSES)

    def process(self, asgs):
        with self.executor_factory(max_workers=3) as w:
            list(w.map(self.process_asg, asgs))

    def process_asg(self, asg):
        """Multistep process to stop an asg a priori of setup.

        - suspend processes
        - stop instances
        """
        session = local_session(self.manager.session_factory)
        asg_client = session.client('autoscaling')
        # Suspend everything except the user-excluded processes.
        processes = list(self.ASG_PROCESSES.difference(
            self.data.get('exclude', ())))
        try:
            self.manager.retry(
                asg_client.suspend_processes,
                ScalingProcesses=processes,
                AutoScalingGroupName=asg['AutoScalingGroupName'])
        except ClientError as e:
            # ValidationError here indicates the group no longer exists.
            if e.response['Error']['Code'] == 'ValidationError':
                return
            raise
        ec2_client = session.client('ec2')
        try:
            instance_ids = [i['InstanceId'] for i in asg['Instances']]
            if not instance_ids:
                return
            retry = get_retry((
                'RequestLimitExceeded', 'Client.RequestLimitExceeded'))
            retry(ec2_client.stop_instances, InstanceIds=instance_ids)
        except ClientError as e:
            # Instances already terminated or mid-transition: log and
            # continue rather than failing the whole action.
            if e.response['Error']['Code'] in (
                    'InvalidInstanceID.NotFound',
                    'IncorrectInstanceState'):
                log.warning("Erroring stopping asg instances %s %s" % (
                    asg['AutoScalingGroupName'], e))
                return
            raise
@actions.register('resume')
class Resume(Action):
    """Resume a suspended autoscale group and its instances.

    Parameter 'delay' is the amount of time (in seconds) to wait between
    starting the groups' instances and resuming the scaling processes
    (default value: 30).

    :example:

        .. code-block: yaml

            policies:
              - name: asg-resume-processes
                resource: asg
                filters:
                  - "tag:Resume": present
                actions:
                  - type: resume
                    delay: 300
    """

    schema = type_schema('resume', delay={'type': 'number'})
    permissions = ("autoscaling:ResumeProcesses", "ec2:StartInstances")

    def process(self, asgs):
        original_count = len(asgs)
        # Only groups that actually have suspended processes need work.
        asgs = [a for a in asgs if a['SuspendedProcesses']]
        self.delay = self.data.get('delay', 30)
        self.log.debug("Filtered from %d to %d suspended asgs",
                       original_count, len(asgs))

        # Phase 1: start the groups' EC2 instances.
        with self.executor_factory(max_workers=3) as w:
            futures = {}
            for a in asgs:
                futures[w.submit(self.resume_asg_instances, a)] = a
            for f in as_completed(futures):
                if f.exception():
                    log.error("Traceback resume asg:%s instances error:%s" % (
                        futures[f]['AutoScalingGroupName'],
                        f.exception()))
                    continue

        # Give the instances time to pass health checks before resuming
        # scaling processes, or the ASG may replace them.
        log.debug("Sleeping for asg health check grace")
        time.sleep(self.delay)

        # Phase 2: resume the suspended scaling processes themselves.
        with self.executor_factory(max_workers=3) as w:
            futures = {}
            for a in asgs:
                futures[w.submit(self.resume_asg, a)] = a
            for f in as_completed(futures):
                if f.exception():
                    log.error("Traceback resume asg:%s error:%s" % (
                        futures[f]['AutoScalingGroupName'],
                        f.exception()))

    def resume_asg_instances(self, asg):
        """Start the EC2 instances belonging to a single ASG."""
        session = local_session(self.manager.session_factory)
        ec2_client = session.client('ec2')
        instance_ids = [i['InstanceId'] for i in asg['Instances']]
        if not instance_ids:
            return
        retry = get_retry((
            'RequestLimitExceeded', 'Client.RequestLimitExceeded'))
        retry(ec2_client.start_instances, InstanceIds=instance_ids)

    def resume_asg(self, asg):
        """Resume all suspended scaling processes on a single ASG."""
        session = local_session(self.manager.session_factory)
        asg_client = session.client('autoscaling')
        self.manager.retry(
            asg_client.resume_processes,
            AutoScalingGroupName=asg['AutoScalingGroupName'])
@actions.register('delete')
class Delete(Action):
    """Action to delete an ASG.

    The 'force' parameter is needed when deleting an ASG that has instances
    attached to it.

    :example:

        .. code-block: yaml

            policies:
              - name: asg-unencrypted
                resource: asg
                filters:
                  - type: not-encrypted
                    exclude_image: true
                actions:
                  - type: delete
                    force: true
    """

    schema = type_schema('delete', force={'type': 'boolean'})
    permissions = ("autoscaling:DeleteAutoScalingGroup",)

    def process(self, asgs):
        with self.executor_factory(max_workers=3) as w:
            list(w.map(self.process_asg, asgs))

    @worker
    def process_asg(self, asg):
        """Delete a single group; ForceDelete also removes its instances."""
        force_delete = self.data.get('force', False)
        if force_delete:
            log.info('Forcing deletion of Auto Scaling group %s',
                     asg['AutoScalingGroupName'])
        session = local_session(self.manager.session_factory)
        asg_client = session.client('autoscaling')
        try:
            self.manager.retry(
                asg_client.delete_auto_scaling_group,
                AutoScalingGroupName=asg['AutoScalingGroupName'],
                ForceDelete=force_delete)
        except ClientError as e:
            # ValidationError covers "already deleted" and groups that
            # cannot be deleted without force; log and continue.
            if e.response['Error']['Code'] == 'ValidationError':
                log.warning("Erroring deleting asg %s %s",
                            asg['AutoScalingGroupName'], e)
                return
            raise
@resources.register('launch-config')
class LaunchConfig(query.QueryResourceManager):
    """Resource manager for ASG launch configurations."""

    class resource_type(object):
        # Metadata telling the query layer how to enumerate and
        # identify launch configurations.
        service = 'autoscaling'
        type = 'launchConfiguration'
        id = name = 'LaunchConfigurationName'
        date = 'CreatedTime'
        dimension = None
        enum_spec = (
            'describe_launch_configurations', 'LaunchConfigurations', None)
        filter_name = 'LaunchConfigurationNames'
        filter_type = 'list'
        config_type = 'AWS::AutoScaling::LaunchConfiguration'

    def get_source(self, source_type):
        # Choose between a live API describe and an AWS Config snapshot
        # as the data source for this resource type.
        if source_type == 'describe':
            return DescribeLaunchConfig(self)
        elif source_type == 'config':
            return query.ConfigSource(self)
        raise ValueError('invalid source %s' % source_type)
class DescribeLaunchConfig(query.DescribeSource):
    """Describe source that strips UserData from launch config records."""

    def augment(self, resources):
        for r in resources:
            # Drop the UserData blob from each record -- presumably to
            # keep resource records small/non-sensitive; confirm before
            # relying on its absence downstream.
            r.pop('UserData', None)
        return resources
@LaunchConfig.filter_registry.register('age')
class LaunchConfigAge(AgeFilter):
    """Filter ASG launch configuration by age (in days).

    :example:

        .. code-block: yaml

            policies:
              - name: asg-launch-config-old
                resource: launch-config
                filters:
                  - type: age
                    days: 90
                    op: ge
    """

    # Attribute the base AgeFilter compares against the 'days' threshold.
    date_attribute = "CreatedTime"
    schema = type_schema(
        'age',
        op={'type': 'string', 'enum': list(OPERATORS.keys())},
        days={'type': 'number'})
@LaunchConfig.filter_registry.register('unused')
class UnusedLaunchConfig(Filter):
    """Filters all launch configurations that are not in use but exist.

    :example:

        .. code-block: yaml

            policies:
              - name: asg-unused-launch-config
                resource: launch-config
                filters:
                  - unused
    """

    schema = type_schema('unused')

    def get_permissions(self):
        # This filter enumerates ASGs, so it needs the asg manager's
        # permissions rather than its own.
        return self.manager.get_resource_manager('asg').get_permissions()

    def process(self, configs, event=None):
        asgs = self.manager.get_resource_manager('asg').resources()
        # Groups lacking 'LaunchConfigurationName' contribute their group
        # name instead -- presumably just a harmless placeholder that
        # won't match a config name; confirm if that assumption matters.
        self.used = set([
            a.get('LaunchConfigurationName', a['AutoScalingGroupName'])
            for a in asgs])
        return super(UnusedLaunchConfig, self).process(configs)

    def __call__(self, config):
        # A config is "unused" when no ASG references it.
        return config['LaunchConfigurationName'] not in self.used
@LaunchConfig.action_registry.register('delete')
class LaunchConfigDelete(Action):
    """Action to delete launch configurations (typically unused ones).

    :example:

        .. code-block: yaml

            policies:
              - name: asg-unused-launch-config-delete
                resource: launch-config
                filters:
                  - unused
                actions:
                  - delete
    """

    schema = type_schema('delete')
    permissions = ("autoscaling:DeleteLaunchConfiguration",)

    def process(self, configs):
        with self.executor_factory(max_workers=2) as w:
            list(w.map(self.process_config, configs))

    @worker
    def process_config(self, config):
        session = local_session(self.manager.session_factory)
        client = session.client('autoscaling')
        try:
            client.delete_launch_configuration(
                LaunchConfigurationName=config[
                    'LaunchConfigurationName'])
        except ClientError as e:
            # Catch already deleted
            if e.response['Error']['Code'] == 'ValidationError':
                return
            raise
| apache-2.0 |
uwdata/termite-treetm | web2py/gluon/contrib/minify/minify.py | 43 | 4663 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
High-level CSS and JS minification class for web2py.
Called by response.include_files()
Created by: Ross Peoples <ross.peoples@gmail.com>
Modified by: Massimo Di Pierro <massimo.dipierro@gmail.com>
"""
import cssmin
import jsmin
import os
import hashlib
import re
def read_binary_file(filename):
    """Return the entire contents of *filename* as bytes.

    Uses a context manager so the file handle is closed even if
    read() raises (the original leaked the handle on error).
    """
    with open(filename, 'rb') as f:
        return f.read()
def write_binary_file(filename, data):
    """Write *data* to *filename* in binary mode, replacing any content.

    Uses a context manager so the handle is closed (and buffers flushed)
    even if write() raises (the original leaked the handle on error).
    """
    with open(filename, 'wb') as f:
        f.write(data)
def fix_links(css, static_path):
    """Rewrite relative ``url(../...)`` references in *css*.

    Each quoted ``url("../`` or ``url('../`` prefix is replaced so the
    path is rooted at *static_path* instead, preserving the quote
    character.  Absolute references are left untouched.
    """
    relative_url = re.compile(r'url\((["\'])\.\./')
    return relative_url.sub('url(\\1' + static_path, css)
def minify(files, path_info, folder, optimize_css, optimize_js,
           ignore_concat=[],
           ignore_minify=['/jquery.js', '/anytime.js']):
    """
    Input:
    files: is a list of URLs to JS and CSS files (not repeated)
    path_info: is the URL of a temp static folder
    folder: is the application folder
    optimize_css: is a string of the form 'concat|minify|inline'
    optimize_js: is a string of the form 'concat|minify|inline'
    (minify requires concat, inline requires concat also)

    Returns a new list of:
    - filename (absolute or relative, css or js, actual or temporary) or
    - ('css:inline','...css..')
    - ('js:inline','...js..')
    """
    # Substring tests: e.g. 'concat|minify' enables both flags.
    optimize_css = optimize_css or ''
    optimize_js = optimize_js or ''
    concat_css = 'concat' in optimize_css
    minify_css = 'minify' in optimize_css
    inline_css = 'inline' in optimize_css
    concat_js = 'concat' in optimize_js
    minify_js = 'minify' in optimize_js
    inline_js = 'inline' in optimize_js
    # path_info is e.g. '/app/static/_1.2.3' -> static URL prefix + temp dir.
    static_path, temp = path_info.rsplit('/', 1)
    new_files = []
    css = []
    js = []
    processed = []
    for k, filename in enumerate(files):
        # External (non-rooted) or explicitly excluded files pass through
        # untouched.
        if not filename.startswith('/') or \
                any(filename.endswith(x)
                    for x in ignore_concat):
            new_files.append(filename)
            continue
        # Map the URL back to a file under the app's static folder.
        abs_filename = os.path.join(
            folder, 'static', filename[len(static_path) + 1:])
        if filename.lower().endswith('.css'):
            processed.append(filename)
            spath_info, sfilename = \
                path_info.split('/'), filename.split('/')
            # Find the first path segment where the file's URL diverges
            # from path_info; used below to rebase relative url() links.
            u = 0
            for i, a in enumerate(sfilename):
                try:
                    if a != spath_info[i]:
                        u = i
                        break
                except:
                    # IndexError when the file path is longer than
                    # path_info; keep the last computed divergence point.
                    pass
            if concat_css:
                contents = read_binary_file(abs_filename)
                replacement = '/'.join(spath_info[:u]) + '/'
                contents = fix_links(contents, replacement)
                if minify_css:
                    css.append(cssmin.cssmin(contents))
                else:
                    css.append(contents)
            else:
                css.append(filename)
        elif filename.lower().endswith('.js'):
            processed.append(filename)
            if concat_js:
                contents = read_binary_file(abs_filename)
                # Already-minified files and the ignore list are
                # concatenated as-is.
                if minify_js and \
                        not filename.endswith('.min.js') and \
                        not any(filename.endswith(x)
                                for x in ignore_minify):
                    js.append(jsmin.jsmin(contents))
                else:
                    js.append(contents)
            else:
                js.append(filename)
    # Cache key: stable hash of the processed file list (Python 2 str;
    # under Python 3 md5 would require bytes -- TODO confirm if ported).
    dest_key = hashlib.md5(repr(processed)).hexdigest()
    if css and concat_css:
        css = '\n\n'.join(contents for contents in css)
        if not inline_css:
            # Write the concatenated bundle into the temp static folder.
            temppath = os.path.join(folder, 'static', temp)
            if not os.path.exists(temppath):
                os.mkdir(temppath)
            dest = "compressed_%s.css" % dest_key
            # NOTE: local name shadows the stdlib 'tempfile' module name
            # (not imported here, so harmless).
            tempfile = os.path.join(temppath, dest)
            write_binary_file(tempfile, css)
            css = path_info + '/%s' % dest
            new_files.append(css)
        else:
            new_files.append(('css:inline', css))
    else:
        # No concatenation: pass through the collected filenames.
        new_files += css
    if js and concat_js:
        js = '\n'.join(contents for contents in js)
        if inline_js:
            js = ('js:inline', js)
        else:
            temppath = os.path.join(folder, 'static', temp)
            if not os.path.exists(temppath):
                os.mkdir(temppath)
            dest = "compressed_%s.js" % dest_key
            tempfile = os.path.join(folder, 'static', temp, dest)
            write_binary_file(tempfile, js)
            js = path_info + '/%s' % dest
            new_files.append(js)
    else:
        new_files += js
    return new_files
| bsd-3-clause |
underyx/ansible-modules-core | cloud/digital_ocean/digital_ocean_sshkey.py | 37 | 4948 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
DOCUMENTATION = '''
---
module: digital_ocean_sshkey
short_description: Create/delete an SSH key in DigitalOcean
description:
- Create/delete an SSH key.
version_added: "1.6"
options:
state:
description:
- Indicate desired state of the target.
default: present
choices: ['present', 'absent']
client_id:
description:
- DigitalOcean manager id.
api_key:
description:
- DigitalOcean api key.
id:
description:
- Numeric, the SSH key id you want to operate on.
name:
description:
- String, this is the name of an SSH key to create or destroy.
ssh_pub_key:
description:
- The public SSH key you want to add to your account.
notes:
- Two environment variables can be used, DO_CLIENT_ID and DO_API_KEY.
'''
EXAMPLES = '''
# Ensure a SSH key is present
# If a key matches this name, will return the ssh key id and changed = False
# If no existing key matches this name, a new key is created, the ssh key id is returned and changed = False
- digital_ocean_sshkey: >
state=present
name=my_ssh_key
ssh_pub_key='ssh-rsa AAAA...'
client_id=XXX
api_key=XXX
'''
import sys
import os
import time
try:
from dopy.manager import DoError, DoManager
except ImportError as e:
print "failed=True msg='dopy required for this module'"
sys.exit(1)
class TimeoutError(DoError):
    """DoError raised when an operation exceeds its deadline.

    Carries the id of the resource the timed-out operation concerned so
    callers can report or retry against it.
    """

    def __init__(self, msg, id):
        DoError.__init__(self, msg)
        self.id = id
class JsonfyMixIn(object):
    """Mix-in giving subclasses a plain-dict view for JSON serialization."""

    def to_json(self):
        # vars(obj) is the documented equivalent of obj.__dict__.
        return vars(self)
class SSH(JsonfyMixIn):
    """Thin wrapper over dopy's SSH-key API.

    setup() must be called once to bind the shared DoManager before any
    other class or instance method is used.
    """

    # Shared dopy manager, populated by setup().
    manager = None

    def __init__(self, ssh_key_json):
        # Adopt every field of the API response dict as an attribute.
        self.__dict__.update(ssh_key_json)
    update_attr = __init__

    def destroy(self):
        # Delete this key via the API; dopy raises DoError on failure.
        self.manager.destroy_ssh_key(self.id)
        return True

    @classmethod
    def setup(cls, client_id, api_key):
        cls.manager = DoManager(client_id, api_key)

    @classmethod
    def find(cls, name):
        """Return the first key named *name*, or False if none/empty name."""
        if not name:
            return False
        keys = cls.list_all()
        for key in keys:
            if key.name == name:
                return key
        return False

    @classmethod
    def list_all(cls):
        json = cls.manager.all_ssh_keys()
        # Python 2 map() -> list of SSH instances (this module is py2).
        return map(cls, json)

    @classmethod
    def add(cls, name, key_pub):
        """Create a new key and return it wrapped as an SSH instance."""
        json = cls.manager.new_ssh_key(name, key_pub)
        return cls(json)
def core(module):
    """Drive the present/absent state machine for a DigitalOcean SSH key.

    Credentials come from the module params or, failing that, from the
    DO_CLIENT_ID / DO_API_KEY environment variables.  Every path exits
    through module.exit_json()/fail_json().
    """
    def getkeyordie(k):
        # Fetch a required module parameter or abort the run.
        v = module.params[k]
        if v is None:
            module.fail_json(msg='Unable to load %s' % k)
        return v

    try:
        # params['client_id'] will be None even if client_id is not passed in
        client_id = module.params['client_id'] or os.environ['DO_CLIENT_ID']
        api_key = module.params['api_key'] or os.environ['DO_API_KEY']
    except KeyError as e:
        # 'except ... as' form for consistency with the other handlers in
        # this module (was the py2-only 'except KeyError, e' spelling).
        module.fail_json(msg='Unable to load %s' % e.message)

    state = module.params['state']

    SSH.setup(client_id, api_key)
    name = getkeyordie('name')
    if state in ('present',):
        # One-element tuple: the original ('present') was a plain string,
        # turning this into a substring test rather than membership.
        key = SSH.find(name)
        if key:
            # Idempotent create: an existing key of the same name is reused.
            module.exit_json(changed=False, ssh_key=key.to_json())
        key = SSH.add(name, getkeyordie('ssh_pub_key'))
        module.exit_json(changed=True, ssh_key=key.to_json())

    elif state in ('absent',):
        key = SSH.find(name)
        if not key:
            module.exit_json(changed=False, msg='SSH key with the name of %s is not found.' % name)
        key.destroy()
        module.exit_json(changed=True)
def main():
    """Ansible entry point: declare the argument spec and dispatch to core()."""
    module = AnsibleModule(
        argument_spec = dict(
            state = dict(choices=['present', 'absent'], default='present'),
            client_id = dict(aliases=['CLIENT_ID'], no_log=True),
            api_key = dict(aliases=['API_KEY'], no_log=True),
            name = dict(type='str'),
            id = dict(aliases=['droplet_id'], type='int'),
            ssh_pub_key = dict(type='str'),
        ),
        required_one_of = (
            ['id', 'name'],
        ),
    )

    try:
        core(module)
    except TimeoutError as e:
        # Preserve the resource id so callers can inspect/retry.
        module.fail_json(msg=str(e), id=e.id)
    except (DoError, Exception) as e:
        module.fail_json(msg=str(e))

# import module snippets
from ansible.module_utils.basic import *

main()
| gpl-3.0 |
2014c2g4/2015cda_g7 | static/Brython3.1.1-20150328-091302/Lib/abc.py | 765 | 8057 | # Copyright 2007 Google, Inc. All Rights Reserved.
# Licensed to PSF under a Contributor Agreement.
"""Abstract Base Classes (ABCs) according to PEP 3119."""
from _weakrefset import WeakSet
def abstractmethod(funcobj):
    """Mark *funcobj* as abstract and return it unchanged.

    Only meaningful on methods of classes whose metaclass is (derived
    from) ABCMeta: such a class cannot be instantiated while any
    abstract method remains un-overridden.  The abstract implementation
    itself stays callable through the normal ``super()`` mechanisms.

    Usage:

        class C(metaclass=ABCMeta):
            @abstractmethod
            def my_abstract_method(self, ...):
                ...
    """
    setattr(funcobj, '__isabstractmethod__', True)
    return funcobj
class abstractclassmethod(classmethod):
    """A classmethod that is also marked abstract.

    Deprecated: use ``classmethod`` together with ``abstractmethod``
    instead.

    Usage:

        class C(metaclass=ABCMeta):
            @abstractclassmethod
            def my_abstract_classmethod(cls, ...):
                ...
    """

    __isabstractmethod__ = True

    def __init__(self, callable):
        # Flag the wrapped function as well, so ABCMeta's namespace scan
        # and plain introspection both see the marker.
        callable.__isabstractmethod__ = True
        classmethod.__init__(self, callable)
class abstractstaticmethod(staticmethod):
    """A staticmethod that is also marked abstract.

    Deprecated: use ``staticmethod`` together with ``abstractmethod``
    instead.

    Usage:

        class C(metaclass=ABCMeta):
            @abstractstaticmethod
            def my_abstract_staticmethod(...):
                ...
    """

    __isabstractmethod__ = True

    def __init__(self, callable):
        # Flag the wrapped function as well, so ABCMeta's namespace scan
        # and plain introspection both see the marker.
        callable.__isabstractmethod__ = True
        staticmethod.__init__(self, callable)
class abstractproperty(property):
    """A property that is also marked abstract.

    A class with an ABCMeta-derived metaclass cannot be instantiated
    until every abstract property is overridden.  Read-write form:

        class C(metaclass=ABCMeta):
            def getx(self): ...
            def setx(self, value): ...
            x = abstractproperty(getx, setx)

    Deprecated: use ``property`` together with ``abstractmethod``
    instead.
    """

    __isabstractmethod__ = True
class ABCMeta(type):
    """Metaclass for defining Abstract Base Classes (ABCs).

    Use this metaclass to create an ABC.  An ABC can be subclassed
    directly, and then acts as a mix-in class.  You can also register
    unrelated concrete classes (even built-in classes) and unrelated
    ABCs as 'virtual subclasses' -- these and their descendants will
    be considered subclasses of the registering ABC by the built-in
    issubclass() function, but the registering ABC won't show up in
    their MRO (Method Resolution Order) nor will method
    implementations defined by the registering ABC be callable (not
    even via super()).
    """

    # A global counter that is incremented each time a class is
    # registered as a virtual subclass of anything. It forces the
    # negative cache to be cleared before its next use.
    _abc_invalidation_counter = 0

    def __new__(mcls, name, bases, namespace):
        cls = super().__new__(mcls, name, bases, namespace)
        # Compute set of abstract method names: anything flagged in this
        # class's namespace, plus inherited abstracts not overridden here.
        abstracts = {name
                     for name, value in namespace.items()
                     if getattr(value, "__isabstractmethod__", False)}
        for base in bases:
            for name in getattr(base, "__abstractmethods__", set()):
                value = getattr(cls, name, None)
                if getattr(value, "__isabstractmethod__", False):
                    abstracts.add(name)
        cls.__abstractmethods__ = frozenset(abstracts)
        # Set up inheritance registry; WeakSets so registration does not
        # keep subclasses alive.
        cls._abc_registry = WeakSet()
        cls._abc_cache = WeakSet()
        cls._abc_negative_cache = WeakSet()
        cls._abc_negative_cache_version = ABCMeta._abc_invalidation_counter
        return cls

    def register(cls, subclass):
        """Register a virtual subclass of an ABC.

        Returns the subclass, to allow usage as a class decorator.
        """
        if not isinstance(subclass, type):
            raise TypeError("Can only register classes")
        if issubclass(subclass, cls):
            return subclass  # Already a subclass
        # Subtle: test for cycles *after* testing for "already a subclass";
        # this means we allow X.register(X) and interpret it as a no-op.
        if issubclass(cls, subclass):
            # This would create a cycle, which is bad for the algorithm below
            raise RuntimeError("Refusing to create an inheritance cycle")
        cls._abc_registry.add(subclass)
        ABCMeta._abc_invalidation_counter += 1  # Invalidate negative cache
        return subclass

    def _dump_registry(cls, file=None):
        """Debug helper to print the ABC registry."""
        print("Class: %s.%s" % (cls.__module__, cls.__name__), file=file)
        print("Inv.counter: %s" % ABCMeta._abc_invalidation_counter, file=file)
        for name in sorted(cls.__dict__.keys()):
            if name.startswith("_abc_"):
                value = getattr(cls, name)
                print("%s: %r" % (name, value), file=file)

    def __instancecheck__(cls, instance):
        """Override for isinstance(instance, cls)."""
        # Inline the cache checking
        subclass = instance.__class__
        if subclass in cls._abc_cache:
            return True
        subtype = type(instance)
        if subtype is subclass:
            # Common case: __class__ and type() agree; consult the
            # negative cache before doing the full subclass check.
            if (cls._abc_negative_cache_version ==
                    ABCMeta._abc_invalidation_counter and
                    subclass in cls._abc_negative_cache):
                return False
            # Fall back to the subclass check.
            return cls.__subclasscheck__(subclass)
        return any(cls.__subclasscheck__(c) for c in {subclass, subtype})

    def __subclasscheck__(cls, subclass):
        """Override for issubclass(subclass, cls)."""
        # Check cache
        if subclass in cls._abc_cache:
            return True
        # Check negative cache; may have to invalidate
        if cls._abc_negative_cache_version < ABCMeta._abc_invalidation_counter:
            # Invalidate the negative cache
            cls._abc_negative_cache = WeakSet()
            cls._abc_negative_cache_version = ABCMeta._abc_invalidation_counter
        elif subclass in cls._abc_negative_cache:
            return False
        # Check the subclass hook
        ok = cls.__subclasshook__(subclass)
        if ok is not NotImplemented:
            assert isinstance(ok, bool)
            if ok:
                cls._abc_cache.add(subclass)
            else:
                cls._abc_negative_cache.add(subclass)
            return ok
        # Check if it's a direct subclass
        if cls in getattr(subclass, '__mro__', ()):
            cls._abc_cache.add(subclass)
            return True
        # Check if it's a subclass of a registered class (recursive)
        for rcls in cls._abc_registry:
            if issubclass(subclass, rcls):
                cls._abc_cache.add(subclass)
                return True
        # Check if it's a subclass of a subclass (recursive)
        for scls in cls.__subclasses__():
            if issubclass(subclass, scls):
                cls._abc_cache.add(subclass)
                return True
        # No dice; update negative cache
        cls._abc_negative_cache.add(subclass)
        return False
| gpl-3.0 |
jctanner/ansible | test/units/plugins/connection/test_psrp.py | 56 | 8330 | # -*- coding: utf-8 -*-
# (c) 2018, Jordan Borean <jborean@redhat.com>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import pytest
import sys
from io import StringIO
from units.compat.mock import MagicMock
from ansible.playbook.play_context import PlayContext
from ansible.plugins.loader import connection_loader
from ansible.utils.display import Display
@pytest.fixture(autouse=True)
def psrp_connection():
    """Imports the psrp connection plugin with a mocked pypsrp module for testing"""

    # Take a snapshot of sys.modules before we manipulate it
    orig_modules = sys.modules.copy()
    try:
        fake_pypsrp = MagicMock()
        # Feature flags the plugin probes for on the pypsrp package.
        fake_pypsrp.FEATURES = [
            'wsman_locale',
            'wsman_read_timeout',
            'wsman_reconnections',
        ]

        fake_wsman = MagicMock()
        # Auth-specific kwargs the plugin maps onto connection options;
        # 'mock' entries exist only for these tests.
        fake_wsman.AUTH_KWARGS = {
            "certificate": ["certificate_key_pem", "certificate_pem"],
            "credssp": ["credssp_auth_mechanism", "credssp_disable_tlsv1_2",
                        "credssp_minimum_version"],
            "negotiate": ["negotiate_delegate", "negotiate_hostname_override",
                          "negotiate_send_cbt", "negotiate_service"],
            "mock": ["mock_test1", "mock_test2"],
        }

        # Stub out every pypsrp (and requests.exceptions) import the
        # plugin performs so it loads without the real dependency.
        sys.modules["pypsrp"] = fake_pypsrp
        sys.modules["pypsrp.complex_objects"] = MagicMock()
        sys.modules["pypsrp.exceptions"] = MagicMock()
        sys.modules["pypsrp.host"] = MagicMock()
        sys.modules["pypsrp.powershell"] = MagicMock()
        sys.modules["pypsrp.shell"] = MagicMock()
        sys.modules["pypsrp.wsman"] = fake_wsman
        sys.modules["requests.exceptions"] = MagicMock()

        from ansible.plugins.connection import psrp

        # Take a copy of the original import state vars before we set to an ok import
        orig_has_psrp = psrp.HAS_PYPSRP
        orig_psrp_imp_err = psrp.PYPSRP_IMP_ERR

        yield psrp

        # Restore the plugin's import-state flags for other tests.
        psrp.HAS_PYPSRP = orig_has_psrp
        psrp.PYPSRP_IMP_ERR = orig_psrp_imp_err
    finally:
        # Restore sys.modules back to our pre-shenanigans
        sys.modules = orig_modules
class TestConnectionPSRP(object):
    """Option-processing tests for the psrp connection plugin."""

    # Pairs of (play variables, expected connection attributes after
    # _build_kwargs); only the listed attributes are asserted.
    OPTIONS_DATA = (
        # default options
        (
            {'_extras': {}},
            {
                '_psrp_auth': 'negotiate',
                '_psrp_cert_validation': True,
                '_psrp_configuration_name': 'Microsoft.PowerShell',
                '_psrp_connection_timeout': 30,
                '_psrp_message_encryption': 'auto',
                '_psrp_host': 'inventory_hostname',
                '_psrp_conn_kwargs': {
                    'server': 'inventory_hostname',
                    'port': 5986,
                    'username': None,
                    'password': None,
                    'ssl': True,
                    'path': 'wsman',
                    'auth': 'negotiate',
                    'cert_validation': True,
                    'connection_timeout': 30,
                    'encryption': 'auto',
                    'proxy': None,
                    'no_proxy': False,
                    'max_envelope_size': 153600,
                    'operation_timeout': 20,
                    'certificate_key_pem': None,
                    'certificate_pem': None,
                    'credssp_auth_mechanism': 'auto',
                    'credssp_disable_tlsv1_2': False,
                    'credssp_minimum_version': 2,
                    'negotiate_delegate': None,
                    'negotiate_hostname_override': None,
                    'negotiate_send_cbt': True,
                    'negotiate_service': 'WSMAN',
                    'read_timeout': 30,
                    'reconnection_backoff': 2.0,
                    'reconnection_retries': 0,
                },
                '_psrp_max_envelope_size': 153600,
                '_psrp_ignore_proxy': False,
                '_psrp_operation_timeout': 20,
                '_psrp_pass': None,
                '_psrp_path': 'wsman',
                '_psrp_port': 5986,
                '_psrp_proxy': None,
                '_psrp_protocol': 'https',
                '_psrp_user': None
            },
        ),
        # ssl=False when port defined to 5985
        (
            {'_extras': {}, 'ansible_port': '5985'},
            {
                '_psrp_port': 5985,
                '_psrp_protocol': 'http'
            },
        ),
        # ssl=True when port defined to not 5985
        (
            {'_extras': {}, 'ansible_port': 1234},
            {
                '_psrp_port': 1234,
                '_psrp_protocol': 'https'
            },
        ),
        # port 5986 when ssl=True
        (
            {'_extras': {}, 'ansible_psrp_protocol': 'https'},
            {
                '_psrp_port': 5986,
                '_psrp_protocol': 'https'
            },
        ),
        # port 5985 when ssl=False
        (
            {'_extras': {}, 'ansible_psrp_protocol': 'http'},
            {
                '_psrp_port': 5985,
                '_psrp_protocol': 'http'
            },
        ),
        # psrp extras
        (
            {'_extras': {'ansible_psrp_mock_test1': True}},
            {
                '_psrp_conn_kwargs': {
                    'server': 'inventory_hostname',
                    'port': 5986,
                    'username': None,
                    'password': None,
                    'ssl': True,
                    'path': 'wsman',
                    'auth': 'negotiate',
                    'cert_validation': True,
                    'connection_timeout': 30,
                    'encryption': 'auto',
                    'proxy': None,
                    'no_proxy': False,
                    'max_envelope_size': 153600,
                    'operation_timeout': 20,
                    'certificate_key_pem': None,
                    'certificate_pem': None,
                    'credssp_auth_mechanism': 'auto',
                    'credssp_disable_tlsv1_2': False,
                    'credssp_minimum_version': 2,
                    'negotiate_delegate': None,
                    'negotiate_hostname_override': None,
                    'negotiate_send_cbt': True,
                    'negotiate_service': 'WSMAN',
                    'read_timeout': 30,
                    'reconnection_backoff': 2.0,
                    'reconnection_retries': 0,
                    'mock_test1': True
                },
            },
        ),
        # cert validation through string repr of bool
        (
            {'_extras': {}, 'ansible_psrp_cert_validation': 'ignore'},
            {
                '_psrp_cert_validation': False
            },
        ),
        # cert validation path
        (
            {'_extras': {}, 'ansible_psrp_cert_trust_path': '/path/cert.pem'},
            {
                '_psrp_cert_validation': '/path/cert.pem'
            },
        ),
    )

    # pylint bug: https://github.com/PyCQA/pylint/issues/511
    # pylint: disable=undefined-variable
    @pytest.mark.parametrize('options, expected',
                             ((o, e) for o, e in OPTIONS_DATA))
    def test_set_options(self, options, expected):
        pc = PlayContext()
        new_stdin = StringIO()

        conn = connection_loader.get('psrp', pc, new_stdin)
        conn.set_options(var_options=options)
        conn._build_kwargs()

        # NOTE: the loop variable deliberately rebinds 'expected' to each
        # attribute's expected value.
        for attr, expected in expected.items():
            actual = getattr(conn, attr)
            assert actual == expected, \
                "psrp attr '%s', actual '%s' != expected '%s'"\
                % (attr, actual, expected)

    def test_set_invalid_extras_options(self, monkeypatch):
        # An extras var not declared in any AUTH_KWARGS group should only
        # produce a warning, not an error.
        pc = PlayContext()
        new_stdin = StringIO()

        conn = connection_loader.get('psrp', pc, new_stdin)
        conn.set_options(var_options={'_extras': {'ansible_psrp_mock_test3': True}})

        mock_display = MagicMock()
        monkeypatch.setattr(Display, "warning", mock_display)
        conn._build_kwargs()

        assert mock_display.call_args[0][0] == \
            'ansible_psrp_mock_test3 is unsupported by the current psrp version installed'
ticosax/django | django/contrib/gis/geos/prototypes/misc.py | 103 | 1381 | """
This module is for the miscellaneous GEOS routines, particularly the
ones that return the area, distance, and length.
"""
from ctypes import POINTER, c_double, c_int
from django.contrib.gis.geos.libgeos import GEOM_PTR
from django.contrib.gis.geos.prototypes.errcheck import check_dbl, check_string
from django.contrib.gis.geos.prototypes.geom import geos_char_p
from django.contrib.gis.geos.prototypes.threadsafe import GEOSFunc
from django.utils.six.moves import range
__all__ = ['geos_area', 'geos_distance', 'geos_length', 'geos_isvalidreason']
# ### ctypes generator function ###
def dbl_from_geom(func, num_geom=1):
    """
    Argument is a Geometry, return type is double that is passed
    in by reference as the last argument.
    """
    # One geometry pointer per input geometry, then the double out-param.
    # List multiplication replaces the original index loop with an unused
    # loop variable (and drops the need for six.moves.range here).
    func.argtypes = [GEOM_PTR] * num_geom + [POINTER(c_double)]
    func.restype = c_int  # Status code returned
    func.errcheck = check_dbl
    return func
# ### ctypes prototypes ###
# Area, distance, and length prototypes.  Each takes geometry pointer(s)
# plus a by-reference double out-parameter (see dbl_from_geom).
geos_area = dbl_from_geom(GEOSFunc('GEOSArea'))
geos_distance = dbl_from_geom(GEOSFunc('GEOSDistance'), num_geom=2)
geos_length = dbl_from_geom(GEOSFunc('GEOSLength'))

# Validity-reason prototype: takes a geometry pointer and returns a
# C string; check_string performs the result validation/conversion.
geos_isvalidreason = GEOSFunc('GEOSisValidReason')
geos_isvalidreason.argtypes = [GEOM_PTR]
geos_isvalidreason.restype = geos_char_p
geos_isvalidreason.errcheck = check_string
leapp-to/prototype | leapp/snactor/commands/workflow/run.py | 1 | 2817 | from __future__ import print_function
import datetime
import os
import sys
from leapp.exceptions import CommandError, LeappError
from leapp.logger import configure_logger
from leapp.repository.scan import find_and_scan_repositories
from leapp.snactor.commands.workflow import workflow
from leapp.snactor.context import with_snactor_context
from leapp.utils.clicmd import command_arg, command_opt
from leapp.utils.output import beautify_actor_exception, report_deprecations, report_errors
from leapp.utils.repository import find_repository_basedir, requires_repository
_LONG_DESCRIPTION = '''
Executes the given workflow.
Using --until-phase the workflow will be only executed until including
the given phase.
Using --until-actor the workflow will be only executed until including
the first occurrence of the given actor name.
For more information please consider reading the documentation at:
https://red.ht/leapp-docs
'''
@workflow.command('run', help='Execute a workflow with the given name', description=_LONG_DESCRIPTION)
@command_arg('name')
@command_opt('until-phase', help='Runs until including the given phase but then exits')
@command_opt('until-actor', help='Runs until including the given actor but then exits')
@command_opt('save-output', is_flag=True,
             help='Saves the output for actors to be consumable when executed with snactor run')
@command_opt('--whitelist-experimental', action='append', metavar='ActorName',
             help='Enables experimental actors')
@requires_repository
def cli(params):
    """Scan the current repository and execute the named workflow."""
    def impl(context=None):
        # Actual run logic; 'context' selects the execution context used
        # for produced messages (set when --save-output is given).
        start = datetime.datetime.utcnow()
        configure_logger()
        repository = find_and_scan_repositories(find_repository_basedir('.'), include_locals=True)
        try:
            repository.load()
        except LeappError as exc:
            sys.stderr.write(exc.message)
            sys.stderr.write('\n')
            sys.exit(1)

        wf = repository.lookup_workflow(params.name)
        if not wf:
            raise CommandError('Could not find any workflow named "{}"'.format(params.name))

        instance = wf()
        # Enable any experimental actors the user whitelisted by name.
        for actor_name in params.whitelist_experimental or ():
            actor = repository.lookup_actor(actor_name)
            if actor:
                instance.whitelist_experimental_actor(actor)

        with beautify_actor_exception():
            instance.run(context=context, until_phase=params.until_phase, until_actor=params.until_actor)

        report_errors(instance.errors)
        report_deprecations(os.getenv('LEAPP_EXECUTION_ID'), start=start)

        if instance.failure:
            sys.exit(1)

    @with_snactor_context
    def snactor_context_impl():
        impl(context=os.getenv('LEAPP_EXECUTION_ID'))

    # Only persist actor output (consumable by 'snactor run') on request.
    if params.save_output:
        snactor_context_impl()
    else:
        impl()
| lgpl-2.1 |
koobonil/Boss2D | Boss2D/addon/tensorflow-1.2.1_for_boss/tensorflow/python/estimator/inputs/queues/feeding_functions.py | 46 | 15782 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Helper functions for enqueuing data from arrays and pandas `DataFrame`s."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import random
import types as tp
import numpy as np
import six
from tensorflow.python.estimator.inputs.queues import feeding_queue_runner as fqr
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import data_flow_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.summary import summary
from tensorflow.python.training import queue_runner
try:
# pylint: disable=g-import-not-at-top
import pandas as pd
HAS_PANDAS = True
except IOError:
# Pandas writes a temporary file during import. If it fails, don't use pandas.
HAS_PANDAS = False
except ImportError:
HAS_PANDAS = False
def _get_integer_indices_for_next_batch(
batch_indices_start, batch_size, epoch_end, array_length,
current_epoch, total_epochs):
"""Returns the integer indices for next batch.
If total epochs is not None and current epoch is the final epoch, the end
index of the next batch should not exceed the `epoch_end` (i.e., the final
batch might not have size `batch_size` to avoid overshooting the last epoch).
Args:
batch_indices_start: Integer, the index to start next batch.
batch_size: Integer, size of batches to return.
epoch_end: Integer, the end index of the epoch. The epoch could start from a
random position, so `epoch_end` provides the end index for that.
array_length: Integer, the length of the array.
current_epoch: Integer, the epoch number has been emitted.
total_epochs: Integer or `None`, the total number of epochs to emit. If
`None` will run forever.
Returns:
A tuple of a list with integer indices for next batch and `current_epoch`
value after the next batch.
Raises:
OutOfRangeError if `current_epoch` is not less than `total_epochs`.
"""
if total_epochs is not None and current_epoch >= total_epochs:
raise errors.OutOfRangeError(None, None,
"Already emitted %s epochs." % current_epoch)
batch_indices_end = batch_indices_start + batch_size
batch_indices = [j % array_length for j in
range(batch_indices_start, batch_indices_end)]
epoch_end_indices = [i for i, x in enumerate(batch_indices) if x == epoch_end]
current_epoch += len(epoch_end_indices)
if total_epochs is None or current_epoch < total_epochs:
return (batch_indices, current_epoch)
# Now we might have emitted more data for expected epochs. Need to trim.
final_epoch_end_inclusive = epoch_end_indices[
-(current_epoch - total_epochs + 1)]
batch_indices = batch_indices[:final_epoch_end_inclusive + 1]
return (batch_indices, total_epochs)
class _ArrayFeedFn(object):
"""Creates feed dictionaries from numpy arrays."""
def __init__(self,
placeholders,
array,
batch_size,
random_start=False,
seed=None,
num_epochs=None):
if len(placeholders) != 2:
raise ValueError("_array_feed_fn expects 2 placeholders; got {}.".format(
len(placeholders)))
self._placeholders = placeholders
self._array = array
self._max = len(array)
self._batch_size = batch_size
self._num_epochs = num_epochs
self._epoch = 0
random.seed(seed)
self._trav = random.randrange(self._max) if random_start else 0
self._epoch_end = (self._trav - 1) % self._max
def __call__(self):
integer_indexes, self._epoch = _get_integer_indices_for_next_batch(
batch_indices_start=self._trav,
batch_size=self._batch_size,
epoch_end=self._epoch_end,
array_length=self._max,
current_epoch=self._epoch,
total_epochs=self._num_epochs)
self._trav = (integer_indexes[-1] + 1) % self._max
return {
self._placeholders[0]: integer_indexes,
self._placeholders[1]: self._array[integer_indexes]
}
class _OrderedDictNumpyFeedFn(object):
"""Creates feed dictionaries from `OrderedDict`s of numpy arrays."""
def __init__(self,
placeholders,
ordered_dict_of_arrays,
batch_size,
random_start=False,
seed=None,
num_epochs=None):
if len(placeholders) != len(ordered_dict_of_arrays) + 1:
raise ValueError("Expected {} placeholders; got {}.".format(
len(ordered_dict_of_arrays), len(placeholders)))
self._index_placeholder = placeholders[0]
self._col_placeholders = placeholders[1:]
self._ordered_dict_of_arrays = ordered_dict_of_arrays
self._max = len(next(iter(ordered_dict_of_arrays.values())))
for _, v in ordered_dict_of_arrays.items():
if len(v) != self._max:
raise ValueError("Array lengths must match.")
self._batch_size = batch_size
self._num_epochs = num_epochs
self._epoch = 0
random.seed(seed)
self._trav = random.randrange(self._max) if random_start else 0
self._epoch_end = (self._trav - 1) % self._max
def __call__(self):
integer_indexes, self._epoch = _get_integer_indices_for_next_batch(
batch_indices_start=self._trav,
batch_size=self._batch_size,
epoch_end=self._epoch_end,
array_length=self._max,
current_epoch=self._epoch,
total_epochs=self._num_epochs)
self._trav = (integer_indexes[-1] + 1) % self._max
feed_dict = {self._index_placeholder: integer_indexes}
cols = [
column[integer_indexes]
for column in self._ordered_dict_of_arrays.values()
]
feed_dict.update(dict(zip(self._col_placeholders, cols)))
return feed_dict
class _PandasFeedFn(object):
"""Creates feed dictionaries from pandas `DataFrames`."""
def __init__(self,
placeholders,
dataframe,
batch_size,
random_start=False,
seed=None,
num_epochs=None):
if len(placeholders) != len(dataframe.columns) + 1:
raise ValueError("Expected {} placeholders; got {}.".format(
len(dataframe.columns), len(placeholders)))
self._index_placeholder = placeholders[0]
self._col_placeholders = placeholders[1:]
self._dataframe = dataframe
self._max = len(dataframe)
self._batch_size = batch_size
self._num_epochs = num_epochs
self._epoch = 0
random.seed(seed)
self._trav = random.randrange(self._max) if random_start else 0
self._epoch_end = (self._trav - 1) % self._max
def __call__(self):
integer_indexes, self._epoch = _get_integer_indices_for_next_batch(
batch_indices_start=self._trav,
batch_size=self._batch_size,
epoch_end=self._epoch_end,
array_length=self._max,
current_epoch=self._epoch,
total_epochs=self._num_epochs)
self._trav = (integer_indexes[-1] + 1) % self._max
result = self._dataframe.iloc[integer_indexes]
cols = [result[col].values for col in result.columns]
feed_dict = dict(zip(self._col_placeholders, cols))
feed_dict[self._index_placeholder] = result.index.values
return feed_dict
class _GeneratorFeedFn(object):
"""Creates feed dictionaries from `Generator` of `dicts` of numpy arrays."""
def __init__(self,
placeholders,
generator,
batch_size,
random_start=False,
seed=None,
num_epochs=None):
first_sample = next(generator())
if len(placeholders) != len(first_sample):
raise ValueError("Expected {} placeholders; got {}.".format(
len(first_sample), len(placeholders)))
self._keys = sorted(list(first_sample.keys()))
self._col_placeholders = placeholders
self._generator_function = generator
self._iterator = generator()
self._batch_size = batch_size
self._num_epochs = num_epochs
self._epoch = 0
random.seed(seed)
def __call__(self):
if self._num_epochs and self._epoch >= self._num_epochs:
raise errors.OutOfRangeError(None, None,
"Already emitted %s epochs." % self._epoch)
list_dict = {}
list_dict_size = 0
while list_dict_size < self._batch_size:
try:
data_row = next(self._iterator)
except StopIteration:
self._epoch += 1
self._iterator = self._generator_function()
data_row = next(self._iterator)
for index, key in enumerate(self._keys):
if key not in data_row.keys():
raise KeyError("key mismatch between dicts emitted by GenFun"
"Expected {} keys; got {}".format(
self._keys, data_row.keys()))
list_dict.setdefault(self._col_placeholders[index],
list()).append(data_row[key])
list_dict_size += 1
feed_dict = {key: np.asarray(item) for key, item in list(list_dict.items())}
return feed_dict
def _enqueue_data(data,
                  capacity,
                  shuffle=False,
                  min_after_dequeue=None,
                  num_threads=1,
                  seed=None,
                  name="enqueue_input",
                  enqueue_size=1,
                  num_epochs=None):
  """Creates a queue filled from a numpy array or pandas `DataFrame`.
  Returns a queue filled with the rows of the given (`OrderedDict` of) array
  or `DataFrame`. In the case of a pandas `DataFrame`, the first enqueued
  `Tensor` corresponds to the index of the `DataFrame`. For (`OrderedDict` of)
  numpy arrays, the first enqueued `Tensor` contains the row number.
  Args:
    data: a numpy `ndarray`, `OrderedDict` of numpy arrays, or a generator
      yielding `dict`s of numpy arrays or pandas `DataFrame` that will be read
      into the queue.
    capacity: the capacity of the queue.
    shuffle: whether or not to shuffle the rows of the array.
    min_after_dequeue: minimum number of elements that can remain in the queue
      after a dequeue operation. Only used when `shuffle` is true. If not set,
      defaults to `capacity` / 4.
    num_threads: number of threads used for reading and enqueueing.
    seed: used to seed shuffling and reader starting points.
    name: a scope name identifying the data.
    enqueue_size: the number of rows to enqueue per step.
    num_epochs: limit enqueuing to a specified number of epochs, if provided.
  Returns:
    A queue filled with the rows of the given (`OrderedDict` of) array or
    `DataFrame`.
  Raises:
    TypeError: `data` is not a Pandas `DataFrame`, an `OrderedDict` of numpy
      arrays, a numpy `ndarray`, or a generator producing these.
  """
  with ops.name_scope(name):
    # Pick queue component dtypes/shapes and the matching feed-fn class from
    # the kind of `data`.  The first component is the row/DataFrame index;
    # the generator case has no index component.
    if isinstance(data, np.ndarray):
      types = [dtypes.int64, dtypes.as_dtype(data.dtype)]
      queue_shapes = [(), data.shape[1:]]
      get_feed_fn = _ArrayFeedFn
    elif isinstance(data, collections.OrderedDict):
      types = [dtypes.int64] + [
          dtypes.as_dtype(col.dtype) for col in data.values()
      ]
      queue_shapes = [()] + [col.shape[1:] for col in data.values()]
      get_feed_fn = _OrderedDictNumpyFeedFn
    elif isinstance(data, tp.FunctionType):
      # NOTE(review): only plain functions pass this check; generator
      # *objects* fall through to the TypeError below -- confirm intended.
      # One sample is consumed from a fresh generator to infer dtypes/shapes.
      x_first_el = six.next(data())
      x_first_keys = sorted(x_first_el.keys())
      x_first_values = [x_first_el[key] for key in x_first_keys]
      types = [dtypes.as_dtype(col.dtype) for col in x_first_values]
      queue_shapes = [col.shape for col in x_first_values]
      get_feed_fn = _GeneratorFeedFn
    elif HAS_PANDAS and isinstance(data, pd.DataFrame):
      types = [
          dtypes.as_dtype(dt) for dt in [data.index.dtype] + list(data.dtypes)
      ]
      queue_shapes = [() for _ in types]
      get_feed_fn = _PandasFeedFn
    else:
      raise TypeError(
          "data must be either a numpy array or pandas DataFrame if pandas is "
          "installed; got {}".format(type(data).__name__))
    # TODO(jamieas): TensorBoard warnings for all warnings below once available.
    if num_threads > 1 and num_epochs is not None:
      logging.warning(
          "enqueue_data was called with num_epochs and num_threads > 1. "
          "num_epochs is applied per thread, so this will produce more "
          "epochs than you probably intend. "
          "If you want to limit epochs, use one thread.")
    if shuffle and num_threads > 1 and num_epochs is not None:
      logging.warning(
          "enqueue_data was called with shuffle=True, num_threads > 1, and "
          "num_epochs. This will create multiple threads, all reading the "
          "array/dataframe in order adding to the same shuffling queue; the "
          "results will likely not be sufficiently shuffled.")
    if not shuffle and num_threads > 1:
      logging.warning(
          "enqueue_data was called with shuffle=False and num_threads > 1. "
          "This will create multiple threads, all reading the "
          "array/dataframe in order. If you want examples read in order, use"
          " one thread; if you want multiple threads, enable shuffling.")
    if shuffle:
      min_after_dequeue = int(capacity / 4 if min_after_dequeue is None else
                              min_after_dequeue)
      queue = data_flow_ops.RandomShuffleQueue(
          capacity,
          min_after_dequeue,
          dtypes=types,
          shapes=queue_shapes,
          seed=seed)
    else:
      min_after_dequeue = 0  # just for the summary text
      queue = data_flow_ops.FIFOQueue(
          capacity, dtypes=types, shapes=queue_shapes)
    enqueue_ops = []
    feed_fns = []
    # One enqueue op and one feed fn per reader thread; each thread gets its
    # own derived seed so random starting points differ across threads.
    for i in range(num_threads):
      # Note the placeholders have no shapes, so they will accept any
      # enqueue_size. enqueue_many below will break them up.
      placeholders = [array_ops.placeholder(t) for t in types]
      enqueue_ops.append(queue.enqueue_many(placeholders))
      seed_i = None if seed is None else (i + 1) * seed
      feed_fns.append(
          get_feed_fn(
              placeholders,
              data,
              enqueue_size,
              random_start=shuffle,
              seed=seed_i,
              num_epochs=num_epochs))
    runner = fqr._FeedingQueueRunner(  # pylint: disable=protected-access
        queue=queue, enqueue_ops=enqueue_ops, feed_fns=feed_fns)
    queue_runner.add_queue_runner(runner)
    # Export a fullness summary scaled so min_after_dequeue maps to 0 and
    # full capacity maps to 1.
    full = (math_ops.cast(
        math_ops.maximum(0, queue.size() - min_after_dequeue),
        dtypes.float32) * (1. / (capacity - min_after_dequeue)))
    # Note that name contains a '/' at the end so we intentionally do not place
    # a '/' after %s below.
    summary_name = ("queue/%sfraction_over_%d_of_%d_full" %
                    (queue.name, min_after_dequeue,
                     capacity - min_after_dequeue))
    summary.scalar(summary_name, full)
    return queue
| mit |
JulienMcJay/eclock | windows/Python27/Lib/site-packages/pywin32-218-py2.7-win32.egg/pywin/Demos/toolbar.py | 17 | 2742 | # Demo of ToolBars
# Shows the toolbar control.
# Demos how to make custom tooltips, etc.
import win32ui
import win32con
import win32api
from pywin.mfc import docview, window, afxres
import commctrl
class GenericFrame(window.MDIChildWnd):
    # Demo MDI child frame: creates a dockable toolbar at runtime and
    # answers its tooltip / status-bar text queries from Python.
    # NOTE: this is Python 2 code (print statements below).
    def OnCreateClient(self, cp, context):
        # handlers for toolbar buttons
        self.HookCommand (self.OnPrevious, 401)
        self.HookCommand (self.OnNext, 402)
        # Its not necessary for us to hook both of these - the
        # common controls should fall-back all by themselves.
        # Indeed, given we hook TTN_NEEDTEXTW, commctrl.TTN_NEEDTEXTA
        # will not be called.
        self.HookNotify(self.GetTTText, commctrl.TTN_NEEDTEXT)
        self.HookNotify(self.GetTTText, commctrl.TTN_NEEDTEXTW)
        # parent = win32ui.GetMainFrame()
        parent = self
        style = win32con.WS_CHILD | win32con.WS_VISIBLE | \
            afxres.CBRS_SIZE_DYNAMIC | afxres.CBRS_TOP | afxres.CBRS_TOOLTIPS | afxres.CBRS_FLYBY
        buttons = (win32ui.ID_APP_ABOUT,win32ui.ID_VIEW_INTERACTIVE)
        bitmap = win32ui.IDB_BROWSER_HIER
        tbid = 0xE840
        # Build the toolbar, dock it to this frame and restore any docking
        # state saved by a previous run (see OnDestroy).
        self.toolbar = tb = win32ui.CreateToolBar (parent, style, tbid)
        tb.LoadBitmap(bitmap)
        tb.SetButtons(buttons)
        tb.EnableDocking(afxres.CBRS_ALIGN_ANY)
        tb.SetWindowText("Test")
        parent.EnableDocking(afxres.CBRS_ALIGN_ANY)
        parent.DockControlBar(tb)
        parent.LoadBarState("ToolbarTest")
        window.MDIChildWnd.OnCreateClient(self, cp, context)
        return 1
    def OnDestroy(self, msg):
        # Persist toolbar docking state so LoadBarState can restore it.
        self.SaveBarState("ToolbarTest")
    def GetTTText(self, std, extra):
        # TTN_NEEDTEXT[W] handler: supply tooltip text for the About button.
        (hwndFrom, idFrom, code) = std
        text, hinst, flags = extra
        if flags & commctrl.TTF_IDISHWND:
            return # Not handled
        if (idFrom==win32ui.ID_APP_ABOUT):
            # our 'extra' return value needs to be the following
            # entries from a NMTTDISPINFO[W] struct:
            # (szText, hinst, uFlags). None means 'don't change
            # the value'
            return 0, ("It works!", None, None)
        return None # not handled.
    def GetMessageString(self, id):
        # Status-bar text for a command id; fall back to the MFC default.
        if id==win32ui.ID_APP_ABOUT:
            return "Dialog Test\nTest"
        else:
            return self._obj_.GetMessageString(id)
    def OnSize (self, params):
        print 'OnSize called with ', params
    def OnNext (self, id, cmd):
        print 'OnNext called'
    def OnPrevious (self, id, cmd):
        print 'OnPrevious called'
# Text shown inside the demo edit view (the \r suffixes produce CRLF line
# endings expected by the Windows edit control).
msg = """\
This toolbar was dynamically created.\r
\r
The first item's tooltips is provided by Python code.\r
\r
(Dont close the window with the toolbar in a floating state - it may not re-appear!)\r
"""
def test():
    # Open a new edit-view document hosted in the toolbar-demo frame.
    template = docview.DocTemplate( win32ui.IDR_PYTHONTYPE, None, GenericFrame, docview.EditView)
    doc = template.OpenDocumentFile(None)
    doc.SetTitle("Toolbar Test")
    view = doc.GetFirstView()
    view.SetWindowText(msg)
if __name__=='__main__':
    import demoutils
    # Only run when a full Pythonwin GUI environment is available.
    if demoutils.NeedGoodGUI():
        test()
| gpl-2.0 |
jwlawson/tensorflow | tensorflow/python/ops/quantized_conv_ops_test.py | 9 | 7495 | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Functional tests for quantized convolutional operations."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.ops import nn_ops
from tensorflow.python.platform import test
class Conv2DTest(test.TestCase):
  """Tests quantized_conv2d against hand-computed expected values."""
  def __init__(self, method_name="runTest"):
    super(Conv2DTest, self).__init__(method_name)
  def _VerifyValues(self, tensor_in_sizes, filter_in_sizes, stride, padding,
                    expected):
    """Verifies the output values of the convolution function.
    Args:
      tensor_in_sizes: Input tensor dimensions in
        [batch, input_rows, input_cols, input_depth].
      filter_in_sizes: Filter tensor dimensions in
        [kernel_rows, kernel_cols, input_depth, output_depth].
      stride: Stride.
      padding: Padding type.
      expected: An array containing the expected operation outputs.
    """
    total_size_1 = 1
    total_size_2 = 1
    for s in tensor_in_sizes:
      total_size_1 *= s
    for s in filter_in_sizes:
      total_size_2 *= s
    # Initializes the input tensor with array containing incrementing
    # numbers from 1.
    x1 = np.array([f for f in range(1, total_size_1 + 1)])
    x1 = x1.astype(np.uint8).reshape(tensor_in_sizes)
    x1_min = 0.0
    x1_max = 255.0
    x2 = np.array([f for f in range(1, total_size_2 + 1)]).astype(np.uint8)
    x2 = x2.astype(np.uint8).reshape(filter_in_sizes)
    x2_min = 0.0
    x2_max = 255.0
    with self.test_session(use_gpu=False) as sess:
      t1 = constant_op.constant(x1, shape=tensor_in_sizes, dtype=dtypes.quint8)
      t2 = constant_op.constant(x2, shape=filter_in_sizes, dtype=dtypes.quint8)
      conv = nn_ops.quantized_conv2d(
          t1,
          t2,
          out_type=dtypes.qint32,
          strides=[1, stride, stride, 1],
          padding=padding,
          min_input=x1_min,
          max_input=x1_max,
          min_filter=x2_min,
          max_filter=x2_max)
      value = sess.run(conv)
      # quantized_conv2d returns (output, output_min, output_max); convert
      # back to floats before comparing (tolerance 1.0 absorbs rounding).
      quantized_output = value[0]
      output_min = value[1]
      output_max = value[2]
      float_output = self._QuantizedOutputToFloat(quantized_output, output_min,
                                                  output_max)
      self.assertArrayNear(expected, float_output.flatten(), 1.0)
      self.assertEqual(value[0].shape, conv[0].get_shape())
  def _assertQuantizedArrayEquals(self, iarray1, iarray2):
    # Element-wise equality helper for quantized integer arrays.
    for i1, i2 in zip(iarray1, iarray2):
      self.assertTrue(i1 == i2)
  def _QuantizedOutputToFloat(self, quantized, quantized_min, quantized_max):
    """Dequantizes a qint32 array back to floats using its min/max range."""
    number_of_bits = 32
    number_of_steps = 1 << number_of_bits
    range_adjust = (number_of_steps / (number_of_steps - 1.0))
    quantized_range = ((quantized_max - quantized_min) * range_adjust)
    range_scale = (quantized_range / number_of_steps)
    lowest_quantized = -(1 << (number_of_bits - 1))
    result = np.array([(quantized_min + ((float(x) - lowest_quantized) * range_scale))
                       for x in quantized.flatten()])
    return result
  def testConv2D1x1Filter(self):
    # Our generated input is [batch, rows, cols, depth], and looks like this:
    # (1,2,3) (4,5,6) (7,8,9)
    # (10,11,12) (13,14,15) (16,17,18)
    # The filter data is:
    # (1,4,7) (2,5,8) (3,6,9)
    # That means the calculations are:
    # 1*1+2*4+3*7=30
    # 1*2+2*5+3*8=36
    # 1*3+2*6+3*9=42
    # 4*1+5*4+6*7=66
    # 4*2+5*5+6*8=81
    # 4*3+5*6+6*9=96
    # 7*1+8*4+9*7=102
    # 7*2+8*5+9*8=126
    # 7*3+8*6+9*9=150
    # 10*1+11*4+12*7=138
    # 10*2+11*5+12*8=171
    # 10*3+11*6+12*9=204
    # 13*1+14*4+15*7=174
    # 13*2+14*5+15*8=216
    # 13*3+14*6+15*9=258
    # 16*1+17*4+18*7=210
    # 16*2+17*5+18*8=261
    # 16*3+17*6+18*9=312
    # Note: the qint32 output is not clamped to 255, so 258/261/312 appear
    # unchanged in expected_output below.
    # Because the output shift is zero, we call the non-optimized reference
    # path for the convolution.
    expected_output = [
        30, 36, 42, 66, 81, 96, 102, 126, 150, 138, 171, 204, 174, 216, 258,
        210, 261, 312
    ]
    self._VerifyValues(
        tensor_in_sizes=[1, 2, 3, 3],
        filter_in_sizes=[1, 1, 3, 3],
        stride=1,
        padding="VALID",
        expected=expected_output)
  def testConv2D2x2Filter(self):
    # Our generated input is [batch, rows, cols, depth], and looks like this:
    # (1,2,3) (4,5,6) (7,8,9)
    # (10,11,12) (13,14,15) (16,17,18)
    # The filter data is [filter_height, filter_width, depth, filter_count]:
    # ( 1, 4, 7) (10, 13, 16)
    # (19,22,25) (28, 31, 34)
    # -
    # ( 2, 5, 8) (11, 14, 17)
    # (20,23,26) (29, 32, 35)
    # -
    # ( 3, 6, 9) (12, 15, 18)
    # (21,24,27) (30, 33, 36)
    # The raw accumulated totals are:
    # 1*1+2*4+3*7+4*10+5*13+6*16+10*19+11*22+12*25+13*28+14*31+15*34=2271
    # 1*2+2*5+3*8+4*11+5*14+6*17+10*20+11*23+12*26+13*29+14*32+15*35=2367
    # 1*3+2*6+3*9+4*12+5*15+6*18+10*21+11*24+12*27+13*30+14*33+15*36=2463
    # 4*1+5*4+6*7+7*10+8*13+9*16+13*19+14*22+15*25+16*28+17*31+18*34=2901
    # 4*2+5*5+6*8+7*11+8*14+9*17+13*20+14*23+15*26+16*29+17*32+18*35=3033
    # 4*3+5*6+6*9+7*12+8*15+9*18+13*21+14*24+15*27+16*30+17*33+18*36=3165
    # The expected values are taken from the raw totals and rescaled to fit into
    # eight bits.
    expected_output = [2271.0, 2367.0, 2463.0, 2901.0, 3033.0, 3165.0]
    self._VerifyValues(
        tensor_in_sizes=[1, 2, 3, 3],
        filter_in_sizes=[2, 2, 3, 3],
        stride=1,
        padding="VALID",
        expected=expected_output)
  def testConv2D1x2Filter(self):
    # The outputs are computed using third_party/py/IPython/notebook.
    # With a shift of 21, we should execute the optimized path here.
    expected_output = [
        231.0, 252.0, 273.0, 384.0, 423.0, 462.0, 690.0, 765.0, 840.0, 843.0,
        936.0, 1029.0
    ]
    self._VerifyValues(
        tensor_in_sizes=[1, 2, 3, 3],
        filter_in_sizes=[1, 2, 3, 3],
        stride=1,
        padding="VALID",
        expected=expected_output)
  def testConv2D2x2FilterStride2(self):
    # With a shift of 21, we should execute the optimized path here.
    expected_output = [2271.0, 2367.0, 2463.0]
    self._VerifyValues(
        tensor_in_sizes=[1, 2, 3, 3],
        filter_in_sizes=[2, 2, 3, 3],
        stride=2,
        padding="VALID",
        expected=expected_output)
  def testConv2D2x2FilterStride2Same(self):
    # With a shift of 21, we should execute the optimized path here.
    expected_output = [2271.0, 2367.0, 2463.0, 1230.0, 1305.0, 1380.0]
    self._VerifyValues(
        tensor_in_sizes=[1, 2, 3, 3],
        filter_in_sizes=[2, 2, 3, 3],
        stride=2,
        padding="SAME",
        expected=expected_output)
# Running this file directly executes all Conv2DTest cases.
if __name__ == "__main__":
  test.main()
| apache-2.0 |
taotie12010/bigfour | openedx/core/djangoapps/content/course_structures/api/v0/serializers.py | 65 | 1313 | """
API Serializers
"""
from rest_framework import serializers
class GradingPolicySerializer(serializers.Serializer):
    """ Serializer for course grading policy. """
    # Rename internal attributes to friendlier public field names via source.
    assignment_type = serializers.CharField(source='type')
    count = serializers.IntegerField(source='min_count')
    dropped = serializers.IntegerField(source='drop_count')
    weight = serializers.FloatField()
# pylint: disable=invalid-name
class BlockSerializer(serializers.Serializer):
    """ Serializer for course structure block. """
    id = serializers.CharField(source='usage_key')
    type = serializers.CharField(source='block_type')
    # NOTE(review): source='parent' is redundant (it equals the field name);
    # newer DRF versions reject redundant `source` arguments -- confirm the
    # installed DRF version before removing it.
    parent = serializers.CharField(source='parent')
    display_name = serializers.CharField()
    graded = serializers.BooleanField(default=False)
    format = serializers.CharField()
    children = serializers.CharField()
class CourseStructureSerializer(serializers.Serializer):
    """ Serializer for course structure. """
    root = serializers.CharField(source='root')
    blocks = serializers.SerializerMethodField('get_blocks')

    def get_blocks(self, structure):
        """ Serialize the individual blocks. """
        return {
            key: BlockSerializer(block).data
            for key, block in structure['blocks'].iteritems()
        }
| agpl-3.0 |
vegitron/ansible | test/integration/gce_credentials.py | 275 | 1809 | import collections
import os
import yaml
# Verify that a GCE-capable libcloud is importable; otherwise emit an
# Ansible-style failure message and exit.
try:
    from libcloud.compute.types import Provider
    from libcloud.compute.providers import get_driver
    _ = Provider.GCE
except ImportError:
    # `sys` is not imported at module scope in this file, so import it here;
    # without this the failure path raised NameError instead of exiting.
    import sys
    print("failed=True " + \
        "msg='libcloud with GCE support (0.13.3+) required for this module'")
    sys.exit(1)
def add_credentials_options(parser):
    """Add GCE credential options to `parser`, defaulting from credentials.yml.

    Registers --service_account_email, --pem_file and --project_id on the
    given optparse-style parser.  If a credentials.yml file exists in the
    current directory, its values become the option defaults.

    Args:
        parser: an optparse.OptionParser (or compatible) extended in place.
    """
    default_service_account_email = None
    default_pem_file = None
    default_project_id = None
    # Load details from credentials.yml
    if os.path.isfile('credentials.yml'):
        # Use a context manager so the file handle is closed, and safe_load
        # so a crafted credentials file cannot execute arbitrary Python.
        with open('credentials.yml', 'r') as credentials_file:
            credentials = yaml.safe_load(credentials_file)
        default_service_account_email = credentials['gce_service_account_email']
        default_pem_file = credentials['gce_pem_file']
        default_project_id = credentials['gce_project_id']
    parser.add_option("--service_account_email",
                      action="store", dest="service_account_email",
                      default=default_service_account_email,
                      help="GCE service account email. Default is loaded from credentials.yml.")
    parser.add_option("--pem_file",
                      action="store", dest="pem_file",
                      default=default_pem_file,
                      help="GCE client key. Default is loaded from credentials.yml.")
    parser.add_option("--project_id",
                      action="store", dest="project_id",
                      default=default_project_id,
                      help="Google Cloud project ID. Default is loaded from credentials.yml.")
def check_required(opts, parser):
    """Abort via parser.error() if any required credential option is unset."""
    missing = [name
               for name in ('service_account_email', 'pem_file', 'project_id')
               if getattr(opts, name) is None]
    if missing:
        # Report the first missing option, matching optparse's exit behavior.
        parser.error("Missing required parameter: --%s" % missing[0])
def get_gce_driver(opts):
    """Build and return an authenticated libcloud GCE driver.

    Args:
        opts: parsed options carrying service_account_email, pem_file and
            project_id attributes (see add_credentials_options).
    """
    # Connect to GCE
    gce_cls = get_driver(Provider.GCE)
    return gce_cls(
        opts.service_account_email, opts.pem_file, project=opts.project_id)
| gpl-3.0 |
yufengg/tensorflow | tensorflow/contrib/quantization/python/array_ops.py | 178 | 1156 | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Quantized Array Operations."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# pylint: disable=unused-import
from tensorflow.python.ops import gen_array_ops as quantized_gen_array_ops
from tensorflow.python.ops.gen_array_ops import dequantize
from tensorflow.python.ops.gen_array_ops import quantize_v2
from tensorflow.python.ops.gen_array_ops import quantized_concat
# pylint: enable=unused-import
| apache-2.0 |
kustodian/ansible | test/units/mock/loader.py | 127 | 3861 | # (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import os
from ansible.errors import AnsibleParserError
from ansible.parsing.dataloader import DataLoader
from ansible.module_utils._text import to_bytes, to_text
class DictDataLoader(DataLoader):
    """In-memory ``DataLoader`` for unit tests.

    File contents are served from a ``path -> content`` mapping instead of
    the real filesystem; the known directory tree is derived from the
    mapping's keys, so code under test can "load" files without any disk I/O.
    """

    def __init__(self, file_mapping=None):
        file_mapping = {} if file_mapping is None else file_mapping
        # isinstance (rather than `type(...) == dict`) so dict subclasses
        # such as OrderedDict are accepted as well.
        assert isinstance(file_mapping, dict)

        super(DictDataLoader, self).__init__()

        self._file_mapping = file_mapping
        self._build_known_directories()
        self._vault_secrets = None

    def load_from_file(self, path, cache=True, unsafe=False):
        """Parse and return the mapped content for `path`, or None if unmapped."""
        path = to_text(path)
        if path in self._file_mapping:
            return self.load(self._file_mapping[path], path)
        return None

    # TODO: the real _get_file_contents returns a bytestring, so we actually convert the
    # unicode/text it's created with to utf-8
    def _get_file_contents(self, path):
        """Return (bytes, show_content) for `path`; raise if not mapped."""
        path = to_text(path)
        if path in self._file_mapping:
            return (to_bytes(self._file_mapping[path]), False)
        else:
            raise AnsibleParserError("file not found: %s" % path)

    def path_exists(self, path):
        path = to_text(path)
        return path in self._file_mapping or path in self._known_directories

    def is_file(self, path):
        path = to_text(path)
        return path in self._file_mapping

    def is_directory(self, path):
        path = to_text(path)
        return path in self._known_directories

    def list_directory(self, path):
        """Return the basenames of entries directly under `path`."""
        ret = []
        path = to_text(path)
        for x in (list(self._file_mapping.keys()) + self._known_directories):
            if x.startswith(path):
                if os.path.dirname(x) == path:
                    ret.append(os.path.basename(x))
        return ret

    def is_executable(self, path):
        # FIXME: figure out a way to make paths return true for this
        return False

    def _add_known_directory(self, directory):
        if directory not in self._known_directories:
            self._known_directories.append(directory)

    def _build_known_directories(self):
        """Rebuild the directory list from every ancestor of mapped paths."""
        self._known_directories = []
        for path in self._file_mapping:
            dirname = os.path.dirname(path)
            while dirname not in ('/', ''):
                self._add_known_directory(dirname)
                dirname = os.path.dirname(dirname)

    def push(self, path, content):
        """Add or replace a mapped file, refreshing directories when new."""
        rebuild_dirs = False
        if path not in self._file_mapping:
            rebuild_dirs = True
        self._file_mapping[path] = content
        if rebuild_dirs:
            self._build_known_directories()

    def pop(self, path):
        """Remove a mapped file (no-op if absent) and refresh directories."""
        if path in self._file_mapping:
            del self._file_mapping[path]
            self._build_known_directories()

    def clear(self):
        """Reset the loader to an empty mapping."""
        self._file_mapping = dict()
        self._known_directories = []

    def get_basedir(self):
        return os.getcwd()

    def set_vault_secrets(self, vault_secrets):
        self._vault_secrets = vault_secrets
| gpl-3.0 |
apbard/scipy | scipy/signal/spectral.py | 1 | 66503 | """Tools for spectral analysis.
"""
from __future__ import division, print_function, absolute_import
import numpy as np
from scipy import fftpack
from . import signaltools
from .windows import get_window
from ._spectral import _lombscargle
from ._arraytools import const_ext, even_ext, odd_ext, zero_ext
import warnings
from scipy._lib.six import string_types
__all__ = ['periodogram', 'welch', 'lombscargle', 'csd', 'coherence',
'spectrogram', 'stft', 'istft', 'check_COLA']
def lombscargle(x,
                y,
                freqs,
                precenter=False,
                normalize=False):
    """
    lombscargle(x, y, freqs)
    Computes the Lomb-Scargle periodogram.
    The Lomb-Scargle periodogram was developed by Lomb [1]_ and further
    extended by Scargle [2]_ to find, and test the significance of weak
    periodic signals with uneven temporal sampling.
    When *normalize* is False (default) the computed periodogram
    is unnormalized, it takes the value ``(A**2) * N/4`` for a harmonic
    signal with amplitude A for sufficiently large N.
    When *normalize* is True the computed periodogram is normalized by
    the residuals of the data around a constant reference model (at zero).
    Input arrays should be one-dimensional and will be cast to float64.
    Parameters
    ----------
    x : array_like
        Sample times.
    y : array_like
        Measurement values.
    freqs : array_like
        Angular frequencies for output periodogram.
    precenter : bool, optional
        Pre-center amplitudes by subtracting the mean.
    normalize : bool, optional
        Compute normalized periodogram.
    Returns
    -------
    pgram : array_like
        Lomb-Scargle periodogram.
    Raises
    ------
    ValueError
        If the input arrays `x` and `y` do not have the same shape, or if
        any input array is not one-dimensional.
    Notes
    -----
    This subroutine calculates the periodogram using a slightly
    modified algorithm due to Townsend [3]_ which allows the
    periodogram to be calculated using only a single pass through
    the input arrays for each frequency.
    The algorithm running time scales roughly as O(x * freqs) or O(N^2)
    for a large number of samples and frequencies.
    References
    ----------
    .. [1] N.R. Lomb "Least-squares frequency analysis of unequally spaced
           data", Astrophysics and Space Science, vol 39, pp. 447-462, 1976
    .. [2] J.D. Scargle "Studies in astronomical time series analysis. II -
           Statistical aspects of spectral analysis of unevenly spaced data",
           The Astrophysical Journal, vol 263, pp. 835-853, 1982
    .. [3] R.H.D. Townsend, "Fast calculation of the Lomb-Scargle
           periodogram using graphics processing units.", The Astrophysical
           Journal Supplement Series, vol 191, pp. 247-253, 2010
    Examples
    --------
    >>> import scipy.signal
    >>> import matplotlib.pyplot as plt
    First define some input parameters for the signal:
    >>> A = 2.
    >>> w = 1.
    >>> phi = 0.5 * np.pi
    >>> nin = 1000
    >>> nout = 100000
    >>> frac_points = 0.9 # Fraction of points to select
    Randomly select a fraction of an array with timesteps:
    >>> r = np.random.rand(nin)
    >>> x = np.linspace(0.01, 10*np.pi, nin)
    >>> x = x[r >= frac_points]
    Plot a sine wave for the selected times:
    >>> y = A * np.sin(w*x+phi)
    Define the array of frequencies for which to compute the periodogram:
    >>> f = np.linspace(0.01, 10, nout)
    Calculate Lomb-Scargle periodogram:
    >>> import scipy.signal as signal
    >>> pgram = signal.lombscargle(x, y, f, normalize=True)
    Now make a plot of the input data:
    >>> plt.subplot(2, 1, 1)
    >>> plt.plot(x, y, 'b+')
    Then plot the normalized periodogram:
    >>> plt.subplot(2, 1, 2)
    >>> plt.plot(f, pgram)
    >>> plt.show()
    """
    x = np.asarray(x, dtype=np.float64)
    y = np.asarray(y, dtype=np.float64)
    freqs = np.asarray(freqs, dtype=np.float64)
    # Validate with explicit exceptions rather than `assert`: assert
    # statements are stripped when Python runs with -O, and the docstring
    # promises a ValueError for malformed input.
    if x.ndim != 1:
        raise ValueError("x must be a one-dimensional array")
    if y.ndim != 1:
        raise ValueError("y must be a one-dimensional array")
    if x.shape != y.shape:
        raise ValueError("Input arrays x and y must have the same shape")
    if freqs.ndim != 1:
        raise ValueError("freqs must be a one-dimensional array")
    if precenter:
        pgram = _lombscargle(x, y - y.mean(), freqs)
    else:
        pgram = _lombscargle(x, y, freqs)
    if normalize:
        pgram *= 2 / np.dot(y, y)
    return pgram
def periodogram(x, fs=1.0, window='boxcar', nfft=None, detrend='constant',
                return_onesided=True, scaling='density', axis=-1):
    """
    Estimate power spectral density using a periodogram.
    Parameters
    ----------
    x : array_like
        Time series of measurement values
    fs : float, optional
        Sampling frequency of the `x` time series. Defaults to 1.0.
    window : str or tuple or array_like, optional
        Desired window to use. If `window` is a string or tuple, it is
        passed to `get_window` to generate the window values, which are
        DFT-even by default. See `get_window` for a list of windows and
        required parameters. If `window` is array_like it will be used
        directly as the window and its length must be nperseg. Defaults
        to 'boxcar'.
    nfft : int, optional
        Length of the FFT used. If `None` the length of `x` will be
        used.
    detrend : str or function or `False`, optional
        Specifies how to detrend each segment. If `detrend` is a
        string, it is passed as the `type` argument to the `detrend`
        function. If it is a function, it takes a segment and returns a
        detrended segment. If `detrend` is `False`, no detrending is
        done. Defaults to 'constant'.
    return_onesided : bool, optional
        If `True`, return a one-sided spectrum for real data. If
        `False` return a two-sided spectrum. Note that for complex
        data, a two-sided spectrum is always returned.
    scaling : { 'density', 'spectrum' }, optional
        Selects between computing the power spectral density ('density')
        where `Pxx` has units of V**2/Hz and computing the power
        spectrum ('spectrum') where `Pxx` has units of V**2, if `x`
        is measured in V and `fs` is measured in Hz. Defaults to
        'density'
    axis : int, optional
        Axis along which the periodogram is computed; the default is
        over the last axis (i.e. ``axis=-1``).
    Returns
    -------
    f : ndarray
        Array of sample frequencies.
    Pxx : ndarray
        Power spectral density or power spectrum of `x`.
    Notes
    -----
    .. versionadded:: 0.12.0
    See Also
    --------
    welch: Estimate power spectral density using Welch's method
    lombscargle: Lomb-Scargle periodogram for unevenly sampled data
    Examples
    --------
    >>> from scipy import signal
    >>> import matplotlib.pyplot as plt
    >>> np.random.seed(1234)
    Generate a test signal, a 2 Vrms sine wave at 1234 Hz, corrupted by
    0.001 V**2/Hz of white noise sampled at 10 kHz.
    >>> fs = 10e3
    >>> N = 1e5
    >>> amp = 2*np.sqrt(2)
    >>> freq = 1234.0
    >>> noise_power = 0.001 * fs / 2
    >>> time = np.arange(N) / fs
    >>> x = amp*np.sin(2*np.pi*freq*time)
    >>> x += np.random.normal(scale=np.sqrt(noise_power), size=time.shape)
    Compute and plot the power spectral density.
    >>> f, Pxx_den = signal.periodogram(x, fs)
    >>> plt.semilogy(f, Pxx_den)
    >>> plt.ylim([1e-7, 1e2])
    >>> plt.xlabel('frequency [Hz]')
    >>> plt.ylabel('PSD [V**2/Hz]')
    >>> plt.show()
    If we average the last half of the spectral density, to exclude the
    peak, we can recover the noise power on the signal.
    >>> np.mean(Pxx_den[256:])
    0.0018156616014838548
    Now compute and plot the power spectrum.
    >>> f, Pxx_spec = signal.periodogram(x, fs, 'flattop', scaling='spectrum')
    >>> plt.figure()
    >>> plt.semilogy(f, np.sqrt(Pxx_spec))
    >>> plt.ylim([1e-4, 1e1])
    >>> plt.xlabel('frequency [Hz]')
    >>> plt.ylabel('Linear spectrum [V RMS]')
    >>> plt.show()
    The peak height in the power spectrum is an estimate of the RMS
    amplitude.
    >>> np.sqrt(Pxx_spec.max())
    2.0077340678640727
    """
    x = np.asarray(x)
    if x.size == 0:
        return np.empty(x.shape), np.empty(x.shape)
    if window is None:
        window = 'boxcar'
    if nfft is None:
        nperseg = x.shape[axis]
    elif nfft == x.shape[axis]:
        nperseg = nfft
    elif nfft > x.shape[axis]:
        nperseg = x.shape[axis]
    elif nfft < x.shape[axis]:
        # Truncate x to nfft points along `axis` and do an unpadded FFT.
        s = [np.s_[:]]*len(x.shape)
        s[axis] = np.s_[:nfft]
        # Index with a tuple: indexing an ndarray with a *list* of slices is
        # deprecated in NumPy and removed in recent versions.
        x = x[tuple(s)]
        nperseg = nfft
        nfft = None
    # A periodogram is Welch's method with a single segment (no overlap).
    return welch(x, fs, window, nperseg, 0, nfft, detrend, return_onesided,
                 scaling, axis)
def welch(x, fs=1.0, window='hann', nperseg=None, noverlap=None, nfft=None,
          detrend='constant', return_onesided=True, scaling='density',
          axis=-1):
    r"""
    Estimate power spectral density using Welch's method.
    Welch's method [1]_ computes an estimate of the power spectral
    density by dividing the data into overlapping segments, computing a
    modified periodogram for each segment and averaging the
    periodograms.
    Parameters
    ----------
    x : array_like
        Time series of measurement values
    fs : float, optional
        Sampling frequency of the `x` time series. Defaults to 1.0.
    window : str or tuple or array_like, optional
        Desired window to use. If `window` is a string or tuple, it is
        passed to `get_window` to generate the window values, which are
        DFT-even by default. See `get_window` for a list of windows and
        required parameters. If `window` is array_like it will be used
        directly as the window and its length must be nperseg. Defaults
        to a Hann window.
    nperseg : int, optional
        Length of each segment. Defaults to None, but if window is str or
        tuple, is set to 256, and if window is array_like, is set to the
        length of the window.
    noverlap : int, optional
        Number of points to overlap between segments. If `None`,
        ``noverlap = nperseg // 2``. Defaults to `None`.
    nfft : int, optional
        Length of the FFT used, if a zero padded FFT is desired. If
        `None`, the FFT length is `nperseg`. Defaults to `None`.
    detrend : str or function or `False`, optional
        Specifies how to detrend each segment. If `detrend` is a
        string, it is passed as the `type` argument to the `detrend`
        function. If it is a function, it takes a segment and returns a
        detrended segment. If `detrend` is `False`, no detrending is
        done. Defaults to 'constant'.
    return_onesided : bool, optional
        If `True`, return a one-sided spectrum for real data. If
        `False` return a two-sided spectrum. Note that for complex
        data, a two-sided spectrum is always returned.
    scaling : { 'density', 'spectrum' }, optional
        Selects between computing the power spectral density ('density')
        where `Pxx` has units of V**2/Hz and computing the power
        spectrum ('spectrum') where `Pxx` has units of V**2, if `x`
        is measured in V and `fs` is measured in Hz. Defaults to
        'density'
    axis : int, optional
        Axis along which the periodogram is computed; the default is
        over the last axis (i.e. ``axis=-1``).
    Returns
    -------
    f : ndarray
        Array of sample frequencies.
    Pxx : ndarray
        Power spectral density or power spectrum of x.
    See Also
    --------
    periodogram: Simple, optionally modified periodogram
    lombscargle: Lomb-Scargle periodogram for unevenly sampled data
    Notes
    -----
    An appropriate amount of overlap will depend on the choice of window
    and on your requirements. For the default Hann window an overlap of
    50% is a reasonable trade off between accurately estimating the
    signal power, while not over counting any of the data. Narrower
    windows may require a larger overlap.
    If `noverlap` is 0, this method is equivalent to Bartlett's method
    [2]_.
    .. versionadded:: 0.12.0
    References
    ----------
    .. [1] P. Welch, "The use of the fast Fourier transform for the
           estimation of power spectra: A method based on time averaging
           over short, modified periodograms", IEEE Trans. Audio
           Electroacoust. vol. 15, pp. 70-73, 1967.
    .. [2] M.S. Bartlett, "Periodogram Analysis and Continuous Spectra",
           Biometrika, vol. 37, pp. 1-16, 1950.
    Examples
    --------
    >>> from scipy import signal
    >>> import matplotlib.pyplot as plt
    >>> np.random.seed(1234)
    Generate a test signal, a 2 Vrms sine wave at 1234 Hz, corrupted by
    0.001 V**2/Hz of white noise sampled at 10 kHz.
    >>> fs = 10e3
    >>> N = 1e5
    >>> amp = 2*np.sqrt(2)
    >>> freq = 1234.0
    >>> noise_power = 0.001 * fs / 2
    >>> time = np.arange(N) / fs
    >>> x = amp*np.sin(2*np.pi*freq*time)
    >>> x += np.random.normal(scale=np.sqrt(noise_power), size=time.shape)
    Compute and plot the power spectral density.
    >>> f, Pxx_den = signal.welch(x, fs, nperseg=1024)
    >>> plt.semilogy(f, Pxx_den)
    >>> plt.ylim([0.5e-3, 1])
    >>> plt.xlabel('frequency [Hz]')
    >>> plt.ylabel('PSD [V**2/Hz]')
    >>> plt.show()
    If we average the last half of the spectral density, to exclude the
    peak, we can recover the noise power on the signal.
    >>> np.mean(Pxx_den[256:])
    0.0009924865443739191
    Now compute and plot the power spectrum.
    >>> f, Pxx_spec = signal.welch(x, fs, 'flattop', 1024, scaling='spectrum')
    >>> plt.figure()
    >>> plt.semilogy(f, np.sqrt(Pxx_spec))
    >>> plt.xlabel('frequency [Hz]')
    >>> plt.ylabel('Linear spectrum [V RMS]')
    >>> plt.show()
    The peak height in the power spectrum is an estimate of the RMS
    amplitude.
    >>> np.sqrt(Pxx_spec.max())
    2.0077340678640727
    """
    # Welch's PSD is the auto-spectral density: the CSD of `x` with itself.
    # The imaginary part of Pxx is zero up to rounding, so only the real
    # part is returned.
    freqs, Pxx = csd(x, x, fs=fs, window=window, nperseg=nperseg,
                     noverlap=noverlap, nfft=nfft, detrend=detrend,
                     return_onesided=return_onesided, scaling=scaling,
                     axis=axis)
    return freqs, Pxx.real
def csd(x, y, fs=1.0, window='hann', nperseg=None, noverlap=None, nfft=None,
        detrend='constant', return_onesided=True, scaling='density', axis=-1):
    r"""
    Estimate the cross power spectral density, Pxy, using Welch's
    method.
    Parameters
    ----------
    x : array_like
        Time series of measurement values
    y : array_like
        Time series of measurement values
    fs : float, optional
        Sampling frequency of the `x` and `y` time series. Defaults
        to 1.0.
    window : str or tuple or array_like, optional
        Desired window to use. If `window` is a string or tuple, it is
        passed to `get_window` to generate the window values, which are
        DFT-even by default. See `get_window` for a list of windows and
        required parameters. If `window` is array_like it will be used
        directly as the window and its length must be nperseg. Defaults
        to a Hann window.
    nperseg : int, optional
        Length of each segment. Defaults to None, but if window is str or
        tuple, is set to 256, and if window is array_like, is set to the
        length of the window.
    noverlap: int, optional
        Number of points to overlap between segments. If `None`,
        ``noverlap = nperseg // 2``. Defaults to `None`.
    nfft : int, optional
        Length of the FFT used, if a zero padded FFT is desired. If
        `None`, the FFT length is `nperseg`. Defaults to `None`.
    detrend : str or function or `False`, optional
        Specifies how to detrend each segment. If `detrend` is a
        string, it is passed as the `type` argument to the `detrend`
        function. If it is a function, it takes a segment and returns a
        detrended segment. If `detrend` is `False`, no detrending is
        done. Defaults to 'constant'.
    return_onesided : bool, optional
        If `True`, return a one-sided spectrum for real data. If
        `False` return a two-sided spectrum. Note that for complex
        data, a two-sided spectrum is always returned.
    scaling : { 'density', 'spectrum' }, optional
        Selects between computing the cross spectral density ('density')
        where `Pxy` has units of V**2/Hz and computing the cross spectrum
        ('spectrum') where `Pxy` has units of V**2, if `x` and `y` are
        measured in V and `fs` is measured in Hz. Defaults to 'density'
    axis : int, optional
        Axis along which the CSD is computed for both inputs; the
        default is over the last axis (i.e. ``axis=-1``).
    Returns
    -------
    f : ndarray
        Array of sample frequencies.
    Pxy : ndarray
        Cross spectral density or cross power spectrum of x,y.
    See Also
    --------
    periodogram: Simple, optionally modified periodogram
    lombscargle: Lomb-Scargle periodogram for unevenly sampled data
    welch: Power spectral density by Welch's method. [Equivalent to
           csd(x,x)]
    coherence: Magnitude squared coherence by Welch's method.
    Notes
    --------
    By convention, Pxy is computed with the conjugate FFT of X
    multiplied by the FFT of Y.
    If the input series differ in length, the shorter series will be
    zero-padded to match.
    An appropriate amount of overlap will depend on the choice of window
    and on your requirements. For the default Hann window an overlap of
    50% is a reasonable trade off between accurately estimating the
    signal power, while not over counting any of the data. Narrower
    windows may require a larger overlap.
    .. versionadded:: 0.16.0
    References
    ----------
    .. [1] P. Welch, "The use of the fast Fourier transform for the
           estimation of power spectra: A method based on time averaging
           over short, modified periodograms", IEEE Trans. Audio
           Electroacoust. vol. 15, pp. 70-73, 1967.
    .. [2] Rabiner, Lawrence R., and B. Gold. "Theory and Application of
           Digital Signal Processing" Prentice-Hall, pp. 414-419, 1975
    Examples
    --------
    >>> from scipy import signal
    >>> import matplotlib.pyplot as plt
    Generate two test signals with some common features.
    >>> fs = 10e3
    >>> N = 1e5
    >>> amp = 20
    >>> freq = 1234.0
    >>> noise_power = 0.001 * fs / 2
    >>> time = np.arange(N) / fs
    >>> b, a = signal.butter(2, 0.25, 'low')
    >>> x = np.random.normal(scale=np.sqrt(noise_power), size=time.shape)
    >>> y = signal.lfilter(b, a, x)
    >>> x += amp*np.sin(2*np.pi*freq*time)
    >>> y += np.random.normal(scale=0.1*np.sqrt(noise_power), size=time.shape)
    Compute and plot the magnitude of the cross spectral density.
    >>> f, Pxy = signal.csd(x, y, fs, nperseg=1024)
    >>> plt.semilogy(f, np.abs(Pxy))
    >>> plt.xlabel('frequency [Hz]')
    >>> plt.ylabel('CSD [V**2/Hz]')
    >>> plt.show()
    """
    freqs, _, Pxy = _spectral_helper(x, y, fs, window, nperseg, noverlap, nfft,
                                     detrend, return_onesided, scaling, axis,
                                     mode='psd')
    # Average the per-segment periodograms along the last (time-segment)
    # axis; for a single segment, just drop that axis.
    if Pxy.ndim >= 2 and Pxy.size > 0:
        n_segments = Pxy.shape[-1]
        if n_segments > 1:
            Pxy = Pxy.mean(axis=-1)
        else:
            Pxy = np.reshape(Pxy, Pxy.shape[:-1])
    return freqs, Pxy
def spectrogram(x, fs=1.0, window=('tukey',.25), nperseg=None, noverlap=None,
                nfft=None, detrend='constant', return_onesided=True,
                scaling='density', axis=-1, mode='psd'):
    """
    Compute a spectrogram with consecutive Fourier transforms.
    Spectrograms can be used as a way of visualizing the change of a
    nonstationary signal's frequency content over time.
    Parameters
    ----------
    x : array_like
        Time series of measurement values
    fs : float, optional
        Sampling frequency of the `x` time series. Defaults to 1.0.
    window : str or tuple or array_like, optional
        Desired window to use. If `window` is a string or tuple, it is
        passed to `get_window` to generate the window values, which are
        DFT-even by default. See `get_window` for a list of windows and
        required parameters. If `window` is array_like it will be used
        directly as the window and its length must be nperseg.
        Defaults to a Tukey window with shape parameter of 0.25.
    nperseg : int, optional
        Length of each segment. Defaults to None, but if window is str or
        tuple, is set to 256, and if window is array_like, is set to the
        length of the window.
    noverlap : int, optional
        Number of points to overlap between segments. If `None`,
        ``noverlap = nperseg // 8``. Defaults to `None`.
    nfft : int, optional
        Length of the FFT used, if a zero padded FFT is desired. If
        `None`, the FFT length is `nperseg`. Defaults to `None`.
    detrend : str or function or `False`, optional
        Specifies how to detrend each segment. If `detrend` is a
        string, it is passed as the `type` argument to the `detrend`
        function. If it is a function, it takes a segment and returns a
        detrended segment. If `detrend` is `False`, no detrending is
        done. Defaults to 'constant'.
    return_onesided : bool, optional
        If `True`, return a one-sided spectrum for real data. If
        `False` return a two-sided spectrum. Note that for complex
        data, a two-sided spectrum is always returned.
    scaling : { 'density', 'spectrum' }, optional
        Selects between computing the power spectral density ('density')
        where `Sxx` has units of V**2/Hz and computing the power
        spectrum ('spectrum') where `Sxx` has units of V**2, if `x`
        is measured in V and `fs` is measured in Hz. Defaults to
        'density'.
    axis : int, optional
        Axis along which the spectrogram is computed; the default is over
        the last axis (i.e. ``axis=-1``).
    mode : str, optional
        Defines what kind of return values are expected. Options are
        ['psd', 'complex', 'magnitude', 'angle', 'phase']. 'complex' is
        equivalent to the output of `stft` with no padding or boundary
        extension. 'magnitude' returns the absolute magnitude of the
        STFT. 'angle' and 'phase' return the complex angle of the STFT,
        with and without unwrapping, respectively.
    Returns
    -------
    f : ndarray
        Array of sample frequencies.
    t : ndarray
        Array of segment times.
    Sxx : ndarray
        Spectrogram of x. By default, the last axis of Sxx corresponds
        to the segment times.
    See Also
    --------
    periodogram: Simple, optionally modified periodogram
    lombscargle: Lomb-Scargle periodogram for unevenly sampled data
    welch: Power spectral density by Welch's method.
    csd: Cross spectral density by Welch's method.
    Notes
    -----
    An appropriate amount of overlap will depend on the choice of window
    and on your requirements. In contrast to welch's method, where the
    entire data stream is averaged over, one may wish to use a smaller
    overlap (or perhaps none at all) when computing a spectrogram, to
    maintain some statistical independence between individual segments.
    It is for this reason that the default window is a Tukey window with
    1/8th of a window's length overlap at each end.
    .. versionadded:: 0.16.0
    References
    ----------
    .. [1] Oppenheim, Alan V., Ronald W. Schafer, John R. Buck
           "Discrete-Time Signal Processing", Prentice Hall, 1999.
    Examples
    --------
    >>> from scipy import signal
    >>> import matplotlib.pyplot as plt
    Generate a test signal, a 2 Vrms sine wave whose frequency is slowly
    modulated around 3kHz, corrupted by white noise of exponentially
    decreasing magnitude sampled at 10 kHz.
    >>> fs = 10e3
    >>> N = 1e5
    >>> amp = 2 * np.sqrt(2)
    >>> noise_power = 0.01 * fs / 2
    >>> time = np.arange(N) / float(fs)
    >>> mod = 500*np.cos(2*np.pi*0.25*time)
    >>> carrier = amp * np.sin(2*np.pi*3e3*time + mod)
    >>> noise = np.random.normal(scale=np.sqrt(noise_power), size=time.shape)
    >>> noise *= np.exp(-time/5)
    >>> x = carrier + noise
    Compute and plot the spectrogram.
    >>> f, t, Sxx = signal.spectrogram(x, fs)
    >>> plt.pcolormesh(t, f, Sxx)
    >>> plt.ylabel('Frequency [Hz]')
    >>> plt.xlabel('Time [sec]')
    >>> plt.show()
    """
    modelist = ['psd', 'complex', 'magnitude', 'angle', 'phase']
    if mode not in modelist:
        raise ValueError('unknown value for mode {}, must be one of {}'
                         .format(mode, modelist))
    # nperseg must be resolved before the noverlap default below can be set.
    window, nperseg = _triage_segments(window, nperseg,
                                       input_length=x.shape[axis])
    # Less overlap than welch, so samples are more statisically independent
    if noverlap is None:
        noverlap = nperseg // 8
    # All non-'psd' modes post-process the raw STFT.
    helper_mode = 'psd' if mode == 'psd' else 'stft'
    freqs, time, Sxx = _spectral_helper(x, x, fs, window, nperseg, noverlap,
                                        nfft, detrend, return_onesided,
                                        scaling, axis, mode=helper_mode)
    if mode == 'magnitude':
        Sxx = np.abs(Sxx)
    elif mode in ('angle', 'phase'):
        Sxx = np.angle(Sxx)
        if mode == 'phase':
            # Sxx has one additional dimension for time strides
            if axis < 0:
                axis -= 1
            Sxx = np.unwrap(Sxx, axis=axis)
    # mode == 'complex' is the raw STFT and needs no post-processing.
    return freqs, time, Sxx
def check_COLA(window, nperseg, noverlap, tol=1e-10):
    r"""
    Check whether the Constant OverLap Add (COLA) constraint is met
    Parameters
    ----------
    window : str or tuple or array_like
        Desired window to use. If `window` is a string or tuple, it is
        passed to `get_window` to generate the window values, which are
        DFT-even by default. See `get_window` for a list of windows and
        required parameters. If `window` is array_like it will be used
        directly as the window and its length must be nperseg.
    nperseg : int
        Length of each segment.
    noverlap : int
        Number of points to overlap between segments.
    tol : float, optional
        The allowed variance of a bin's weighted sum from the median bin
        sum.
    Returns
    -------
    verdict : bool
        `True` if chosen combination satisfies COLA within `tol`,
        `False` otherwise
    See Also
    --------
    stft: Short Time Fourier Transform
    istft: Inverse Short Time Fourier Transform
    Notes
    -----
    In order to enable inversion of an STFT via the inverse STFT in
    `istft`, the signal windowing must obey the constraint of "Constant
    OverLap Add" (COLA). This ensures that every point in the input data
    is equally weighted, thereby avoiding aliasing and allowing full
    reconstruction.
    Some examples of windows that satisfy COLA:
        - Rectangular window at overlap of 0, 1/2, 2/3, 3/4, ...
        - Bartlett window at overlap of 1/2, 3/4, 5/6, ...
        - Hann window at 1/2, 2/3, 3/4, ...
        - Any Blackman family window at 2/3 overlap
        - Any window with ``noverlap = nperseg-1``
    A very comprehensive list of other windows may be found in [2]_,
    wherein the COLA condition is satisfied when the "Amplitude
    Flatness" is unity.
    .. versionadded:: 0.19.0
    References
    ----------
    .. [1] Julius O. Smith III, "Spectral Audio Signal Processing", W3K
           Publishing, 2011,ISBN 978-0-9745607-3-1.
    .. [2] G. Heinzel, A. Ruediger and R. Schilling, "Spectrum and
           spectral density estimation by the Discrete Fourier transform
           (DFT), including a comprehensive list of window functions and
           some new at-top windows", 2002,
           http://hdl.handle.net/11858/00-001M-0000-0013-557A-5
    Examples
    --------
    >>> from scipy import signal
    Confirm COLA condition for rectangular window of 75% (3/4) overlap:
    >>> signal.check_COLA(signal.boxcar(100), 100, 75)
    True
    COLA is not true for 25% (1/4) overlap, though:
    >>> signal.check_COLA(signal.boxcar(100), 100, 25)
    False
    "Symmetrical" Hann window (for filter design) is not COLA:
    >>> signal.check_COLA(signal.hann(120, sym=True), 120, 60)
    False
    "Periodic" or "DFT-even" Hann window (for FFT analysis) is COLA for
    overlap of 1/2, 2/3, 3/4, etc.:
    >>> signal.check_COLA(signal.hann(120, sym=False), 120, 60)
    True
    >>> signal.check_COLA(signal.hann(120, sym=False), 120, 80)
    True
    >>> signal.check_COLA(signal.hann(120, sym=False), 120, 90)
    True
    """
    nperseg = int(nperseg)
    if nperseg < 1:
        raise ValueError('nperseg must be a positive integer')
    if noverlap >= nperseg:
        raise ValueError('noverlap must be less than nperseg.')
    noverlap = int(noverlap)
    if isinstance(window, string_types) or type(window) is tuple:
        win = get_window(window, nperseg)
    else:
        win = np.asarray(window)
        if len(win.shape) != 1:
            raise ValueError('window must be 1-D')
        if win.shape[0] != nperseg:
            raise ValueError('window must have length of nperseg')
    step = nperseg - noverlap
    # Fold the window onto itself at the hop interval. Use a list, not a
    # generator: np.sum on a generator falls back (deprecated in NumPy) to
    # builtin sum and silently ignores the `axis` argument.
    binsums = np.sum([win[ii*step:(ii+1)*step] for ii in range(nperseg//step)],
                     axis=0)
    # The leftover partial hop at the end of the window also overlaps the
    # first bins.
    if nperseg % step != 0:
        binsums[:nperseg % step] += win[-(nperseg % step):]
    # COLA holds when every bin receives (nearly) the same total weight.
    deviation = binsums - np.median(binsums)
    return np.max(np.abs(deviation)) < tol
def stft(x, fs=1.0, window='hann', nperseg=256, noverlap=None, nfft=None,
         detrend=False, return_onesided=True, boundary='zeros', padded=True,
         axis=-1):
    r"""
    Compute the Short Time Fourier Transform (STFT).
    STFTs can be used as a way of quantifying the change of a
    nonstationary signal's frequency and phase content over time.
    Parameters
    ----------
    x : array_like
        Time series of measurement values
    fs : float, optional
        Sampling frequency of the `x` time series. Defaults to 1.0.
    window : str or tuple or array_like, optional
        Desired window to use. If `window` is a string or tuple, it is
        passed to `get_window` to generate the window values, which are
        DFT-even by default. See `get_window` for a list of windows and
        required parameters. If `window` is array_like it will be used
        directly as the window and its length must be nperseg. Defaults
        to a Hann window.
    nperseg : int, optional
        Length of each segment. Defaults to 256.
    noverlap : int, optional
        Number of points to overlap between segments. If `None`,
        ``noverlap = nperseg // 2``. Defaults to `None`. When
        specified, the COLA constraint must be met (see Notes below).
    nfft : int, optional
        Length of the FFT used, if a zero padded FFT is desired. If
        `None`, the FFT length is `nperseg`. Defaults to `None`.
    detrend : str or function or `False`, optional
        Specifies how to detrend each segment. If `detrend` is a
        string, it is passed as the `type` argument to the `detrend`
        function. If it is a function, it takes a segment and returns a
        detrended segment. If `detrend` is `False`, no detrending is
        done. Defaults to `False`.
    return_onesided : bool, optional
        If `True`, return a one-sided spectrum for real data. If
        `False` return a two-sided spectrum. Note that for complex
        data, a two-sided spectrum is always returned. Defaults to
        `True`.
    boundary : str or None, optional
        Specifies whether the input signal is extended at both ends, and
        how to generate the new values, in order to center the first
        windowed segment on the first input point. This has the benefit
        of enabling reconstruction of the first input point when the
        employed window function starts at zero. Valid options are
        ``['even', 'odd', 'constant', 'zeros', None]``. Defaults to
        'zeros', for zero padding extension. I.e. ``[1, 2, 3, 4]`` is
        extended to ``[0, 1, 2, 3, 4, 0]`` for ``nperseg=3``.
    padded : bool, optional
        Specifies whether the input signal is zero-padded at the end to
        make the signal fit exactly into an integer number of window
        segments, so that all of the signal is included in the output.
        Defaults to `True`. Padding occurs after boundary extension, if
        `boundary` is not `None`, and `padded` is `True`, as is the
        default.
    axis : int, optional
        Axis along which the STFT is computed; the default is over the
        last axis (i.e. ``axis=-1``).
    Returns
    -------
    f : ndarray
        Array of sample frequencies.
    t : ndarray
        Array of segment times.
    Zxx : ndarray
        STFT of `x`. By default, the last axis of `Zxx` corresponds
        to the segment times.
    See Also
    --------
    istft: Inverse Short Time Fourier Transform
    check_COLA: Check whether the Constant OverLap Add (COLA) constraint
                is met
    welch: Power spectral density by Welch's method.
    spectrogram: Spectrogram by Welch's method.
    csd: Cross spectral density by Welch's method.
    lombscargle: Lomb-Scargle periodogram for unevenly sampled data
    Notes
    -----
    In order to enable inversion of an STFT via the inverse STFT in
    `istft`, the signal windowing must obey the constraint of "Constant
    OverLap Add" (COLA), and the input signal must have complete
    windowing coverage (i.e. ``(x.shape[axis] - nperseg) %
    (nperseg-noverlap) == 0``). The `padded` argument may be used to
    accomplish this.
    The COLA constraint ensures that every point in the input data is
    equally weighted, thereby avoiding aliasing and allowing full
    reconstruction. Whether a choice of `window`, `nperseg`, and
    `noverlap` satisfy this constraint can be tested with
    `check_COLA`.
    .. versionadded:: 0.19.0
    References
    ----------
    .. [1] Oppenheim, Alan V., Ronald W. Schafer, John R. Buck
           "Discrete-Time Signal Processing", Prentice Hall, 1999.
    .. [2] Daniel W. Griffin, Jae S. Limdt "Signal Estimation from
           Modified Short Fourier Transform", IEEE 1984,
           10.1109/TASSP.1984.1164317
    Examples
    --------
    >>> from scipy import signal
    >>> import matplotlib.pyplot as plt
    Generate a test signal, a 2 Vrms sine wave whose frequency is slowly
    modulated around 3kHz, corrupted by white noise of exponentially
    decreasing magnitude sampled at 10 kHz.
    >>> fs = 10e3
    >>> N = 1e5
    >>> amp = 2 * np.sqrt(2)
    >>> noise_power = 0.01 * fs / 2
    >>> time = np.arange(N) / float(fs)
    >>> mod = 500*np.cos(2*np.pi*0.25*time)
    >>> carrier = amp * np.sin(2*np.pi*3e3*time + mod)
    >>> noise = np.random.normal(scale=np.sqrt(noise_power),
    ...                          size=time.shape)
    >>> noise *= np.exp(-time/5)
    >>> x = carrier + noise
    Compute and plot the STFT's magnitude.
    >>> f, t, Zxx = signal.stft(x, fs, nperseg=1000)
    >>> plt.pcolormesh(t, f, np.abs(Zxx), vmin=0, vmax=amp)
    >>> plt.title('STFT Magnitude')
    >>> plt.ylabel('Frequency [Hz]')
    >>> plt.xlabel('Time [sec]')
    >>> plt.show()
    """
    # The STFT is the complex, unaveraged output of the shared spectral
    # helper with 'spectrum' scaling.
    result = _spectral_helper(x, x, fs, window, nperseg, noverlap, nfft,
                              detrend, return_onesided, scaling='spectrum',
                              axis=axis, mode='stft', boundary=boundary,
                              padded=padded)
    freqs, seg_times, Zxx = result
    return freqs, seg_times, Zxx
def istft(Zxx, fs=1.0, window='hann', nperseg=None, noverlap=None, nfft=None,
          input_onesided=True, boundary=True, time_axis=-1, freq_axis=-2):
    r"""
    Perform the inverse Short Time Fourier transform (iSTFT).
    Parameters
    ----------
    Zxx : array_like
        STFT of the signal to be reconstructed. If a purely real array
        is passed, it will be cast to a complex data type.
    fs : float, optional
        Sampling frequency of the time series. Defaults to 1.0.
    window : str or tuple or array_like, optional
        Desired window to use. If `window` is a string or tuple, it is
        passed to `get_window` to generate the window values, which are
        DFT-even by default. See `get_window` for a list of windows and
        required parameters. If `window` is array_like it will be used
        directly as the window and its length must be nperseg. Defaults
        to a Hann window. Must match the window used to generate the
        STFT for faithful inversion.
    nperseg : int, optional
        Number of data points corresponding to each STFT segment. This
        parameter must be specified if the number of data points per
        segment is odd, or if the STFT was padded via ``nfft >
        nperseg``. If `None`, the value depends on the shape of
        `Zxx` and `input_onesided`. If `input_onesided` is True,
        ``nperseg=2*(Zxx.shape[freq_axis] - 1)``. Otherwise,
        ``nperseg=Zxx.shape[freq_axis]``. Defaults to `None`.
    noverlap : int, optional
        Number of points to overlap between segments. If `None`, half
        of the segment length. Defaults to `None`. When specified, the
        COLA constraint must be met (see Notes below), and should match
        the parameter used to generate the STFT. Defaults to `None`.
    nfft : int, optional
        Number of FFT points corresponding to each STFT segment. This
        parameter must be specified if the STFT was padded via ``nfft >
        nperseg``. If `None`, the default values are the same as for
        `nperseg`, detailed above, with one exception: if
        `input_onesided` is True and
        ``nperseg==2*Zxx.shape[freq_axis] - 1``, `nfft` also takes on
        that value. This case allows the proper inversion of an
        odd-length unpadded STFT using ``nfft=None``. Defaults to
        `None`.
    input_onesided : bool, optional
        If `True`, interpret the input array as one-sided FFTs, such
        as is returned by `stft` with ``return_onesided=True`` and
        `numpy.fft.rfft`. If `False`, interpret the input as a
        two-sided FFT. Defaults to `True`.
    boundary : bool, optional
        Specifies whether the input signal was extended at its
        boundaries by supplying a non-`None` ``boundary`` argument to
        `stft`. Defaults to `True`.
    time_axis : int, optional
        Where the time segments of the STFT is located; the default is
        the last axis (i.e. ``axis=-1``).
    freq_axis : int, optional
        Where the frequency axis of the STFT is located; the default is
        the penultimate axis (i.e. ``axis=-2``).
    Returns
    -------
    t : ndarray
        Array of output data times.
    x : ndarray
        iSTFT of `Zxx`.
    See Also
    --------
    stft: Short Time Fourier Transform
    check_COLA: Check whether the Constant OverLap Add (COLA) constraint
        is met
    Notes
    -----
    In order to enable inversion of an STFT via the inverse STFT with
    `istft`, the signal windowing must obey the constraint of "Constant
    OverLap Add" (COLA). This ensures that every point in the input data
    is equally weighted, thereby avoiding aliasing and allowing full
    reconstruction. Whether a choice of `window`, `nperseg`, and
    `noverlap` satisfy this constraint can be tested with
    `check_COLA`, by using ``nperseg = Zxx.shape[freq_axis]``.
    An STFT which has been modified (via masking or otherwise) is not
    guaranteed to correspond to an exactly realizable signal. This
    function implements the iSTFT via the least-squares estimation
    algorithm detailed in [2]_, which produces a signal that minimizes
    the mean squared error between the STFT of the returned signal and
    the modified STFT.
    .. versionadded:: 0.19.0
    References
    ----------
    .. [1] Oppenheim, Alan V., Ronald W. Schafer, John R. Buck
           "Discrete-Time Signal Processing", Prentice Hall, 1999.
    .. [2] Daniel W. Griffin, Jae S. Lim "Signal Estimation from
           Modified Short Fourier Transform", IEEE 1984,
           10.1109/TASSP.1984.1164317
    Examples
    --------
    >>> from scipy import signal
    >>> import matplotlib.pyplot as plt
    Generate a test signal, a 2 Vrms sine wave at 50Hz corrupted by
    0.001 V**2/Hz of white noise sampled at 1024 Hz.
    >>> fs = 1024
    >>> N = 10*fs
    >>> nperseg = 512
    >>> amp = 2 * np.sqrt(2)
    >>> noise_power = 0.001 * fs / 2
    >>> time = np.arange(N) / float(fs)
    >>> carrier = amp * np.sin(2*np.pi*50*time)
    >>> noise = np.random.normal(scale=np.sqrt(noise_power),
    ...                          size=time.shape)
    >>> x = carrier + noise
    Compute the STFT, and plot its magnitude
    >>> f, t, Zxx = signal.stft(x, fs=fs, nperseg=nperseg)
    >>> plt.figure()
    >>> plt.pcolormesh(t, f, np.abs(Zxx), vmin=0, vmax=amp)
    >>> plt.ylim([f[1], f[-1]])
    >>> plt.title('STFT Magnitude')
    >>> plt.ylabel('Frequency [Hz]')
    >>> plt.xlabel('Time [sec]')
    >>> plt.yscale('log')
    >>> plt.show()
    Zero the components that are 10% or less of the carrier magnitude,
    then convert back to a time series via inverse STFT
    >>> Zxx = np.where(np.abs(Zxx) >= amp/10, Zxx, 0)
    >>> _, xrec = signal.istft(Zxx, fs)
    Compare the cleaned signal with the original and true carrier signals.
    >>> plt.figure()
    >>> plt.plot(time, x, time, xrec, time, carrier)
    >>> plt.xlim([2, 2.1])
    >>> plt.xlabel('Time [sec]')
    >>> plt.ylabel('Signal')
    >>> plt.legend(['Carrier + Noise', 'Filtered via STFT', 'True Carrier'])
    >>> plt.show()
    Note that the cleaned signal does not start as abruptly as the original,
    since some of the coefficients of the transient were also removed:
    >>> plt.figure()
    >>> plt.plot(time, x, time, xrec, time, carrier)
    >>> plt.xlim([0, 0.1])
    >>> plt.xlabel('Time [sec]')
    >>> plt.ylabel('Signal')
    >>> plt.legend(['Carrier + Noise', 'Filtered via STFT', 'True Carrier'])
    >>> plt.show()
    """
    # Make sure input is an ndarray of appropriate complex dtype
    Zxx = np.asarray(Zxx) + 0j
    freq_axis = int(freq_axis)
    time_axis = int(time_axis)
    if Zxx.ndim < 2:
        raise ValueError('Input stft must be at least 2d!')
    if freq_axis == time_axis:
        raise ValueError('Must specify differing time and frequency axes!')
    # Number of STFT time segments along the time axis
    nseg = Zxx.shape[time_axis]
    if input_onesided:
        # Assume even segment length
        n_default = 2*(Zxx.shape[freq_axis] - 1)
    else:
        n_default = Zxx.shape[freq_axis]
    # Check windowing parameters
    if nperseg is None:
        nperseg = n_default
    else:
        nperseg = int(nperseg)
        if nperseg < 1:
            raise ValueError('nperseg must be a positive integer')
    # Resolve the FFT length; a one-sided STFT of an odd, unpadded segment
    # length has nperseg == n_default + 1 and needs nfft == nperseg.
    if nfft is None:
        if (input_onesided) and (nperseg == n_default + 1):
            # Odd nperseg, no FFT padding
            nfft = nperseg
        else:
            nfft = n_default
    elif nfft < nperseg:
        raise ValueError('nfft must be greater than or equal to nperseg.')
    else:
        nfft = int(nfft)
    if noverlap is None:
        noverlap = nperseg//2
    else:
        noverlap = int(noverlap)
    if noverlap >= nperseg:
        raise ValueError('noverlap must be less than nperseg.')
    # Hop size between consecutive segments
    nstep = nperseg - noverlap
    # Least-squares inversion is only valid when the window satisfies COLA
    if not check_COLA(window, nperseg, noverlap):
        raise ValueError('Window, STFT shape and noverlap do not satisfy the '
                         'COLA constraint.')
    # Rearrange axes if necessary so Zxx ends with (..., freq, time)
    if time_axis != Zxx.ndim-1 or freq_axis != Zxx.ndim-2:
        # Turn negative indices to positive for the call to transpose
        if freq_axis < 0:
            freq_axis = Zxx.ndim + freq_axis
        if time_axis < 0:
            time_axis = Zxx.ndim + time_axis
        zouter = list(range(Zxx.ndim))
        for ax in sorted([time_axis, freq_axis], reverse=True):
            zouter.pop(ax)
        Zxx = np.transpose(Zxx, zouter+[freq_axis, time_axis])
    # Get window as array
    if isinstance(window, string_types) or type(window) is tuple:
        win = get_window(window, nperseg)
    else:
        win = np.asarray(window)
        if len(win.shape) != 1:
            raise ValueError('window must be 1-D')
        if win.shape[0] != nperseg:
            raise ValueError('window must have length of {0}'.format(nperseg))
    # Inverse transform each segment; irfft for one-sided input, ifft otherwise
    if input_onesided:
        ifunc = np.fft.irfft
    else:
        ifunc = fftpack.ifft
    # Drop any FFT zero-padding (samples beyond nperseg) from each segment
    xsubs = ifunc(Zxx, axis=-2, n=nfft)[..., :nperseg, :]
    # Initialize output and normalization arrays
    outputlength = nperseg + (nseg-1)*nstep
    x = np.zeros(list(Zxx.shape[:-2])+[outputlength], dtype=xsubs.dtype)
    norm = np.zeros(outputlength, dtype=xsubs.dtype)
    if np.result_type(win, xsubs) != xsubs.dtype:
        win = win.astype(xsubs.dtype)
    xsubs *= win.sum()  # This takes care of the 'spectrum' scaling
    # Construct the output from the ifft segments
    # This loop could perhaps be vectorized/strided somehow...
    for ii in range(nseg):
        # Window the ifft
        x[..., ii*nstep:ii*nstep+nperseg] += xsubs[..., ii] * win
        norm[..., ii*nstep:ii*nstep+nperseg] += win**2
    # Divide out normalization where non-tiny
    x /= np.where(norm > 1e-10, norm, 1.0)
    # Remove extension points
    if boundary:
        x = x[..., nperseg//2:-(nperseg//2)]
    if input_onesided:
        x = x.real
    # Put axes back
    if x.ndim > 1:
        if time_axis != Zxx.ndim-1:
            if freq_axis < time_axis:
                time_axis -= 1
            x = np.rollaxis(x, -1, time_axis)
    # NOTE(review): the time vector is built from x.shape[0]; for
    # multi-dimensional input with a non-default time_axis this looks like it
    # should be the length of the time axis instead — confirm against callers.
    time = np.arange(x.shape[0])/float(fs)
    return time, x
def coherence(x, y, fs=1.0, window='hann', nperseg=None, noverlap=None,
              nfft=None, detrend='constant', axis=-1):
    r"""
    Estimate the magnitude squared coherence estimate, Cxy, of
    discrete-time signals X and Y using Welch's method.

    ``Cxy = abs(Pxy)**2/(Pxx*Pyy)``, where `Pxx` and `Pyy` are power
    spectral density estimates of X and Y, and `Pxy` is the cross
    spectral density estimate of X and Y.

    Parameters
    ----------
    x : array_like
        Time series of measurement values
    y : array_like
        Time series of measurement values
    fs : float, optional
        Sampling frequency of the `x` and `y` time series. Defaults
        to 1.0.
    window : str or tuple or array_like, optional
        Desired window to use. A string or tuple is passed to
        `get_window` to generate the window values (DFT-even by
        default); see `get_window` for available windows and their
        parameters. An array_like is used directly as the window, and
        its length must equal `nperseg`. Defaults to a Hann window.
    nperseg : int, optional
        Length of each segment. Defaults to None, but if window is str or
        tuple, is set to 256, and if window is array_like, is set to the
        length of the window.
    noverlap : int, optional
        Number of points to overlap between segments. If `None`,
        ``noverlap = nperseg // 2``. Defaults to `None`.
    nfft : int, optional
        Length of the FFT used, if a zero padded FFT is desired. If
        `None`, the FFT length is `nperseg`. Defaults to `None`.
    detrend : str or function or `False`, optional
        Specifies how to detrend each segment. A string is passed as the
        `type` argument to the `detrend` function; a callable takes a
        segment and returns a detrended segment; `False` disables
        detrending. Defaults to 'constant'.
    axis : int, optional
        Axis along which the coherence is computed for both inputs; the
        default is over the last axis (i.e. ``axis=-1``).

    Returns
    -------
    f : ndarray
        Array of sample frequencies.
    Cxy : ndarray
        Magnitude squared coherence of x and y.

    See Also
    --------
    periodogram: Simple, optionally modified periodogram
    lombscargle: Lomb-Scargle periodogram for unevenly sampled data
    welch: Power spectral density by Welch's method.
    csd: Cross spectral density by Welch's method.

    Notes
    -----
    An appropriate amount of overlap will depend on the choice of window
    and on your requirements. For the default Hann window an overlap of
    50% is a reasonable trade off between accurately estimating the
    signal power, while not over counting any of the data. Narrower
    windows may require a larger overlap.
    .. versionadded:: 0.16.0

    References
    ----------
    .. [1] P. Welch, "The use of the fast Fourier transform for the
           estimation of power spectra: A method based on time averaging
           over short, modified periodograms", IEEE Trans. Audio
           Electroacoust. vol. 15, pp. 70-73, 1967.
    .. [2] Stoica, Petre, and Randolph Moses, "Spectral Analysis of
           Signals" Prentice Hall, 2005

    Examples
    --------
    >>> from scipy import signal
    >>> import matplotlib.pyplot as plt
    Generate two test signals with some common features.
    >>> fs = 10e3
    >>> N = 1e5
    >>> amp = 20
    >>> freq = 1234.0
    >>> noise_power = 0.001 * fs / 2
    >>> time = np.arange(N) / fs
    >>> b, a = signal.butter(2, 0.25, 'low')
    >>> x = np.random.normal(scale=np.sqrt(noise_power), size=time.shape)
    >>> y = signal.lfilter(b, a, x)
    >>> x += amp*np.sin(2*np.pi*freq*time)
    >>> y += np.random.normal(scale=0.1*np.sqrt(noise_power), size=time.shape)
    Compute and plot the coherence.
    >>> f, Cxy = signal.coherence(x, y, fs, nperseg=1024)
    >>> plt.semilogy(f, Cxy)
    >>> plt.xlabel('frequency [Hz]')
    >>> plt.ylabel('Coherence')
    >>> plt.show()
    """
    # Auto-spectra of each input plus their cross-spectrum; all three use
    # identical segmentation parameters so the frequency bins line up.
    freqs, psd_x = welch(x, fs, window, nperseg, noverlap, nfft, detrend,
                         axis=axis)
    _, psd_y = welch(y, fs, window, nperseg, noverlap, nfft, detrend,
                     axis=axis)
    _, csd_xy = csd(x, y, fs, window, nperseg, noverlap, nfft, detrend,
                    axis=axis)
    # |Pxy|^2 / Pxx / Pyy, evaluated as two successive divisions.
    coherence_xy = np.abs(csd_xy)**2 / psd_x / psd_y
    return freqs, coherence_xy
def _spectral_helper(x, y, fs=1.0, window='hann', nperseg=None, noverlap=None,
                     nfft=None, detrend='constant', return_onesided=True,
                     scaling='spectrum', axis=-1, mode='psd', boundary=None,
                     padded=False):
    """
    Calculate various forms of windowed FFTs for PSD, CSD, etc.
    This is a helper function that implements the commonality between
    the stft, psd, csd, and spectrogram functions. It is not designed to
    be called externally. The windows are not averaged over; the result
    from each window is returned.
    Parameters
    ----------
    x : array_like
        Array or sequence containing the data to be analyzed.
    y : array_like
        Array or sequence containing the data to be analyzed. If this is
        the same object in memory as `x` (i.e. ``_spectral_helper(x,
        x, ...)``), the extra computations are spared.
    fs : float, optional
        Sampling frequency of the time series. Defaults to 1.0.
    window : str or tuple or array_like, optional
        Desired window to use. If `window` is a string or tuple, it is
        passed to `get_window` to generate the window values, which are
        DFT-even by default. See `get_window` for a list of windows and
        required parameters. If `window` is array_like it will be used
        directly as the window and its length must be nperseg. Defaults
        to a Hann window.
    nperseg : int, optional
        Length of each segment. Defaults to None, but if window is str or
        tuple, is set to 256, and if window is array_like, is set to the
        length of the window.
    noverlap : int, optional
        Number of points to overlap between segments. If `None`,
        ``noverlap = nperseg // 2``. Defaults to `None`.
    nfft : int, optional
        Length of the FFT used, if a zero padded FFT is desired. If
        `None`, the FFT length is `nperseg`. Defaults to `None`.
    detrend : str or function or `False`, optional
        Specifies how to detrend each segment. If `detrend` is a
        string, it is passed as the `type` argument to the `detrend`
        function. If it is a function, it takes a segment and returns a
        detrended segment. If `detrend` is `False`, no detrending is
        done. Defaults to 'constant'.
    return_onesided : bool, optional
        If `True`, return a one-sided spectrum for real data. If
        `False` return a two-sided spectrum. Note that for complex
        data, a two-sided spectrum is always returned.
    scaling : { 'density', 'spectrum' }, optional
        Selects between computing the cross spectral density ('density')
        where `Pxy` has units of V**2/Hz and computing the cross
        spectrum ('spectrum') where `Pxy` has units of V**2, if `x`
        and `y` are measured in V and `fs` is measured in Hz.
        Defaults to 'density'
    axis : int, optional
        Axis along which the FFTs are computed; the default is over the
        last axis (i.e. ``axis=-1``).
    mode: str {'psd', 'stft'}, optional
        Defines what kind of return values are expected. Defaults to
        'psd'.
    boundary : str or None, optional
        Specifies whether the input signal is extended at both ends, and
        how to generate the new values, in order to center the first
        windowed segment on the first input point. This has the benefit
        of enabling reconstruction of the first input point when the
        employed window function starts at zero. Valid options are
        ``['even', 'odd', 'constant', 'zeros', None]``. Defaults to
        `None`.
    padded : bool, optional
        Specifies whether the input signal is zero-padded at the end to
        make the signal fit exactly into an integer number of window
        segments, so that all of the signal is included in the output.
        Defaults to `False`. Padding occurs after boundary extension, if
        `boundary` is not `None`, and `padded` is `True`.
    Returns
    -------
    freqs : ndarray
        Array of sample frequencies.
    t : ndarray
        Array of times corresponding to each data segment
    result : ndarray
        Array of output data, contents dependent on *mode* kwarg.
    References
    ----------
    .. [1] Stack Overflow, "Rolling window for 1D arrays in Numpy?",
           http://stackoverflow.com/a/6811241
    .. [2] Stack Overflow, "Using strides for an efficient moving
           average filter", http://stackoverflow.com/a/4947453
    Notes
    -----
    Adapted from matplotlib.mlab
    .. versionadded:: 0.16.0
    """
    if mode not in ['psd', 'stft']:
        raise ValueError("Unknown value for mode %s, must be one of: "
                         "{'psd', 'stft'}" % mode)
    # Map the boundary option onto the corresponding extension function;
    # the helpers are module-level (even_ext, odd_ext, const_ext, zero_ext).
    boundary_funcs = {'even': even_ext,
                      'odd': odd_ext,
                      'constant': const_ext,
                      'zeros': zero_ext,
                      None: None}
    if boundary not in boundary_funcs:
        raise ValueError("Unknown boundary option '{0}', must be one of: {1}"
                         .format(boundary, list(boundary_funcs.keys())))
    # If x and y are the same object we can save ourselves some computation.
    same_data = y is x
    if not same_data and mode != 'psd':
        raise ValueError("x and y must be equal if mode is 'stft'")
    axis = int(axis)
    # Ensure we have np.arrays, get outdtype
    x = np.asarray(x)
    if not same_data:
        y = np.asarray(y)
        outdtype = np.result_type(x, y, np.complex64)
    else:
        outdtype = np.result_type(x, np.complex64)
    if not same_data:
        # Check if we can broadcast the outer axes together
        xouter = list(x.shape)
        youter = list(y.shape)
        xouter.pop(axis)
        youter.pop(axis)
        try:
            outershape = np.broadcast(np.empty(xouter), np.empty(youter)).shape
        except ValueError:
            raise ValueError('x and y cannot be broadcast together.')
    # Empty input: return empty arrays of the appropriate shape immediately.
    if same_data:
        if x.size == 0:
            return np.empty(x.shape), np.empty(x.shape), np.empty(x.shape)
    else:
        if x.size == 0 or y.size == 0:
            outshape = outershape + (min([x.shape[axis], y.shape[axis]]),)
            emptyout = np.rollaxis(np.empty(outshape), -1, axis)
            return emptyout, emptyout, emptyout
    # Move the data axis to the end; it is rolled back before returning.
    if x.ndim > 1:
        if axis != -1:
            x = np.rollaxis(x, axis, len(x.shape))
            if not same_data and y.ndim > 1:
                y = np.rollaxis(y, axis, len(y.shape))
    # Check if x and y are the same length, zero-pad if necessary
    if not same_data:
        if x.shape[-1] != y.shape[-1]:
            if x.shape[-1] < y.shape[-1]:
                pad_shape = list(x.shape)
                pad_shape[-1] = y.shape[-1] - x.shape[-1]
                x = np.concatenate((x, np.zeros(pad_shape)), -1)
            else:
                pad_shape = list(y.shape)
                pad_shape[-1] = x.shape[-1] - y.shape[-1]
                y = np.concatenate((y, np.zeros(pad_shape)), -1)
    if nperseg is not None:  # if specified by user
        nperseg = int(nperseg)
        if nperseg < 1:
            raise ValueError('nperseg must be a positive integer')
    # parse window; if array like, then set nperseg = win.shape
    win, nperseg = _triage_segments(window, nperseg,input_length=x.shape[-1])
    if nfft is None:
        nfft = nperseg
    elif nfft < nperseg:
        raise ValueError('nfft must be greater than or equal to nperseg.')
    else:
        nfft = int(nfft)
    if noverlap is None:
        noverlap = nperseg//2
    else:
        noverlap = int(noverlap)
    if noverlap >= nperseg:
        raise ValueError('noverlap must be less than nperseg.')
    # Hop size between consecutive segments
    nstep = nperseg - noverlap
    # Padding occurs after boundary extension, so that the extended signal ends
    # in zeros, instead of introducing an impulse at the end.
    # I.e. if x = [..., 3, 2]
    # extend then pad -> [..., 3, 2, 2, 3, 0, 0, 0]
    # pad then extend -> [..., 3, 2, 0, 0, 0, 2, 3]
    if boundary is not None:
        ext_func = boundary_funcs[boundary]
        x = ext_func(x, nperseg//2, axis=-1)
        if not same_data:
            y = ext_func(y, nperseg//2, axis=-1)
    if padded:
        # Pad to integer number of windowed segments
        # I.e make x.shape[-1] = nperseg + (nseg-1)*nstep, with integer nseg
        nadd = (-(x.shape[-1]-nperseg) % nstep) % nperseg
        zeros_shape = list(x.shape[:-1]) + [nadd]
        x = np.concatenate((x, np.zeros(zeros_shape)), axis=-1)
        if not same_data:
            zeros_shape = list(y.shape[:-1]) + [nadd]
            y = np.concatenate((y, np.zeros(zeros_shape)), axis=-1)
    # Handle detrending and window functions
    if not detrend:
        # No detrending: identity function
        def detrend_func(d):
            return d
    elif not hasattr(detrend, '__call__'):
        # String option: forward to signaltools.detrend as its `type` argument
        def detrend_func(d):
            return signaltools.detrend(d, type=detrend, axis=-1)
    elif axis != -1:
        # Wrap this function so that it receives a shape that it could
        # reasonably expect to receive.
        def detrend_func(d):
            d = np.rollaxis(d, -1, axis)
            d = detrend(d)
            return np.rollaxis(d, axis, len(d.shape))
    else:
        detrend_func = detrend
    if np.result_type(win,np.complex64) != outdtype:
        win = win.astype(outdtype)
    if scaling == 'density':
        scale = 1.0 / (fs * (win*win).sum())
    elif scaling == 'spectrum':
        scale = 1.0 / win.sum()**2
    else:
        raise ValueError('Unknown scaling: %r' % scaling)
    if mode == 'stft':
        # STFT returns amplitudes, not powers, so take the square root here
        scale = np.sqrt(scale)
    # A one-sided spectrum is only meaningful for real input; complex data
    # forces a two-sided spectrum with a warning.
    if return_onesided:
        if np.iscomplexobj(x):
            sides = 'twosided'
            warnings.warn('Input data is complex, switching to '
                          'return_onesided=False')
        else:
            sides = 'onesided'
            if not same_data:
                if np.iscomplexobj(y):
                    sides = 'twosided'
                    warnings.warn('Input data is complex, switching to '
                                  'return_onesided=False')
    else:
        sides = 'twosided'
    if sides == 'twosided':
        freqs = fftpack.fftfreq(nfft, 1/fs)
    elif sides == 'onesided':
        freqs = np.fft.rfftfreq(nfft, 1/fs)
    # Perform the windowed FFTs
    result = _fft_helper(x, win, detrend_func, nperseg, noverlap, nfft, sides)
    if not same_data:
        # All the same operations on the y data
        result_y = _fft_helper(y, win, detrend_func, nperseg, noverlap, nfft,
                               sides)
        result = np.conjugate(result) * result_y
    elif mode == 'psd':
        result = np.conjugate(result) * result
    result *= scale
    if sides == 'onesided' and mode == 'psd':
        # Double the one-sided PSD to conserve total power (DC excluded)
        if nfft % 2:
            result[..., 1:] *= 2
        else:
            # Last point is unpaired Nyquist freq point, don't double
            result[..., 1:-1] *= 2
    # Segment center times; shifted back when a boundary extension was applied
    time = np.arange(nperseg/2, x.shape[-1] - nperseg/2 + 1,
                     nperseg - noverlap)/float(fs)
    if boundary is not None:
        time -= (nperseg/2) / fs
    result = result.astype(outdtype)
    # All imaginary parts are zero anyways
    if same_data and mode != 'stft':
        result = result.real
    # Output is going to have new last axis for time/window index, so a
    # negative axis index shifts down one
    if axis < 0:
        axis -= 1
    # Roll frequency axis back to axis where the data came from
    result = np.rollaxis(result, -1, axis)
    return freqs, time, result
def _fft_helper(x, win, detrend_func, nperseg, noverlap, nfft, sides):
    """
    Calculate windowed FFT, for internal use by
    scipy.signal._spectral_helper.

    All input validation is performed in `_spectral_helper`, and the data
    axis is assumed to be the last axis of `x`. Not designed to be called
    externally. The windows are not averaged over; the result from each
    window is returned.

    Returns
    -------
    result : ndarray
        Array of FFT data, one entry per segment along the second-to-last
        axis.

    References
    ----------
    .. [1] Stack Overflow, "Repeat NumPy array without replicating
           data?", http://stackoverflow.com/a/5568169

    Notes
    -----
    Adapted from matplotlib.mlab
    .. versionadded:: 0.16.0
    """
    # Build a (..., n_segments, nperseg) view of the data. The trivial
    # nperseg == 1, noverlap == 0 case only needs a new axis; otherwise use
    # a zero-copy strided view of the overlapping segments.
    if nperseg == 1 and noverlap == 0:
        segments = x[..., np.newaxis]
    else:
        hop = nperseg - noverlap
        n_segments = (x.shape[-1] - noverlap) // hop
        seg_shape = x.shape[:-1] + (n_segments, nperseg)
        seg_strides = x.strides[:-1] + (hop * x.strides[-1], x.strides[-1])
        segments = np.lib.stride_tricks.as_strided(x, shape=seg_shape,
                                                   strides=seg_strides)
    # Detrend each segment individually, then apply the window.
    windowed = win * detrend_func(segments)
    # FFT acts on the last axis by default and zero-pads up to `nfft`.
    if sides == 'twosided':
        return fftpack.fft(windowed, n=nfft)
    # One-sided: drop any imaginary part before the real FFT.
    return np.fft.rfft(windowed.real, n=nfft)
def _triage_segments(window, nperseg, input_length):
    """
    Parse window and nperseg arguments for spectrogram and _spectral_helper.
    This is a helper function, not meant to be called externally.

    Parameters
    ----------
    window : string, tuple, or ndarray
        If window is specified by a string or tuple and nperseg is not
        specified, nperseg is set to the default of 256 and returns a window
        of that length.
        If instead the window is array_like and nperseg is not specified, then
        nperseg is set to the length of the window. A ValueError is raised if
        the user supplies both an array_like window and a value for nperseg
        but nperseg does not equal the length of the window.
    nperseg : int
        Length of each segment
    input_length : int
        Length of input signal, i.e. x.shape[-1]. Used to test for errors.

    Returns
    -------
    win : ndarray
        Window. If the function was called with a string or tuple then this
        will hold the actual array used as a window.
    nperseg : int
        Length of each segment. If window is str or tuple, nperseg is set to
        256. If window is array_like, nperseg is set to the length of the
        window.
    """
    # Parse window; if array_like, then set nperseg = win.shape
    if isinstance(window, string_types) or isinstance(window, tuple):
        # Window given as a specification: build the array via get_window,
        # defaulting nperseg to 256 but never exceeding the input length.
        if nperseg is None:
            nperseg = 256  # then change to default
        if nperseg > input_length:
            warnings.warn('nperseg = {0:d} is greater than input length '
                          ' = {1:d}, using nperseg = {1:d}'
                          .format(nperseg, input_length))
            nperseg = input_length
        win = get_window(window, nperseg)
    else:
        # Window given explicitly as an array: validate it and derive (or
        # cross-check) nperseg from its length.
        win = np.asarray(window)
        if len(win.shape) != 1:
            raise ValueError('window must be 1-D')
        if input_length < win.shape[-1]:
            raise ValueError('window is longer than input signal')
        if nperseg is None:
            nperseg = win.shape[0]
        elif nperseg != win.shape[0]:
            # (the original `elif nperseg is not None:` guard was redundant
            # after the `is None` branch above)
            raise ValueError("value specified for nperseg is different from"
                             " length of window")
    return win, nperseg
| bsd-3-clause |
rolandgeider/pk15-orakel | oracle/views/misc.py | 1 | 5938 | # -*- coding: utf-8 -*-
# This file is part of pk15 Orakel
#
# pk15 Orakel is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# pk15 Orakel is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
import random
from django.conf import settings
from django import forms
from django.contrib.auth.decorators import login_required
from django.core import mail
from django.core.urlresolvers import reverse
from django.forms import Form
from django.forms import Textarea
from django.http import HttpResponseForbidden
from django.http import HttpResponseRedirect
from django.shortcuts import render, get_object_or_404
from django.template.loader import render_to_string
from oracle.helpers import next_coordinates
from oracle.models import Coordinate
from oracle.models import QuestionConfig
from oracle.models import AnswerConfig
from oracle.models import Answer
from oracle.models import TeamAnswerLog
@login_required
def dashboard(request):
    '''
    Show the index page.

    Users holding the ``oracle.add_question`` permission (quiz admins) get
    no coordinate hint; regular players see the next coordinates computed
    for their team.
    '''
    if request.user.has_perm('oracle.add_question'):
        upcoming = None
    else:
        upcoming = next_coordinates(request.user.userprofile.team)
    return render(request, 'index.html', {'coord': upcoming})
@login_required
def check_step(request, uuid):
    '''
    Handle one station ("step") of the hunt: show its question form and
    process the team's answer.

    :param request: current request; the player's team is read from
        ``request.user.userprofile.team``
    :param uuid: UUID of the :class:`Coordinate` for this step
    :return: the rendered question page, or a redirect (jail shortcut,
        double-answer notification, dashboard or finish page)
    '''
    context = {}
    coordinate = get_object_or_404(Coordinate, uuid=uuid)
    context['coordinate'] = coordinate
    team = request.user.userprofile.team
    # Check if the user was sent to jail
    #
    # In this case, the next coordinate is the first correct answer of
    # the previous question and there is no need to answer anything.
    if coordinate.is_jail:
        answer_log = TeamAnswerLog.objects.filter(team=team).last()
        answer_config_set = answer_log.question_config.answerconfig_set
        next_coordinate = answer_config_set.filter(answer__is_wrong=False).first().next_coordinate
        return HttpResponseRedirect(reverse('oracle:coordinate-show',
                                            kwargs={'lat': next_coordinate.lat,
                                                    'lon': next_coordinate.lon,
                                                    'question': 100}))
    # Quiz admins are not supposed to play through the steps themselves
    if request.user.has_perm('oracle.add_question'):
        return HttpResponseForbidden('Sorry admin, only regular users can access this')
    question_config = get_object_or_404(QuestionConfig,
                                        coordinate=coordinate,
                                        team=team)
    context['question_config'] = question_config
    #
    # Check that the user is allowed to access this step
    #
    # NOTE(review): both checks below are unimplemented placeholders, so any
    # logged-in player can currently open any step directly via its UUID.
    # Is it the first step (there are no answers pointing to it)?
    if AnswerConfig.objects.filter(next_coordinate=coordinate).count():
        pass
    # Is it the correct step?
    pass
    # Answer form, defined here because it's easier to set the queryset and
    # is used only once anyway
    class AnswerLogForm(Form):
        answer = forms.ModelChoiceField(queryset=Answer.objects.filter(question=question_config.question),
                                        empty_label=None,
                                        widget=forms.RadioSelect,
                                        label=u"Antwort")
        place = forms.CharField(label=u"Wo sind wir?",
                                required=False)
        reference = forms.CharField(label=u"Bezug zur Intevation",
                                    widget=Textarea({'cols': 40, 'rows': 3}),
                                    required=False)
    form = AnswerLogForm()
    if request.method == 'POST':
        form = AnswerLogForm(data=request.POST)
        if form.is_valid():
            # Check if the team already answered this question and notify the
            # santa task force that they have been naughty
            if TeamAnswerLog.objects.filter(team=team, question_config=question_config).exists():
                subject = 'Team {} hat geschummelt!'.format(team)
                context = {'question_config': question_config,
                           'team': team,
                           'answer': form.cleaned_data['answer'],
                           'place': form.cleaned_data['place'],
                           'reference': form.cleaned_data['reference']}
                message = render_to_string('user/email_double_answer.html', context)
                mail.send_mail(subject,
                               message,
                               settings.EMAIL_FROM,
                               settings.EMAIL_PENALTY,
                               fail_silently=False)
                return HttpResponseRedirect(reverse('oracle:dashboard'))
            # First answer from this team for this question: record it
            answer_log = TeamAnswerLog()
            answer_log.team = team
            answer_log.question_config = question_config
            answer_log.team_answer = form.cleaned_data['answer']
            answer_log.place = form.cleaned_data['place']
            answer_log.reference = form.cleaned_data['reference']
            answer_log.save()
            # If this is the last question, don't redirect to dashboard
            if not next_coordinates(request.user.userprofile.team):
                return HttpResponseRedirect(reverse('oracle:finish'))
            else:
                return HttpResponseRedirect(reverse('oracle:dashboard'))
        else:
            # Invalid form: fall through and re-render the page with errors
            pass
    context['form'] = form
    return render(request, 'check_step.html', context)
| agpl-3.0 |
heli522/scikit-learn | sklearn/covariance/outlier_detection.py | 208 | 6917 | """
Class for outlier detection.
This class provides a framework for outlier detection. It consists in
several methods that can be added to a covariance estimator in order to
assess the outlying-ness of the observations of a data set.
Such a "outlier detector" object is proposed constructed from a robust
covariance estimator (the Minimum Covariance Determinant).
"""
# Author: Virgile Fritsch <virgile.fritsch@inria.fr>
#
# License: BSD 3 clause
import warnings
import numpy as np
import scipy as sp
from . import MinCovDet
from ..base import ClassifierMixin
from ..utils.validation import check_is_fitted
class OutlierDetectionMixin(object):
    """Set of methods for outliers detection with covariance estimators.

    Parameters
    ----------
    contamination : float, 0. < contamination < 0.5
        The amount of contamination of the data set, i.e. the proportion
        of outliers in the data set.

    Notes
    -----
    Outlier detection from covariance estimation may break or not
    perform well in high-dimensional settings. In particular, one will
    always take care to work with ``n_samples > n_features ** 2``.
    """
    def __init__(self, contamination=0.1):
        self.contamination = contamination

    def decision_function(self, X, raw_values=False):
        """Compute the decision function of the given observations.

        Parameters
        ----------
        X : array-like, shape (n_samples, n_features)
        raw_values : bool
            Whether or not to consider raw Mahalanobis distances as the
            decision function. Must be False (default) for compatibility
            with the others outlier detection tools.

        Returns
        -------
        decision : array-like, shape (n_samples, )
            The values of the decision function for each observations.
            It is equal to the Mahalanobis distances if `raw_values`
            is True. By default (``raw_values=False``), it is equal
            to the cubic root of the shifted Mahalanobis distances.
            In that case, the threshold for being an outlier is 0, which
            ensures a compatibility with other outlier detection tools
            such as the One-Class SVM.
        """
        check_is_fitted(self, 'threshold_')
        mahal_dist = self.mahalanobis(X)
        if raw_values:
            decision = mahal_dist
        else:
            # Shift by the fitted threshold and take the (approximate) cubic
            # root so that 0 separates inliers from outliers.
            # (A redundant second check_is_fitted call was removed here;
            # the attribute is already verified above.)
            transformed_mahal_dist = mahal_dist ** 0.33
            decision = self.threshold_ ** 0.33 - transformed_mahal_dist
        return decision

    def predict(self, X):
        """Outlyingness of observations in X according to the fitted model.

        Parameters
        ----------
        X : array-like, shape = (n_samples, n_features)

        Returns
        -------
        is_inlier : array, shape = (n_samples, ), dtype = int
            For each observation, tells whether or not it should be
            considered as an outlier according to the fitted model:
            +1 for inliers, -1 for outliers.
        """
        check_is_fitted(self, 'threshold_')
        is_inlier = -np.ones(X.shape[0], dtype=int)
        if self.contamination is not None:
            values = self.decision_function(X, raw_values=True)
            is_inlier[values <= self.threshold_] = 1
        else:
            raise NotImplementedError("You must provide a contamination rate.")
        return is_inlier

    @property
    def threshold(self):
        # Deprecated alias for `threshold_`, kept for backward compatibility;
        # returns None if the estimator has not been fitted yet.
        warnings.warn(("The threshold attribute is renamed to threshold_ from "
                       "0.16 onwards and will be removed in 0.18"),
                      DeprecationWarning, stacklevel=1)
        return getattr(self, 'threshold_', None)
class EllipticEnvelope(ClassifierMixin, OutlierDetectionMixin, MinCovDet):
    """An object for detecting outliers in a Gaussian distributed dataset.

    Read more in the :ref:`User Guide <outlier_detection>`.

    Attributes
    ----------
    `contamination` : float, 0. < contamination < 0.5
        The amount of contamination of the data set, i.e. the proportion of \
        outliers in the data set.
    location_ : array-like, shape (n_features,)
        Estimated robust location
    covariance_ : array-like, shape (n_features, n_features)
        Estimated robust covariance matrix
    precision_ : array-like, shape (n_features, n_features)
        Estimated pseudo inverse matrix.
        (stored only if store_precision is True)
    support_ : array-like, shape (n_samples,)
        A mask of the observations that have been used to compute the
        robust estimates of location and shape.

    Parameters
    ----------
    store_precision : bool
        Specify if the estimated precision is stored.
    assume_centered : Boolean
        If True, the support of robust location and covariance estimates
        is computed, and a covariance estimate is recomputed from it,
        without centering the data.
        Useful to work with data whose mean is significantly equal to
        zero but is not exactly zero.
        If False, the robust location and covariance are directly computed
        with the FastMCD algorithm without additional treatment.
    support_fraction : float, 0 < support_fraction < 1
        The proportion of points to be included in the support of the raw
        MCD estimate. Default is ``None``, which implies that the minimum
        value of support_fraction will be used within the algorithm:
        `[n_sample + n_features + 1] / 2`.
    contamination : float, 0. < contamination < 0.5
        The amount of contamination of the data set, i.e. the proportion
        of outliers in the data set.

    See Also
    --------
    EmpiricalCovariance, MinCovDet

    Notes
    -----
    Outlier detection from covariance estimation may break or not
    perform well in high-dimensional settings. In particular, one will
    always take care to work with ``n_samples > n_features ** 2``.

    References
    ----------
    .. [1] Rousseeuw, P.J., Van Driessen, K. "A fast algorithm for the minimum
        covariance determinant estimator" Technometrics 41(3), 212 (1999)
    """
    def __init__(self, store_precision=True, assume_centered=False,
                 support_fraction=None, contamination=0.1,
                 random_state=None):
        # Chain both parents explicitly: MinCovDet holds the covariance
        # estimation parameters, OutlierDetectionMixin the contamination
        # rate used to derive the outlier threshold.
        MinCovDet.__init__(self, store_precision=store_precision,
                           assume_centered=assume_centered,
                           support_fraction=support_fraction,
                           random_state=random_state)
        OutlierDetectionMixin.__init__(self, contamination=contamination)

    def fit(self, X, y=None):
        """Fit the EllipticEnvelope model to X and compute ``threshold_``.

        Parameters
        ----------
        X : array-like, shape (n_samples, n_features)
            Training data.
        y : ignored, present for API consistency.

        Returns
        -------
        self
        """
        MinCovDet.fit(self, X)
        # Threshold the Mahalanobis distances of the training set (dist_,
        # set by MinCovDet.fit) so that the requested proportion of
        # observations falls beyond it.
        self.threshold_ = sp.stats.scoreatpercentile(
            self.dist_, 100. * (1. - self.contamination))
        return self
| bsd-3-clause |
WillisXChen/django-oscar | docs/source/conf.py | 37 | 9704 | # -*- coding: utf-8 -*-
#
# django-oscar documentation build configuration file, created by
# sphinx-quickstart on Mon Feb 7 13:16:33 2011.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# Add the project root and sandbox root to the path
import sys
import os
# Resolve the repository root (two levels up from docs/source) so the
# ``oscar`` package itself is importable by autodoc.
oscar_folder = os.path.realpath(
    os.path.join(os.path.dirname(__file__), '../..'))
# The sandbox site provides a concrete Django project (settings, URLs) that
# is required to import Oscar's model-bearing modules.
sandbox_folder = os.path.realpath(
    os.path.join(os.path.dirname(__file__), '../../sites/sandbox'))
sys.path.append(oscar_folder)
sys.path.append(sandbox_folder)
# Specify settings module (which will be picked up from the sandbox)
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'settings_sphinx')
import django
# Populate Django's app registry before any model modules are imported.
django.setup()
# -- General configuration -----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.todo',
'sphinx.ext.coverage',
'sphinx.ext.viewcode',
'sphinxcontrib.napoleon',
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'django-oscar'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
from oscar import get_version, get_short_version
version = get_version()
release = get_short_version()
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_draft']
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
on_rtd = os.environ.get('READTHEDOCS', None) == 'True'
# Use RTD theme locally
html_theme = 'default'
if not on_rtd:
import sphinx_rtd_theme
html_theme = 'sphinx_rtd_theme'
html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
#html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'django-oscardoc'
# -- Options for LaTeX output --------------------------------------------------
# The paper size ('letter' or 'a4').
#latex_paper_size = 'letter'
# The font size ('10pt', '11pt' or '12pt').
#latex_font_size = '10pt'
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('index', 'django-oscar.tex', u'django-oscar Documentation',
u'David Winterbottom', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Additional stuff for the LaTeX preamble.
#latex_preamble = ''
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output --------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'django-oscar', u'django-oscar Documentation',
[u'David Winterbottom'], 1)
]
# Autodoc settings
autoclass_content = 'class'
# Better documenting of Django models
# See http://djangosnippets.org/snippets/2533/
import inspect
from django.utils.html import strip_tags
from django.utils.encoding import force_unicode
def process_docstring(app, what, name, obj, options, lines):
    """Sphinx ``autodoc-process-docstring`` hook: append ``:param:`` and
    ``:type:`` entries for every field of a Django model so model docs list
    their fields automatically."""
    # This causes import errors if left outside the function
    from django.db import models
    # Only look at objects that inherit from Django's base model class
    if inspect.isclass(obj) and issubclass(obj, models.Model):
        # Ignore abstract models
        # NOTE(review): relies on the private ``_meta._fields`` API of this
        # Django version -- verify when upgrading Django.
        if not hasattr(obj._meta, '_fields'):
            return lines
        # Grab the field list from the meta class
        fields = obj._meta._fields()
        for field in fields:
            # Decode and strip any html out of the field's help text
            help_text = strip_tags(force_unicode(field.help_text))
            # Decode and capitalize the verbose name, for use if there isn't
            # any help text
            verbose_name = force_unicode(field.verbose_name).capitalize()
            if help_text:
                # Add the model field to the end of the docstring as a param
                # using the help text as the description
                lines.append(u':param %s: %s' % (field.attname, help_text))
            else:
                # Add the model field to the end of the docstring as a param
                # using the verbose name as the description
                lines.append(u':param %s: %s' % (field.attname, verbose_name))
            # Add the field's type to the docstring
            lines.append(u':type %s: %s' % (field.attname, type(field).__name__))
    # Return the extended docstring
    return lines
def setup(app):
    """Sphinx extension entry point for this conf.py."""
    # Register the docstring processor with sphinx
    app.connect('autodoc-process-docstring', process_docstring)
| bsd-3-clause |
doduytrung/odoo-8.0 | addons/event/report/report_event_registration.py | 310 | 4079 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp import models, fields
from openerp import tools
class report_event_registration(models.Model):
    """Events Analysis.

    Read-only reporting model backed by a SQL view (see ``init()``); one
    row per (event, registration) pair, with events lacking registrations
    contributing a row with empty registration data via the LEFT JOIN.
    """
    _name = "report.event.registration"
    _order = 'event_date desc'
    # No table is created by the ORM; init() installs a SQL view instead.
    _auto = False
    event_date = fields.Datetime('Event Date', readonly=True)
    event_id = fields.Many2one('event.event', 'Event', required=True)
    # Per-row seat counts bucketed by registration state (see CASE in view).
    draft_state = fields.Integer(' # No of Draft Registrations')
    confirm_state = fields.Integer(' # No of Confirmed Registrations')
    seats_max = fields.Integer('Max Seats')
    nbevent = fields.Integer('Number of Events')
    nbregistration = fields.Integer('Number of Registrations')
    event_type = fields.Many2one('event.type', 'Event Type')
    registration_state = fields.Selection([('draft', 'Draft'), ('confirm', 'Confirmed'), ('done', 'Attended'), ('cancel', 'Cancelled')], 'Registration State', readonly=True, required=True)
    event_state = fields.Selection([('draft', 'Draft'), ('confirm', 'Confirmed'), ('done', 'Done'), ('cancel', 'Cancelled')], 'Event State', readonly=True, required=True)
    user_id = fields.Many2one('res.users', 'Event Responsible', readonly=True)
    user_id_registration = fields.Many2one('res.users', 'Register', readonly=True)
    name_registration = fields.Char('Participant / Contact Name', readonly=True)
    company_id = fields.Many2one('res.company', 'Company', readonly=True)
    def init(self, cr):
        """Initialize the sql view for the event registration.

        Drops any previous version of the view and recreates it. The row
        id is synthesized as ``event_id/registration_id`` so rows remain
        unique even for events without registrations.
        """
        tools.drop_view_if_exists(cr, 'report_event_registration')
        # TOFIX this request won't select events that have no registration
        # NOTE(review): the LEFT JOIN does appear to keep registration-less
        # events (r.* columns NULL) -- confirm whether this comment is stale.
        cr.execute(""" CREATE VIEW report_event_registration AS (
            SELECT
                e.id::varchar || '/' || coalesce(r.id::varchar,'') AS id,
                e.id AS event_id,
                e.user_id AS user_id,
                r.user_id AS user_id_registration,
                r.name AS name_registration,
                e.company_id AS company_id,
                e.date_begin AS event_date,
                count(r.id) AS nbevent,
                sum(r.nb_register) AS nbregistration,
                CASE WHEN r.state IN ('draft') THEN r.nb_register ELSE 0 END AS draft_state,
                CASE WHEN r.state IN ('open','done') THEN r.nb_register ELSE 0 END AS confirm_state,
                e.type AS event_type,
                e.seats_max AS seats_max,
                e.state AS event_state,
                r.state AS registration_state
            FROM
                event_event e
                LEFT JOIN event_registration r ON (e.id=r.event_id)
            GROUP BY
                event_id,
                user_id_registration,
                r.id,
                registration_state,
                r.nb_register,
                event_type,
                e.id,
                e.date_begin,
                e.user_id,
                event_state,
                e.company_id,
                e.seats_max,
                name_registration
            )
            """)
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
fllodrab/Practica1 | venv/lib/python2.7/site-packages/gunicorn/http/_sendfile.py | 53 | 2272 | # -*- coding: utf-8 -
#
# This file is part of gunicorn released under the MIT license.
# See the NOTICE for more information.
import errno
import os
import sys
try:
import ctypes
import ctypes.util
except MemoryError:
# selinux execmem denial
# https://bugzilla.redhat.com/show_bug.cgi?id=488396
raise ImportError
SUPPORTED_PLATFORMS = (
'darwin',
'freebsd',
'dragonfly',
'linux2')
if sys.version_info < (2, 6) or \
sys.platform not in SUPPORTED_PLATFORMS:
raise ImportError("sendfile isn't supported on this platform")
_libc = ctypes.CDLL(ctypes.util.find_library("c"), use_errno=True)
_sendfile = _libc.sendfile
def sendfile(fdout, fdin, offset, nbytes):
    """Zero-copy transfer of up to ``nbytes`` bytes from ``fdin`` to
    ``fdout`` starting at ``offset``, via the platform's sendfile(2).

    Returns the number of bytes sent. Raises OSError on failure, except
    for EAGAIN on BSD/darwin where the partial byte count is returned.
    """
    if sys.platform == 'darwin':
        # Darwin prototype: sendfile(fd, s, offset, &len, hdtr, flags);
        # note the (fdin, fdout) argument order and the in/out length.
        _sendfile.argtypes = [ctypes.c_int, ctypes.c_int, ctypes.c_uint64,
                              ctypes.POINTER(ctypes.c_uint64), ctypes.c_voidp,
                              ctypes.c_int]
        _nbytes = ctypes.c_uint64(nbytes)
        result = _sendfile(fdin, fdout, offset, _nbytes, None, 0)
        if result == -1:
            e = ctypes.get_errno()
            # NOTE(review): ``c_uint64.value`` can never be None, so this
            # guard is always true on EAGAIN -- presumably it was meant to
            # detect a partial send; confirm intended semantics.
            if e == errno.EAGAIN and _nbytes.value is not None:
                return _nbytes.value
            raise OSError(e, os.strerror(e))
        return _nbytes.value
    elif sys.platform in ('freebsd', 'dragonfly',):
        # FreeBSD/DragonFly report bytes written through a separate
        # ``sbytes`` out-parameter.
        _sendfile.argtypes = [ctypes.c_int, ctypes.c_int, ctypes.c_uint64,
                              ctypes.c_uint64, ctypes.c_voidp,
                              ctypes.POINTER(ctypes.c_uint64), ctypes.c_int]
        _sbytes = ctypes.c_uint64()
        result = _sendfile(fdin, fdout, offset, nbytes, None, _sbytes, 0)
        if result == -1:
            e = ctypes.get_errno()
            if e == errno.EAGAIN and _sbytes.value is not None:
                return _sbytes.value
            raise OSError(e, os.strerror(e))
        return _sbytes.value
    else:
        # Linux prototype: sendfile(out_fd, in_fd, *offset, count); the
        # kernel advances the offset pointer in place.
        _sendfile.argtypes = [ctypes.c_int, ctypes.c_int,
                              ctypes.POINTER(ctypes.c_uint64), ctypes.c_size_t]
        _offset = ctypes.c_uint64(offset)
        sent = _sendfile(fdout, fdin, _offset, nbytes)
        if sent == -1:
            e = ctypes.get_errno()
            raise OSError(e, os.strerror(e))
        return sent
| gpl-3.0 |
fxfitz/ansible | test/units/modules/network/edgeos/edgeos_module.py | 55 | 2477 | # (c) 2018 Red Hat Inc.
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import os
import json
from units.modules.utils import AnsibleExitJson, AnsibleFailJson, ModuleTestCase
# Directory holding canned device output, plus a cache keyed by full path
# so each fixture file is read and parsed at most once per test run.
fixture_path = os.path.join(os.path.dirname(__file__), 'fixtures')
fixture_data = {}


def load_fixture(name):
    """Return the content of fixture file ``name``.

    The file is looked up under ``fixture_path``. JSON content is parsed
    into Python objects; anything else is returned as raw text. Results
    are memoized in ``fixture_data``.
    """
    path = os.path.join(fixture_path, name)
    if path in fixture_data:
        return fixture_data[path]
    with open(path) as f:
        data = f.read()
    try:
        data = json.loads(data)
    except ValueError:
        # Not JSON: keep the raw text. Narrowed from a bare ``except:``
        # which would also have hidden unrelated errors (KeyboardInterrupt,
        # MemoryError, ...).
        pass
    fixture_data[path] = data
    return data
class TestEdgeosModule(ModuleTestCase):
    """Shared helpers for edgeos module unit tests.

    Subclasses set ``self.module`` to the Ansible module under test and
    override ``load_fixtures()`` to install canned device responses.
    """

    def execute_module(self, failed=False, changed=False, commands=None,
                       sort=True, defaults=False):
        # Install canned device output before running the module.
        self.load_fixtures(commands)

        if failed:
            result = self.failed()
            self.assertTrue(result['failed'], result)
        else:
            result = self.changed(changed)
            self.assertEqual(result['changed'], changed, result)

        if commands is not None:
            expected, actual = commands, result['commands']
            if sort:
                expected, actual = sorted(expected), sorted(actual)
            self.assertEqual(expected, actual, result['commands'])

        return result

    def failed(self):
        # The module is expected to terminate through fail_json.
        with self.assertRaises(AnsibleFailJson) as ctx:
            self.module.main()
        outcome = ctx.exception.args[0]
        self.assertTrue(outcome['failed'], outcome)
        return outcome

    def changed(self, changed=False):
        # The module is expected to terminate through exit_json.
        with self.assertRaises(AnsibleExitJson) as ctx:
            self.module.main()
        outcome = ctx.exception.args[0]
        self.assertEqual(outcome['changed'], changed, outcome)
        return outcome

    def load_fixtures(self, commands=None):
        # Hook for subclasses to install fixtures/mocks; no-op by default.
        pass
| gpl-3.0 |
bigswitch/nova | nova/api/openstack/compute/schemas/keypairs.py | 24 | 2524 | # Copyright 2013 NEC Corporation. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import copy
from nova.api.validation import parameter_types
# JSON schema for the base (v2.1) keypair-create request body.
create = {
    'type': 'object',
    'properties': {
        'keypair': {
            'type': 'object',
            'properties': {
                'name': parameter_types.name,
                'public_key': {'type': 'string'},
            },
            'required': ['name'],
            'additionalProperties': False,
        },
    },
    'required': ['keypair'],
    'additionalProperties': False,
}


# v2.0 accepts names with leading/trailing spaces for backward compatibility.
create_v20 = copy.deepcopy(create)
create_v20['properties']['keypair']['properties']['name'] = (parameter_types.
    name_with_leading_trailing_spaces)


# v2.2 adds an optional keypair ``type`` (ssh or x509). Derived from the
# base schema via deepcopy for consistency with create_v20 instead of
# duplicating the whole literal.
create_v22 = copy.deepcopy(create)
create_v22['properties']['keypair']['properties']['type'] = {
    'type': 'string',
    'enum': ['ssh', 'x509'],
}


# v2.10 additionally allows specifying the owning ``user_id`` (admin use).
create_v210 = copy.deepcopy(create_v22)
create_v210['properties']['keypair']['properties']['user_id'] = {
    'type': 'string',
}


# Fragment merged into the server-create schema: optional key pair name.
server_create = {
    'key_name': parameter_types.name,
}


server_create_v20 = {
    'key_name': parameter_types.name_with_leading_trailing_spaces,
}
| apache-2.0 |
dkodnik/Ant | openerp/addons/base/ir/ir_mail_server.py | 15 | 26515 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2011-2012 OpenERP S.A (<http://www.openerp.com>)
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>
#
##############################################################################
from email.MIMEText import MIMEText
from email.MIMEBase import MIMEBase
from email.MIMEMultipart import MIMEMultipart
from email.Charset import Charset
from email.Header import Header
from email.utils import formatdate, make_msgid, COMMASPACE, getaddresses, formataddr
from email import Encoders
import logging
import re
import smtplib
import threading
from openerp import SUPERUSER_ID
from openerp.osv import osv, fields
from openerp.tools.translate import _
from openerp.tools import html2text
import openerp.tools as tools
# ustr was originally from tools.misc.
# it is moved to loglevels until we refactor tools.
from openerp.loglevels import ustr
_logger = logging.getLogger(__name__)
class MailDeliveryException(osv.except_osv):
    """Specific exception subclass for mail delivery errors"""
    def __init__(self, name, value):
        # Delegate to except_osv, which renders ``name``/``value`` as the
        # dialog title and message in the client.
        super(MailDeliveryException, self).__init__(name, value)
class WriteToLogger(object):
    """File-like debugging helper: every ``write()`` call is forwarded to
    the wrapped logger at the configured level (DEBUG by default)."""

    def __init__(self, logger, level=logging.DEBUG):
        # Remember the target logger and the severity used for all writes.
        self.logger = logger
        self.level = level

    def write(self, s):
        # Forward the chunk verbatim; callers treat us as a writable stream.
        self.logger.log(self.level, s)
def try_coerce_ascii(string_utf8):
    """Return ``string_utf8`` unchanged when it decodes as 7-bit ASCII,
    otherwise ``None``.

    Since ASCII is a subset of UTF-8, a utf-8 encoded string that decodes
    cleanly as ASCII is already a valid 7-bit header value.
    """
    try:
        string_utf8.decode('ascii')
    except UnicodeDecodeError:
        # Non-ASCII bytes present: signal failure to the caller.
        return None
    else:
        return string_utf8
def encode_header(header_text):
    """Return a value suitable for direct assignment as an RFC2822 header.

    Plain ASCII values come back as 7-bit strings; values with non-ASCII
    characters are wrapped in an ``email.header.Header`` that performs the
    RFC2047 encoding on serialization.

    :param header_text: unicode or utf-8 encoded string with header value
    :rtype: string | email.header.Header
    """
    if not header_text:
        return ""
    # 7-bit characters are encoded identically in ASCII and UTF-8, so the
    # UTF-8 form can be tested for ASCIIness directly.
    utf8_value = tools.ustr(header_text).encode('utf-8')
    ascii_value = try_coerce_ascii(utf8_value)
    if ascii_value:
        return ascii_value
    return Header(utf8_value, 'utf-8')
def encode_header_param(param_text):
    """Return an RFC2047-safe representation of a header *parameter* value,
    usable with ``Message.set_param()`` / ``Message.add_header()``.

    :param param_text: unicode or utf-8 encoded string with the value
    :rtype: string
    """
    # Same ASCII-detection logic as encode_header().
    if not param_text:
        return ""
    utf8_value = tools.ustr(param_text).encode('utf-8')
    ascii_value = try_coerce_ascii(utf8_value)
    if ascii_value:
        return ascii_value
    # Non-ASCII: RFC2047-encode with the utf-8 charset helper.
    return Charset('utf8').header_encode(utf8_value)
# TODO master, remove me, no longer used internaly
name_with_email_pattern = re.compile(r'("[^<@>]+")\s*<([^ ,<@]+@[^> ,]+)>')
address_pattern = re.compile(r'([^ ,<@]+@[^> ,]+)')
def extract_rfc2822_addresses(text):
    """Return the list of valid RFC2822 addresses found in ``text``,
    discarding malformed and non-ASCII candidates."""
    if not text:
        return []
    candidates = address_pattern.findall(tools.ustr(text).encode('utf-8'))
    # Keep only candidates that survive the ASCII coercion check.
    return [addr for addr in candidates if try_coerce_ascii(addr)]
def encode_rfc2822_address_header(header_text):
    """If ``header_text`` contains non-ASCII characters,
    attempts to locate patterns of the form
    ``"Name" <address@domain>`` and replace the
    ``"Name"`` portion by the RFC2047-encoded
    version, preserving the address part untouched.
    """
    def encode_addr(addr):
        # RFC2047-encode only the display name; the address part must stay
        # plain ASCII to remain routable.
        name, email = addr
        if not try_coerce_ascii(name):
            name = str(Header(name, 'utf-8'))
        return formataddr((name, email))
    addresses = getaddresses([tools.ustr(header_text).encode('utf-8')])
    return COMMASPACE.join(map(encode_addr, addresses))
class ir_mail_server(osv.osv):
"""Represents an SMTP server, able to send outgoing emails, with SSL and TLS capabilities."""
_name = "ir.mail_server"
NO_VALID_RECIPIENT = ("At least one valid recipient address should be "
"specified for outgoing emails (To/Cc/Bcc)")
_columns = {
'name': fields.char('Description', size=64, required=True, select=True),
'smtp_host': fields.char('SMTP Server', size=128, required=True, help="Hostname or IP of SMTP server"),
'smtp_port': fields.integer('SMTP Port', size=5, required=True, help="SMTP Port. Usually 465 for SSL, and 25 or 587 for other cases."),
'smtp_user': fields.char('Username', size=64, help="Optional username for SMTP authentication"),
'smtp_pass': fields.char('Password', size=64, help="Optional password for SMTP authentication"),
'smtp_encryption': fields.selection([('none','None'),
('starttls','TLS (STARTTLS)'),
('ssl','SSL/TLS')],
string='Connection Security', required=True,
help="Choose the connection encryption scheme:\n"
"- None: SMTP sessions are done in cleartext.\n"
"- TLS (STARTTLS): TLS encryption is requested at start of SMTP session (Recommended)\n"
"- SSL/TLS: SMTP sessions are encrypted with SSL/TLS through a dedicated port (default: 465)"),
'smtp_debug': fields.boolean('Debugging', help="If enabled, the full output of SMTP sessions will "
"be written to the server log at DEBUG level"
"(this is very verbose and may include confidential info!)"),
'sequence': fields.integer('Priority', help="When no specific mail server is requested for a mail, the highest priority one "
"is used. Default priority is 10 (smaller number = higher priority)"),
'active': fields.boolean('Active')
}
_defaults = {
'smtp_port': 25,
'active': True,
'sequence': 10,
'smtp_encryption': 'none',
}
    def __init__(self, *args, **kwargs):
        # Make sure we pipe the smtplib outputs to our own DEBUG logger
        # NOTE(review): this rebinds module-level smtplib attributes, so the
        # redirection is process-wide, not limited to this model instance.
        if not isinstance(smtplib.stderr, WriteToLogger):
            logpiper = WriteToLogger(_logger)
            smtplib.stderr = logpiper
            smtplib.stdout = logpiper
        super(ir_mail_server, self).__init__(*args,**kwargs)
    def name_get(self, cr, uid, ids, context=None):
        # Display each mail server as "(name)" in relational widgets.
        return [(a["id"], "(%s)" % (a['name'])) for a in self.read(cr, uid, ids, ['name'], context=context)]
    def test_smtp_connection(self, cr, uid, ids, context=None):
        """Try to open (and close) an SMTP connection for each selected
        server, reporting success or failure to the user via raised
        ``except_osv`` dialogs."""
        for smtp_server in self.browse(cr, uid, ids, context=context):
            smtp = False
            try:
                smtp = self.connect(smtp_server.smtp_host, smtp_server.smtp_port, user=smtp_server.smtp_user,
                                    password=smtp_server.smtp_pass, encryption=smtp_server.smtp_encryption,
                                    smtp_debug=smtp_server.smtp_debug)
            except Exception, e:
                raise osv.except_osv(_("Connection Test Failed!"), _("Here is what we got instead:\n %s") % tools.ustr(e))
            finally:
                try:
                    if smtp: smtp.quit()
                except Exception:
                    # ignored, just a consequence of the previous exception
                    pass
        # Success is also reported by raising: the client renders except_osv
        # as a dialog box (exception used deliberately as control flow here).
        raise osv.except_osv(_("Connection Test Succeeded!"), _("Everything seems properly set up!"))
    def connect(self, host, port, user=None, password=None, encryption=False, smtp_debug=False):
        """Returns a new SMTP connection to the give SMTP server, authenticated
        with ``user`` and ``password`` if provided, and encrypted as requested
        by the ``encryption`` parameter.

        :param host: host or IP of SMTP server to connect to
        :param int port: SMTP port to connect to
        :param user: optional username to authenticate with
        :param password: optional password to authenticate with
        :param string encryption: optional, ``'ssl'`` | ``'starttls'``
        :param bool smtp_debug: toggle debugging of SMTP sessions (all i/o
                                will be output in logs)
        """
        if encryption == 'ssl':
            # SMTP_SSL is only present when Python was built with SSL
            # support (always the case from Python 2.6 onwards).
            if not 'SMTP_SSL' in smtplib.__all__:
                raise osv.except_osv(
                             _("SMTP-over-SSL mode unavailable"),
                             # NOTE(review): the two concatenated literals lack a
                             # separating space, producing "instead.If" at runtime.
                             _("Your OpenERP Server does not support SMTP-over-SSL. You could use STARTTLS instead."
                               "If SSL is needed, an upgrade to Python 2.6 on the server-side should do the trick."))
            connection = smtplib.SMTP_SSL(host, port)
        else:
            connection = smtplib.SMTP(host, port)
        connection.set_debuglevel(smtp_debug)
        if encryption == 'starttls':
            # starttls() will perform ehlo() if needed first
            # and will discard the previous list of services
            # after successfully performing STARTTLS command,
            # (as per RFC 3207) so for example any AUTH
            # capability that appears only on encrypted channels
            # will be correctly detected for next step
            connection.starttls()
        if user:
            # Attempt authentication - will raise if AUTH service not supported
            # The user/password must be converted to bytestrings in order to be usable for
            # certain hashing schemes, like HMAC.
            # See also bug #597143 and python issue #5285
            user = tools.ustr(user).encode('utf-8')
            password = tools.ustr(password).encode('utf-8')
            connection.login(user, password)
        return connection
    def build_email(self, email_from, email_to, subject, body, email_cc=None, email_bcc=None, reply_to=False,
                    attachments=None, message_id=None, references=None, object_id=False, subtype='plain', headers=None,
                    body_alternative=None, subtype_alternative='plain'):
        """Constructs an RFC2822 email.message.Message object based on the keyword arguments passed, and returns it.
        :param string email_from: sender email address
        :param list email_to: list of recipient addresses (to be joined with commas)
        :param string subject: email subject (no pre-encoding/quoting necessary)
        :param string body: email body, of the type ``subtype`` (by default, plaintext).
                            If html subtype is used, the message will be automatically converted
                            to plaintext and wrapped in multipart/alternative, unless an explicit
                            ``body_alternative`` version is passed.
        :param string body_alternative: optional alternative body, of the type specified in ``subtype_alternative``
        :param string reply_to: optional value of Reply-To header
        :param string object_id: optional tracking identifier, to be included in the message-id for
                                 recognizing replies. Suggested format for object-id is "res_id-model",
                                 e.g. "12345-crm.lead".
        :param string subtype: optional mime subtype for the text body (usually 'plain' or 'html'),
                               must match the format of the ``body`` parameter. Default is 'plain',
                               making the content part of the mail "text/plain".
        :param string subtype_alternative: optional mime subtype of ``body_alternative`` (usually 'plain'
                                           or 'html'). Default is 'plain'.
        :param list attachments: list of (filename, filecontents) pairs, where filecontents is a string
                                 containing the bytes of the attachment
        :param list email_cc: optional list of string values for CC header (to be joined with commas)
        :param list email_bcc: optional list of string values for BCC header (to be joined with commas)
        :param dict headers: optional map of headers to set on the outgoing mail (may override the
                             other headers, including Subject, Reply-To, Message-Id, etc.)
        :rtype: email.message.Message (usually MIMEMultipart)
        :return: the new RFC2822 email message
        """
        email_from = email_from or tools.config.get('email_from')
        assert email_from, "You must either provide a sender address explicitly or configure "\
                           "a global sender address in the server configuration or with the "\
                           "--email-from startup parameter."
        # Note: we must force all strings to to 8-bit utf-8 when crafting message,
        # or use encode_header() for headers, which does it automatically.
        headers = headers or {} # need valid dict later
        if not email_cc: email_cc = []
        if not email_bcc: email_bcc = []
        if not body: body = u''
        # The primary text part, always utf-8 encoded.
        email_body_utf8 = ustr(body).encode('utf-8')
        email_text_part = MIMEText(email_body_utf8, _subtype=subtype, _charset='utf-8')
        msg = MIMEMultipart()
        if not message_id:
            # Prefer a tracking Message-Id (allows matching replies to the
            # originating record) when an object_id was given.
            if object_id:
                message_id = tools.generate_tracking_message_id(object_id)
            else:
                message_id = make_msgid()
        msg['Message-Id'] = encode_header(message_id)
        if references:
            msg['references'] = encode_header(references)
        msg['Subject'] = encode_header(subject)
        msg['From'] = encode_rfc2822_address_header(email_from)
        # Drop any pre-existing Reply-To so exactly one is set just below
        # (either the explicit one, or a copy of From).
        del msg['Reply-To']
        if reply_to:
            msg['Reply-To'] = encode_rfc2822_address_header(reply_to)
        else:
            msg['Reply-To'] = msg['From']
        msg['To'] = encode_rfc2822_address_header(COMMASPACE.join(email_to))
        if email_cc:
            msg['Cc'] = encode_rfc2822_address_header(COMMASPACE.join(email_cc))
        if email_bcc:
            msg['Bcc'] = encode_rfc2822_address_header(COMMASPACE.join(email_bcc))
        msg['Date'] = formatdate()
        # Custom headers may override normal headers or provide additional ones
        # NOTE(review): email.message.Message.__setitem__ appends rather than
        # replaces, so a custom header is added alongside any same-named header
        # set above — confirm this is the intended "override" semantics.
        for key, value in headers.iteritems():   # Python 2 dict iteration
            msg[ustr(key).encode('utf-8')] = encode_header(value)
        if subtype == 'html' and not body_alternative and html2text:
            # Always provide alternative text body ourselves if possible.
            text_utf8 = tools.html2text(email_body_utf8.decode('utf-8')).encode('utf-8')
            alternative_part = MIMEMultipart(_subtype="alternative")
            alternative_part.attach(MIMEText(text_utf8, _charset='utf-8', _subtype='plain'))
            alternative_part.attach(email_text_part)
            msg.attach(alternative_part)
        elif body_alternative:
            # Include both alternatives, as specified, within a multipart/alternative part
            alternative_part = MIMEMultipart(_subtype="alternative")
            body_alternative_utf8 = ustr(body_alternative).encode('utf-8')
            alternative_body_part = MIMEText(body_alternative_utf8, _subtype=subtype_alternative, _charset='utf-8')
            alternative_part.attach(alternative_body_part)
            alternative_part.attach(email_text_part)
            msg.attach(alternative_part)
        else:
            msg.attach(email_text_part)
        if attachments:
            for (fname, fcontent) in attachments:
                filename_rfc2047 = encode_header_param(fname)
                part = MIMEBase('application', "octet-stream")
                # The default RFC2231 encoding of Message.add_header() works in Thunderbird but not GMail
                # so we fix it by using RFC2047 encoding for the filename instead.
                part.set_param('name', filename_rfc2047)
                part.add_header('Content-Disposition', 'attachment', filename=filename_rfc2047)
                part.set_payload(fcontent)
                Encoders.encode_base64(part)
                msg.attach(part)
        return msg
def _get_default_bounce_address(self, cr, uid, context=None):
'''Compute the default bounce address.
The default bounce address is used to set the envelop address if no
envelop address is provided in the message. It is formed by properly
joining the parameters "mail.catchall.alias" and
"mail.catchall.domain".
If "mail.catchall.alias" is not set it defaults to "postmaster-odoo".
If "mail.catchall.domain" is not set, return None.
'''
get_param = self.pool['ir.config_parameter'].get_param
postmaster = get_param(cr, SUPERUSER_ID, 'mail.catchall.alias',
default='postmaster-odoo',
context=context,)
domain = get_param(cr, SUPERUSER_ID, 'mail.catchall.domain', context=context)
if postmaster and domain:
return '%s@%s' % (postmaster, domain)
    def send_email(self, cr, uid, message, mail_server_id=None, smtp_server=None, smtp_port=None,
                   smtp_user=None, smtp_password=None, smtp_encryption=None, smtp_debug=False,
                   context=None):
        """Sends an email directly (no queuing).
        No retries are done, the caller should handle MailDeliveryException in order to ensure that
        the mail is never lost.
        If the mail_server_id is provided, sends using this mail server, ignoring other smtp_* arguments.
        If mail_server_id is None and smtp_server is None, use the default mail server (highest priority).
        If mail_server_id is None and smtp_server is not None, use the provided smtp_* arguments.
        If both mail_server_id and smtp_server are None, look for an 'smtp_server' value in server config,
        and fails if not found.
        :param message: the email.message.Message to send. The envelope sender will be extracted from the
                        ``Return-Path`` (if present), or will be set to the default bounce address.
                        The envelope recipients will be extracted from the combined list of ``To``,
                        ``CC`` and ``BCC`` headers.
        :param mail_server_id: optional id of ir.mail_server to use for sending. overrides other smtp_* arguments.
        :param smtp_server: optional hostname of SMTP server to use
        :param smtp_encryption: optional TLS mode, one of 'none', 'starttls' or 'ssl' (see ir.mail_server fields for explanation)
        :param smtp_port: optional SMTP port, if mail_server_id is not passed
        :param smtp_user: optional SMTP user, if mail_server_id is not passed
        :param smtp_password: optional SMTP password to use, if mail_server_id is not passed
        :param smtp_debug: optional SMTP debug flag, if mail_server_id is not passed
        :return: the Message-ID of the message that was just sent, if successfully sent, otherwise raises
                 MailDeliveryException and logs root cause.
        """
        # Use the default bounce address **only if** no Return-Path was
        # provided by caller. Caller may be using Variable Envelope Return
        # Path (VERP) to detect no-longer valid email addresses.
        smtp_from = message['Return-Path']
        if not smtp_from:
            smtp_from = self._get_default_bounce_address(cr, uid, context=context)
        if not smtp_from:
            smtp_from = message['From']
        assert smtp_from, "The Return-Path or From header is required for any outbound email"
        # The email's "Envelope From" (Return-Path), and all recipient addresses must only contain ASCII characters.
        from_rfc2822 = extract_rfc2822_addresses(smtp_from)
        assert from_rfc2822, ("Malformed 'Return-Path' or 'From' address: %r - "
                              "It should contain one valid plain ASCII email") % smtp_from
        # use last extracted email, to support rarities like 'Support@MyComp <support@mycompany.com>'
        smtp_from = from_rfc2822[-1]
        email_to = message['To']
        email_cc = message['Cc']
        email_bcc = message['Bcc']
        # Envelope recipients = every plain address found in To/Cc/Bcc.
        # (Python 2 semantics: map()/filter() return lists here.)
        smtp_to_list = filter(None, tools.flatten(map(extract_rfc2822_addresses,[email_to, email_cc, email_bcc])))
        assert smtp_to_list, self.NO_VALID_RECIPIENT
        # Do not actually send emails in testing mode!
        if getattr(threading.currentThread(), 'testing', False):
            _logger.log(logging.TEST, "skip sending email in test mode")
            return message['Message-Id']
        # Get SMTP Server Details from Mail Server
        mail_server = None
        if mail_server_id:
            mail_server = self.browse(cr, SUPERUSER_ID, mail_server_id)
        elif not smtp_server:
            # No explicit server: fall back to the highest-priority
            # (lowest 'sequence') configured mail server, if any.
            mail_server_ids = self.search(cr, SUPERUSER_ID, [], order='sequence', limit=1)
            if mail_server_ids:
                mail_server = self.browse(cr, SUPERUSER_ID, mail_server_ids[0])
        if mail_server:
            smtp_server = mail_server.smtp_host
            smtp_user = mail_server.smtp_user
            smtp_password = mail_server.smtp_pass
            smtp_port = mail_server.smtp_port
            smtp_encryption = mail_server.smtp_encryption
            smtp_debug = smtp_debug or mail_server.smtp_debug
        else:
            # we were passed an explicit smtp_server or nothing at all
            smtp_server = smtp_server or tools.config.get('smtp_server')
            smtp_port = tools.config.get('smtp_port', 25) if smtp_port is None else smtp_port
            smtp_user = smtp_user or tools.config.get('smtp_user')
            smtp_password = smtp_password or tools.config.get('smtp_password')
            if smtp_encryption is None and tools.config.get('smtp_ssl'):
                smtp_encryption = 'starttls' # STARTTLS is the new meaning of the smtp_ssl flag as of v7.0
        if not smtp_server:
            raise osv.except_osv(
                _("Missing SMTP Server"),
                _("Please define at least one SMTP server, or provide the SMTP parameters explicitly."))
        try:
            message_id = message['Message-Id']
            # Add email in Maildir if smtp_server contains maildir.
            if smtp_server.startswith('maildir:/'):
                from mailbox import Maildir
                # len('maildir:') == 8, so this keeps the leading '/' of the path.
                maildir_path = smtp_server[8:]
                mdir = Maildir(maildir_path, factory=None, create = True)
                mdir.add(message.as_string(True))
                return message_id
            try:
                smtp = self.connect(smtp_server, smtp_port, smtp_user, smtp_password, smtp_encryption or False, smtp_debug)
                smtp.sendmail(smtp_from, smtp_to_list, message.as_string())
            finally:
                try:
                    # Close Connection of SMTP Server
                    # NOTE: if connect() itself raised, 'smtp' was never bound
                    # and quit() raises NameError, swallowed just below.
                    smtp.quit()
                except Exception:
                    # ignored, just a consequence of the previous exception
                    pass
        except Exception, e:
            msg = _("Mail delivery failed via SMTP server '%s'.\n%s: %s") % (tools.ustr(smtp_server),
                                                                             e.__class__.__name__,
                                                                             tools.ustr(e))
            _logger.exception(msg)
            raise MailDeliveryException(_("Mail delivery failed"), msg)
        return message_id
def on_change_encryption(self, cr, uid, ids, smtp_encryption):
if smtp_encryption == 'ssl':
result = {'value': {'smtp_port': 465}}
if not 'SMTP_SSL' in smtplib.__all__:
result['warning'] = {'title': _('Warning'),
'message': _('Your server does not seem to support SSL, you may want to try STARTTLS instead')}
else:
result = {'value': {'smtp_port': 25}}
return result
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
MechCoder/sympy | sympy/crypto/crypto.py | 47 | 47918 | # -*- coding: utf-8 -*-
"""
Classical ciphers and LFSRs
"""
from __future__ import print_function
from random import randrange
from sympy import nextprime
from sympy.core import Rational, S, Symbol
from sympy.core.numbers import igcdex
from sympy.core.compatibility import range
from sympy.matrices import Matrix
from sympy.ntheory import isprime, totient, primitive_root
from sympy.polys.domains import FF
from sympy.polys.polytools import gcd, Poly, invert
from sympy.utilities.iterables import flatten, uniq
def alphabet_of_cipher(symbols="ABCDEFGHIJKLMNOPQRSTUVWXYZ"):
    """
    Return the characters of ``symbols`` as a list, defining the alphabet.

    ``symbols`` may be a string or any iterable of strings; it is joined
    into one string first, so the result is always a list of single
    characters.

    Notes
    =====

    Some basic definitions: a *substitution cipher* replaces "units" of
    plaintext (single characters, digraphs, ...) with ciphertext according
    to a regular system, while a *transposition cipher* encrypts by
    applying a bijective permutation to the positions of the plaintext
    units.  A *monoalphabetic cipher* uses one fixed substitution for the
    entire message, whereas a *polyalphabetic cipher* uses several
    substitutions at different points in the message.  Every such cipher
    needs an alphabet for its messages, which is what this helper builds.

    Examples
    ========

    >>> from sympy.crypto.crypto import alphabet_of_cipher
    >>> alphabet_of_cipher()
    ['A', 'B', 'C', 'D', 'E', 'F', 'G', 'H', 'I', 'J', 'K', 'L', 'M',
    'N', 'O', 'P', 'Q', 'R', 'S', 'T', 'U', 'V', 'W', 'X', 'Y', 'Z']
    >>> alphabet_of_cipher('0123456789abc')
    ['0', '1', '2', '3', '4', '5', '6', '7', '8', '9', 'a', 'b', 'c']
    """
    # Join first (accepts strings and iterables alike), then split into
    # individual characters.
    return [c for c in "".join(symbols)]
######## shift cipher examples ############
def cycle_list(k, n):
    """
    Return the cyclic left shift of the list ``range(n)`` by ``k``.

    Examples
    ========

    >>> from sympy.crypto.crypto import cycle_list
    >>> cycle_list(3, 26)
    [3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16,
    17, 18, 19, 20, 21, 22, 23, 24, 25, 0, 1, 2]
    """
    # Build 0..n-1 and rotate via slicing: the first k entries move to
    # the back (Python slice semantics also handle negative k).
    base = [i for i in range(n)]
    return base[k:] + base[:k]
def encipher_shift(pt, key, symbols="ABCDEFGHIJKLMNOPQRSTUVWXYZ"):
    """
    Performs shift cipher encryption on plaintext ``pt``; returns the ciphertext.

    Parameters
    ==========

    pt : string of symbols taken from ``symbols`` (the plaintext)
    key : integer shift amount (may be negative; reduced mod the
        alphabet size)
    symbols : the ordered alphabet the cipher works over (default A..Z)

    Notes
    =====

    The shift cipher is also called the Caesar cipher, after
    Julius Caesar, who, according to Suetonius, used it with a
    shift of three to protect messages of military significance.
    Caesar's nephew Augustus reportedly used a similar cipher, but
    with a right shift of 1.

    ALGORITHM:

        STEPS:
            0. Identify the alphabet A, ..., Z with the integers 0, ..., 25.
            1. Compute from the string ``pt`` a list ``L1`` of corresponding
               integers.
            2. Compute from the list ``L1`` a new list ``L2``, given by
               adding ``(key mod 26)`` to each element in ``L1``.
            3. Compute from the list ``L2`` a string ``c`` of corresponding
               letters.

    Examples
    ========

    >>> from sympy.crypto.crypto import encipher_shift
    >>> pt = "GONAVYBEATARMY"
    >>> encipher_shift(pt, 1)
    'HPOBWZCFBUBSNZ'
    >>> encipher_shift(pt, 0)
    'GONAVYBEATARMY'
    >>> encipher_shift(pt, -1)
    'FNMZUXADZSZQLX'
    """
    # alphabet_of_cipher(symbols) is just list("".join(symbols)); inlined.
    A = list("".join(symbols))
    n = len(A)
    # (An unused ``L = cycle_list(key, n)`` computation has been removed:
    # its result never contributed to the ciphertext.)
    return "".join(A[(A.index(c) + key) % n] for c in pt)
######## affine cipher examples ############
def encipher_affine(pt, key, symbols="ABCDEFGHIJKLMNOPQRSTUVWXYZ"):
    r"""
    Performs the affine cipher encryption on plaintext ``pt``; returns the ciphertext.

    Encryption is based on the map `x \rightarrow ax+b` (mod `26`).
    Decryption is based on the map `x \rightarrow cx+d` (mod `26`), where
    `c = a^{-1}` (mod `26`) and `d = -a^{-1}b` (mod `26`).  (In particular,
    for the map to be invertible, we need `\mathrm{gcd}(a, 26) = 1`.)

    Parameters
    ==========

    pt : string of symbols taken from ``symbols`` (the plaintext)
    key : pair of integers ``(a, b)`` with ``gcd(a, 26) == 1`` (the secret key)
    symbols : the ordered alphabet the cipher works over (default A..Z)

    Notes
    =====

    This is a straightforward generalization of the shift cipher, which
    is the special case ``a == 1``.

    Examples
    ========

    >>> from sympy.crypto.crypto import encipher_affine
    >>> pt = "GONAVYBEATARMY"
    >>> encipher_affine(pt, (1, 1))
    'HPOBWZCFBUBSNZ'
    >>> encipher_affine(pt, (1, 0))
    'GONAVYBEATARMY'
    >>> encipher_affine(pt, (3, 1))
    'TROBMVENBGBALV'
    >>> encipher_affine("TROBMVENBGBALV", (9, 17))
    'GONAVYBEATARMY'
    """
    # alphabet_of_cipher(symbols) is just list("".join(symbols)); inlined.
    A = list("".join(symbols))
    n = len(A)
    a, b = key  # multiplicative coeff "a", additive coeff "b"
    # (An unused ``L = cycle_list(b, n)`` computation has been removed:
    # its result never contributed to the ciphertext.  The docstring's
    # decryption offset was also corrected from -a^{-1}c to -a^{-1}b.)
    return "".join(A[(a*A.index(c) + b) % n] for c in pt)
#################### substitution cipher ###########################
def encipher_substitution(pt, key, symbols="ABCDEFGHIJKLMNOPQRSTUVWXYZ"):
    """
    Performs the substitution cipher encryption on plaintext ``pt``; returns the ciphertext.

    Assumes the letters of ``pt`` are taken from ``symbols`` (any other
    characters, such as spaces and punctuation, are dropped) and that
    ``key`` is a permutation of the symbols: each plaintext letter is
    replaced by the letter of ``key`` at the same alphabet position.
    Decryption uses the inverse permutation.  Note that if the permutation
    in ``key`` has order 2 (e.g. a transposition), then the encryption
    permutation and the decryption permutation are the same.

    Examples
    ========

    >>> from sympy.crypto.crypto import encipher_substitution
    >>> key = "BACDEFGHIJKLMNOPQRSTUVWXYZ"
    >>> encipher_substitution("go navy! beat army!", key)
    'GONBVYAEBTBRMY'
    >>> encipher_substitution('GONBVYAEBTBRMY', key)
    'GONAVYBEATARMY'
    """
    # alphabet_of_cipher(symbols) is just list("".join(symbols)); inlined.
    A = list("".join(symbols))
    # Upper-case and keep only alphanumeric characters, like the other
    # ciphers in this module.  (An unused ``n = len(A)`` assignment has
    # been removed.)
    pt0 = [x.capitalize() for x in pt if x.isalnum()]
    return "".join(key[A.index(x)] for x in pt0)
######################################################################
#################### Vigenère cipher examples ########################
######################################################################
def encipher_vigenere(pt, key, symbols="ABCDEFGHIJKLMNOPQRSTUVWXYZ"):
    """
    Performs the Vigenère cipher encryption on plaintext ``pt``; returns the ciphertext.

    Notes
    =====

    The Vigenère cipher is named after Blaise de Vigenère, a sixteenth
    century diplomat and cryptographer, by a historical accident:
    Vigenère actually invented a different and more complicated cipher
    (an "auto-key" cipher that extends the key with the plaintext
    itself).  The so-called *Vigenère cipher* was actually invented by
    Giovan Batista Belaso in 1553.

    This cipher was used in the 1800's, for example, during the American
    Civil War; the Confederacy used a brass cipher disk to implement it
    (now on display in the NSA Museum in Fort Meade) [1]_.

    The Vigenère cipher is a generalization of the shift cipher: whereas
    the shift cipher moves every letter by the same amount (the key), the
    Vigenère cipher shifts each letter by an amount determined by the
    corresponding letter of a key word or phrase, applied cyclically.
    For example, a single-letter key such as "C" gives a shift cipher of
    shift `2` ("C" is letter number 2, counting from 0); a two-letter key
    such as "CA" shifts letters in even positions by `2` and leaves
    letters in odd positions alone.

    ALGORITHM:

        INPUT:

            ``key``: a string of upper-case letters (the secret key)

            ``m``: string of upper-case letters (the plaintext message)

        OUTPUT:

            ``c``: string of upper-case letters (the ciphertext message)

        STEPS:
            0. Identify the alphabet A, ..., Z with the integers 0, ..., 25.
            1. Compute from the string ``key`` a list ``L1`` of
               corresponding integers. Let ``n1 = len(L1)``.
            2. Compute from the string ``m`` a list ``L2`` of
               corresponding integers. Let ``n2 = len(L2)``.
            3. Break ``L2`` up sequentially into sublists of size ``n1``,
               and one final sublist of size at most ``n1``.
            4. For each sublist ``L`` of ``L2``, compute a new list ``C``
               given by ``C[i] = L[i] + L1[i] (mod 26)`` for each ``i``.
            5. Assemble these lists ``C`` by concatenation into a new
               list of length ``n2``.
            6. Compute from the new list a string ``c`` of corresponding
               letters.

    Once it is known that the key is, say, `n` characters long, frequency
    analysis can be applied to every `n`-th letter of the ciphertext to
    determine the plaintext.  This method is called *Kasiski examination*
    (although it was first discovered by Babbage).

    References
    ==========

    .. [1] http://en.wikipedia.org/wiki/Vigenere_cipher

    Examples
    ========

    >>> from sympy.crypto.crypto import encipher_vigenere
    >>> key = "encrypt"
    >>> pt = "meet me on monday"
    >>> encipher_vigenere(pt, key)
    'QRGKKTHRZQEBPR'
    """
    A = alphabet_of_cipher("".join(symbols))
    N = len(A)  # normally 26
    # Normalize the key: de-duplicate (a quirk this implementation shares
    # with its decipher counterpart), upper-case, drop non-alphanumerics.
    K = [A.index(x.capitalize()) for x in uniq(key) if x.isalnum()]
    k = len(K)
    # Normalize the plaintext the same way (without de-duplication).
    P = [A.index(x.capitalize()) for x in pt if x.isalnum()]
    # Add the cyclically-repeated key shifts position by position.
    shifted = [(p + K[i % k]) % N for i, p in enumerate(P)]
    return "".join(A[x] for x in shifted)
def decipher_vigenere(ct, key, symbols="ABCDEFGHIJKLMNOPQRSTUVWXYZ"):
    """
    Decode using the Vigenère cipher (the inverse of ``encipher_vigenere``).

    Examples
    ========

    >>> from sympy.crypto.crypto import decipher_vigenere
    >>> key = "encrypt"
    >>> ct = "QRGK kt HRZQE BPR"
    >>> decipher_vigenere(ct, key)
    'MEETMEONMONDAY'
    """
    A = alphabet_of_cipher("".join(symbols))
    N = len(A)  # normally 26
    # Same key normalization as encipher_vigenere.
    K = [A.index(x.capitalize()) for x in uniq(key) if x.isalnum()]
    k = len(K)
    C = [A.index(x.capitalize()) for x in ct if x.isalnum()]
    # Decryption subtracts the cyclic key shift instead of adding it.
    plain = [(c - K[i % k]) % N for i, c in enumerate(C)]
    return "".join(A[x] for x in plain)
#################### Hill cipher ########################
def encipher_hill(pt, key, symbols="ABCDEFGHIJKLMNOPQRSTUVWXYZ"):
    r"""
    Performs the Hill cipher encryption on plaintext ``pt``; returns the ciphertext.

    Notes
    =====

    The Hill cipher [1]_, invented by Lester S. Hill in the 1920's [2]_,
    was the first polygraphic cipher in which it was practical (though barely)
    to operate on more than three symbols at once. The following discussion
    assumes an elementary knowledge of matrices.

    First, each letter is encoded as a number: "A" `\leftrightarrow` 0,
    "B" `\leftrightarrow` 1, ..., "Z" `\leftrightarrow` 25.  We denote the
    integers `\{0, 1, ..., 25\}` by `Z_{26}`.  A message of `n` capital
    letters (no spaces) may be regarded as an `n`-tuple of elements of
    `Z_{26}`.  A key is a `k \times k` matrix `K` with entries in `Z_{26}`
    such that `K` is invertible mod 26 (i.e. the linear transformation
    `K: Z_{26}^k \rightarrow Z_{26}^k` is one-to-one).

    ALGORITHM:

        INPUT:

            ``key``: a `k \times k` invertible matrix `K` over `Z_{26}`

            ``pt``: string of upper-case letters (the plaintext message)

        OUTPUT:

            ``ct``: string of upper-case letters (the ciphertext message)

        STEPS:
            0. Identify the alphabet A, ..., Z with the integers 0, ..., 25.
            1. Compute from the string ``pt`` a list ``L`` of corresponding
               integers. Let ``n = len(L)``.
            2. Break the list ``L`` up into ``t = ceiling(n/k)`` sublists
               ``L_1``, ..., ``L_t`` of size ``k`` (the last list is padded
               with 0's to ensure it has size ``k``).
            3. Compute new lists ``C_1``, ..., ``C_t`` given by
               ``C[i] = K*L_i`` (arithmetic mod 26), for each ``i``.
            4. Concatenate these into a list ``C = C_1 + ... + C_t``.
            5. Compute from ``C`` a string ``ct`` of corresponding letters.
               This has length ``k*t``.

    References
    ==========

    .. [1] en.wikipedia.org/wiki/Hill_cipher
    .. [2] Lester S. Hill, Cryptography in an Algebraic Alphabet, The American
           Mathematical Monthly Vol.36, June-July 1929, pp.306-312.

    Examples
    ========

    >>> from sympy.crypto.crypto import encipher_hill
    >>> from sympy import Matrix
    >>> pt = "meet me on monday"
    >>> key = Matrix([[1, 2], [3, 5]])
    >>> encipher_hill(pt, key)
    'UEQDUEODOCTCWQ'
    >>> pt = "meet me on tuesday"
    >>> encipher_hill(pt, key)
    'UEQDUEODHBOYDJYU'
    >>> pt = "GONAVYBEATARMY"
    >>> key = Matrix([[1, 0, 1], [0, 1, 1], [2, 2, 3]])
    >>> encipher_hill(pt, key)
    'TBBYTKBEKKRLMYU'
    """
    symbols = "".join(symbols)
    A = alphabet_of_cipher(symbols)
    N = len(A)  # normally, 26
    k = key.cols
    pt0 = [x.capitalize() for x in pt if x.isalnum()]
    P = [A.index(x) for x in pt0]
    n = len(P)
    m = n//k
    if n > m*k:
        # Pad the message with 0's ("A") up to the next multiple of k.
        # BUGFIX: the previous code appended ``n - m*k`` zeros (the size of
        # the remainder, not of the missing part), which made the indexing
        # below raise IndexError whenever 2*(n % k) < k.
        P = P + [0]*(k - n % k)
        m = m + 1
    # Encrypt block j as key * (column vector of its k letters), mod N.
    C = [list(key*Matrix(k, 1, [P[i] for i in range(k*j, k*(j + 1))])) for j in range(m)]
    C = flatten(C)
    return "".join([A[i % N] for i in C])
def decipher_hill(ct, key, symbols="ABCDEFGHIJKLMNOPQRSTUVWXYZ"):
    """
    Deciphering is the same as enciphering but using the inverse of the
    key matrix (computed mod the alphabet size).

    Examples
    ========

    >>> from sympy.crypto.crypto import decipher_hill
    >>> from sympy import Matrix
    >>> ct = "UEQDUEODOCTCWQ"
    >>> key = Matrix([[1, 2], [3, 5]])
    >>> decipher_hill(ct, key)
    'MEETMEONMONDAY'
    >>> ct = "UEQDUEODHBOYDJYU"
    >>> decipher_hill(ct, key)
    'MEETMEONTUESDAYA'
    """
    symbols = "".join(symbols)
    A = alphabet_of_cipher(symbols)
    N = len(A)  # normally, 26
    k = key.cols
    ct0 = [x.capitalize() for x in ct if x.isalnum()]
    C = [A.index(x) for x in ct0]
    n = len(C)
    m = n//k
    if n > m*k:
        # Pad with 0's ("A") up to the next multiple of k.
        # BUGFIX: the previous code appended ``n - m*k`` zeros (the size of
        # the remainder, not of the missing part), which made the indexing
        # below raise IndexError whenever 2*(n % k) < k.
        C = C + [0]*(k - n % k)
        m = m + 1
    key_inv = key.inv_mod(N)
    # Decrypt block j with the modular inverse of the key matrix.
    P = [list(key_inv*Matrix(k, 1, [C[i] for i in range(k*j, k*(j + 1))])) for j in range(m)]
    P = flatten(P)
    return "".join([A[i % N] for i in P])
#################### Bifid cipher ########################
def encipher_bifid5(pt, key):
    r"""
    Performs the Bifid cipher encryption on plaintext ``pt``; returns the ciphertext.

    This is the version of the Bifid cipher that uses the `5 \times 5`
    Polybius square.

    Notes
    =====

    The Bifid cipher was invented around 1901 by Felix Delastelle.  It is
    a *fractional substitution* cipher: letters are replaced by pairs of
    coordinates in a smaller square alphabet.  The cipher uses a
    `5 \times 5` square filled with some ordering of the alphabet, with
    "I" and "J" identified (a so-called Polybius square; a `6 \times 6`
    analog restores "J" and appends the digits 0-9).  According to Helen
    Gaines' book *Cryptanalysis*, this type of cipher was used in the
    field by the German Army during World War I.

    ALGORITHM: (5x5 case)

        INPUT:

            ``pt``: plaintext string (no "j"s)

            ``key``: short string for key (no repetitions, no "j"s)

        OUTPUT:

            ciphertext (using Bifid5 cipher in all caps, no spaces, no "J"s)

        STEPS:
            1. Create the `5 \times 5` Polybius square ``S`` from the key:
               place the key letters left-to-right, top-to-bottom, then the
               remaining letters of the alphabet (skipping "J").
            2. Form the list ``P`` of coordinate pairs of the letters of
               ``pt`` in the square.
            3. Let ``L1`` be all first coordinates and ``L2`` all second
               coordinates (each of length ``n``).
            4. Read the concatenation ``L1 + L2`` off two numbers at a time.
            5. Map each such pair back to a square letter; the resulting
               string is the ciphertext.

    Examples
    ========

    >>> from sympy.crypto.crypto import encipher_bifid5
    >>> key = "encrypt"
    >>> encipher_bifid5("meet me on monday", key)
    'LNLLQNPPNPGADK'
    >>> encipher_bifid5("meet me on friday", key)
    'LNLLFGPPNPGRSK'
    """
    A = alphabet_of_cipher()
    # Upper-case, keep only alphanumerics, and de-duplicate the key.
    key0 = [x.capitalize() for x in uniq(key) if x.isalnum()]
    pt0 = [x.capitalize() for x in pt if x.isalnum()]
    # Polybius square as a flat 25-letter list: key first, then the rest
    # of the alphabet with "J" left out.
    long_key = key0 + [x for x in A if x not in key0 and x != "J"]
    n = len(pt0)
    # Fractionalization: all row coordinates, then all column coordinates,
    # read off two at a time.
    rows = [long_key.index(x)//5 for x in pt0]
    cols = [long_key.index(x) % 5 for x in pt0]
    mixed = rows + cols
    return "".join(long_key[5*mixed[2*i] + mixed[2*i + 1]] for i in range(n))
def decipher_bifid5(ct, key):
    r"""
    Performs the Bifid cipher decryption on ciphertext ``ct``; returns the plaintext.

    This is the version of the Bifid cipher that uses the `5 \times 5`
    Polybius square.

    INPUT:

        ``ct``: ciphertext string (digits okay)

        ``key``: short string for key (no repetitions, digits okay)

    OUTPUT:

        plaintext from Bifid5 cipher (all caps, no spaces, no "J"s)

    Examples
    ========

    >>> from sympy.crypto.crypto import encipher_bifid5, decipher_bifid5
    >>> key = "encrypt"
    >>> encipher_bifid5("meet me on monday", key)
    'LNLLQNPPNPGADK'
    >>> decipher_bifid5('LNLLQNPPNPGADK', key)
    'MEETMEONMONDAY'
    """
    A = alphabet_of_cipher()
    # Upper-case, keep only alphanumerics, and de-duplicate the key.
    key0 = [x.capitalize() for x in uniq(key) if x.isalnum()]
    ct0 = [x.capitalize() for x in ct if x.isalnum()]
    # Polybius square as a flat 25-letter list: key first, then the rest
    # of the alphabet with "J" left out.
    long_key = key0 + [x for x in A if x not in key0 and x != "J"]
    n = len(ct0)
    # Undo the fractionalization: collect each letter's (row, col) pair in
    # sequence; the first n entries are rows, the last n are columns.
    coords = []
    for x in ct0:
        if x != "J":
            pos = long_key.index(x)
            coords.extend([pos//5, pos % 5])
    return "".join(long_key[5*coords[i] + coords[n + i]] for i in range(n))
def bifid5_square(key):
    r"""
    Produce the Polybius square for the `5 \times 5` Bifid cipher.

    The square is filled row by row with the (de-duplicated, upper-cased)
    key letters first, then the rest of the alphabet with "J" omitted;
    each entry is returned as a Symbol inside a 5x5 Matrix.

    Examples
    ========

    >>> from sympy.crypto.crypto import bifid5_square
    >>> bifid5_square("gold bug")
    Matrix([
    [G, O, L, D, B],
    [U, A, C, E, F],
    [H, I, K, M, N],
    [P, Q, R, S, T],
    [V, W, X, Y, Z]])
    """
    A = alphabet_of_cipher()
    # Upper-case, keep only alphanumerics, and de-duplicate the key.
    key0 = [x.capitalize() for x in uniq(key) if x.isalnum()]
    long_key = key0 + [x for x in A if x not in key0 and x != "J"]

    def entry(i, j):
        # Letter sitting at row i, column j of the flattened square.
        return Symbol(long_key[5*i + j])

    return Matrix(5, 5, entry)
def encipher_bifid6(pt, key):
    r"""
    Performs the Bifid cipher encryption on plaintext ``pt``, and returns the ciphertext.

    This is the version of the Bifid cipher that uses the `6 \times 6` Polybius square.
    Assumes alphabet of symbols is "A", ..., "Z", "0", ..., "9".

    INPUT:

        ``pt``: plaintext string (digits okay)

        ``key``: short string for key (no repetitions, digits okay)

    OUTPUT:

        ciphertext from Bifid cipher (all caps, no spaces)

    Examples
    ========

    >>> from sympy.crypto.crypto import encipher_bifid6
    >>> key = "encrypt"
    >>> pt = "meet me on monday at 8am"
    >>> encipher_bifid6(pt, key)
    'HNHOKNTA5MEPEGNQZYG'

    """
    A = alphabet_of_cipher() + [str(a) for a in range(10)]
    # first make sure the letters are capitalized
    # and text has no spaces
    key = uniq(key)
    key0 = [x.capitalize() for x in key if x.isalnum()]
    pt0 = [x.capitalize() for x in pt if x.isalnum()]
    # create long key: key symbols first, then the unused symbols of the
    # 36-character alphabet (A-Z plus 0-9)
    long_key = key0 + [x for x in A if not(x in key0)]
    n = len(pt0)
    # the fractionalization: (row, column) coordinates of each plaintext
    # symbol in the 6x6 square
    pairs = [[long_key.index(x)//6, long_key.index(x) % 6] for x in pt0]
    # all row coordinates first, then all column coordinates, re-paired
    tmp_cipher = flatten([x[0] for x in pairs] + [x[1] for x in pairs])
    ct = "".join([long_key[6*tmp_cipher[2*i] + tmp_cipher[2*i + 1]] for i in range(n)])
    return ct
def decipher_bifid6(ct, key):
    r"""
    Performs the Bifid cipher decryption on ciphertext ``ct``, and returns the plaintext.

    This is the version of the Bifid cipher that uses the `6 \times 6` Polybius square.
    Assumes alphabet of symbols is "A", ..., "Z", "0", ..., "9".

    INPUT:

        ``ct``: ciphertext string (digits okay)

        ``key``: short string for key (no repetitions, digits okay)

    OUTPUT:

        plaintext from Bifid cipher (all caps, no spaces)

    Examples
    ========

    >>> from sympy.crypto.crypto import encipher_bifid6, decipher_bifid6
    >>> key = "encrypt"
    >>> pt = "meet me on monday at 8am"
    >>> encipher_bifid6(pt, key)
    'HNHOKNTA5MEPEGNQZYG'
    >>> ct = "HNHOKNTA5MEPEGNQZYG"
    >>> decipher_bifid6(ct, key)
    'MEETMEONMONDAYAT8AM'

    """
    A = alphabet_of_cipher() + [str(a) for a in range(10)]
    # first make sure the letters are capitalized
    # and text has no spaces
    key = uniq(key)
    key0 = [x.capitalize() for x in key if x.isalnum()]
    ct0 = [x.capitalize() for x in ct if x.isalnum()]
    # create long key: key symbols first, then the unused symbols
    long_key = key0 + [x for x in A if not(x in key0)]
    n = len(ct0)
    # the fractionalization: (row, column) coordinates of each
    # ciphertext symbol in the 6x6 square, flattened in reading order
    pairs = flatten([[long_key.index(x)//6, long_key.index(x) % 6] for x in ct0])
    # de-interleave: first n coordinates are plaintext rows, last n columns
    tmp_plain = flatten([[pairs[i], pairs[n + i]] for i in range(n)])
    pt = "".join([long_key[6*tmp_plain[2*i] + tmp_plain[2*i + 1]] for i in range(n)])
    return pt
def bifid6_square(key):
    r"""
    6x6 Polybius square.

    Produces the Polybius square for the `6 \times 6` Bifid cipher.
    Assumes alphabet of symbols is "A", ..., "Z", "0", ..., "9".

    Examples
    ========

    >>> from sympy.crypto.crypto import bifid6_square
    >>> key = "encrypt"
    >>> bifid6_square(key)
    Matrix([
    [E, N, C, R, Y, P],
    [T, A, B, D, F, G],
    [H, I, J, K, L, M],
    [O, Q, S, U, V, W],
    [X, Z, 0, 1, 2, 3],
    [4, 5, 6, 7, 8, 9]])

    """
    alphabet = alphabet_of_cipher() + [str(digit) for digit in range(10)]
    # Normalize the key: drop duplicates and non-alphanumerics, capitalize.
    deduped = uniq(key)
    key_symbols = [c.capitalize() for c in deduped if c.isalnum()]
    # Key symbols first, then the unused symbols of the 36-char alphabet.
    long_key = key_symbols + [c for c in alphabet if c not in key_symbols]
    return Matrix(6, 6, lambda i, j: Symbol(long_key[6*i + j]))
def encipher_bifid7(pt, key):
    r"""
    Performs the Bifid cipher encryption on plaintext ``pt``, and returns the ciphertext.

    This is the version of the Bifid cipher that uses the `7 \times 7` Polybius square.
    Assumes alphabet of symbols is "A", ..., "Z", "0", ..., "22".
    (Also, assumes you have some way of distinguishing "22"
    from "2", "2" juxtaposed together for deciphering...)

    INPUT:

        ``pt``: plaintext string (digits okay)

        ``key``: short string for key (no repetitions, digits okay)

    OUTPUT:

        ciphertext from Bifid7 cipher (all caps, no spaces)

    Examples
    ========

    >>> from sympy.crypto.crypto import encipher_bifid7
    >>> key = "encrypt"
    >>> pt = "meet me on monday at 8am"
    >>> encipher_bifid7(pt, key)
    'JEJJLNAA3ME19YF3J222R'

    """
    A = alphabet_of_cipher() + [str(a) for a in range(23)]
    # first make sure the letters are capitalized
    # and text has no spaces
    key = uniq(key)
    key0 = [x.capitalize() for x in key if x.isalnum()]
    pt0 = [x.capitalize() for x in pt if x.isalnum()]
    # create long key: key symbols first, then the rest of the
    # 49-symbol alphabet (A-Z plus "0".."22")
    long_key = key0 + [x for x in A if not(x in key0)]
    n = len(pt0)
    # the fractionalization: (row, column) coordinates in the 7x7 square
    pairs = [[long_key.index(x)//7, long_key.index(x) % 7] for x in pt0]
    # all row coordinates, then all column coordinates, re-paired
    tmp_cipher = flatten([x[0] for x in pairs] + [x[1] for x in pairs])
    ct = "".join([long_key[7*tmp_cipher[2*i] + tmp_cipher[2*i + 1]] for i in range(n)])
    return ct
def bifid7_square(key):
    r"""
    7x7 Polybius square.

    Produce the Polybius square for the `7 \times 7` Bifid cipher.
    Assumes alphabet of symbols is "A", ..., "Z", "0", ..., "22".
    (Also, assumes you have some way of distinguishing "22"
    from "2", "2" juxtaposed together for deciphering...)

    Examples
    ========

    >>> from sympy.crypto.crypto import bifid7_square
    >>> bifid7_square("gold bug")
    Matrix([
    [ G,  O,  L,  D,  B,  U,  A],
    [ C,  E,  F,  H,  I,  J,  K],
    [ M,  N,  P,  Q,  R,  S,  T],
    [ V,  W,  X,  Y,  Z,  0,  1],
    [ 2,  3,  4,  5,  6,  7,  8],
    [ 9, 10, 11, 12, 13, 14, 15],
    [16, 17, 18, 19, 20, 21, 22]])

    """
    alphabet = alphabet_of_cipher() + [str(num) for num in range(23)]
    # Normalize the key: drop duplicates and non-alphanumerics, capitalize.
    deduped = uniq(key)
    key_symbols = [c.capitalize() for c in deduped if c.isalnum()]
    # Key symbols first, then the unused symbols of the 49-symbol alphabet.
    long_key = key_symbols + [c for c in alphabet if c not in key_symbols]
    return Matrix(7, 7, lambda i, j: Symbol(long_key[7*i + j]))
#################### RSA #############################
def rsa_public_key(p, q, e):
    r"""
    The RSA *public key* is the pair `(n,e)`, where `n`
    is a product of two primes and `e` is relatively
    prime (coprime) to the Euler totient `\phi(n)`.

    Returns ``False`` when ``p`` or ``q`` is not prime, or when
    ``e`` is not coprime to `\phi(n)`.

    Examples
    ========

    >>> from sympy.crypto.crypto import rsa_public_key
    >>> p, q, e = 3, 5, 7
    >>> n, e = rsa_public_key(p, q, e)
    >>> n
    15
    >>> e
    7

    """
    n = p*q
    # For distinct primes p, q this equals (p - 1)*(q - 1).
    phi = totient(n)
    if isprime(p) and isprime(q) and gcd(e, phi) == 1:
        return n, e
    # invalid parameters: signal failure instead of raising
    return False
def rsa_private_key(p, q, e):
    r"""
    The RSA *private key* is the pair `(n,d)`, where `n`
    is a product of two primes and `d` is the inverse of
    `e` (mod `\phi(n)`).

    Returns ``False`` when ``p`` or ``q`` is not prime, or when
    ``e`` is not coprime to `\phi(n)`.

    Examples
    ========

    >>> from sympy.crypto.crypto import rsa_private_key
    >>> p, q, e = 3, 5, 7
    >>> rsa_private_key(p, q, e)
    (15, 7)

    """
    n = p*q
    # For distinct primes p, q this equals (p - 1)*(q - 1).
    phi = totient(n)
    if isprime(p) and isprime(q) and gcd(e, phi) == 1:
        # d = e**(-1) mod phi(n)
        d = int(invert(e,phi))
        return n, d
    # invalid parameters: signal failure instead of raising
    return False
def encipher_rsa(pt, puk):
    """
    In RSA, a message `m` is encrypted by computing
    `m^e` (mod `n`), where ``puk`` is the public key `(n,e)`.

    Examples
    ========

    >>> from sympy.crypto.crypto import encipher_rsa, rsa_public_key
    >>> p, q, e = 3, 5, 7
    >>> puk = rsa_public_key(p, q, e)
    >>> pt = 12
    >>> encipher_rsa(pt, puk)
    3
    """
    modulus, exponent = puk
    # Built-in three-argument pow performs modular exponentiation.
    return pow(pt, exponent, modulus)
def decipher_rsa(ct, prk):
    """
    In RSA, a ciphertext `c` is decrypted by computing
    `c^d` (mod `n`), where ``prk`` is the private key `(n, d)`.

    Examples
    ========

    >>> from sympy.crypto.crypto import decipher_rsa, rsa_private_key
    >>> p, q, e = 3, 5, 7
    >>> prk = rsa_private_key(p, q, e)
    >>> ct = 3
    >>> decipher_rsa(ct, prk)
    12
    """
    modulus, exponent = prk
    # Modular exponentiation recovers the plaintext.
    return pow(ct, exponent, modulus)
#################### kid krypto (kid RSA) #############################
def kid_rsa_public_key(a, b, A, B):
    r"""
    Kid RSA is a version of RSA useful to teach grade school children
    since it does not involve exponentiation.

    Alice wants to talk to Bob. Bob generates keys as follows.
    Key generation:

    * Select positive integers `a, b, A, B` at random.
    * Compute `M = a b - 1`, `e = A M + a`, `d = B M + b`,
      `n = (e d - 1)/M`.
    * The *public key* is `(n, e)`. Bob sends these to Alice.
    * The *private key* is `d`, which Bob keeps secret.

    Encryption: If `m` is the plaintext message then the
    ciphertext is `c = m e \pmod n`.

    Decryption: If `c` is the ciphertext message then the
    plaintext is `m = c d \pmod n`.

    Examples
    ========

    >>> from sympy.crypto.crypto import kid_rsa_public_key
    >>> a, b, A, B = 3, 4, 5, 6
    >>> kid_rsa_public_key(a, b, A, B)
    (369, 58)
    """
    # Wrap every quantity in S() so sympy Integers are returned.
    seed = S(a*b - 1)
    enc = S(A*seed + a)
    dec = S(B*seed + b)
    modulus = S((enc*dec - 1)//seed)
    return modulus, enc
def kid_rsa_private_key(a, b, A, B):
    """
    Compute `M = a b - 1`, `e = A M + a`, `d = B M + b`, `n = (e d - 1) / M`.
    The *private key* is `d`, which Bob keeps secret.

    Examples
    ========

    >>> from sympy.crypto.crypto import kid_rsa_private_key
    >>> a, b, A, B = 3, 4, 5, 6
    >>> kid_rsa_private_key(a, b, A, B)
    (369, 70)
    """
    # Wrap every quantity in S() so sympy Integers are returned.
    seed = S(a*b - 1)
    enc = S(A*seed + a)
    dec = S(B*seed + b)
    modulus = S((enc*dec - 1)//seed)
    return modulus, dec
def encipher_kid_rsa(pt, puk):
    """
    Here ``pt`` is the plaintext and ``puk`` is the public key ``(n, e)``.

    Examples
    ========

    >>> from sympy.crypto.crypto import encipher_kid_rsa, kid_rsa_public_key
    >>> pt = 200
    >>> a, b, A, B = 3, 4, 5, 6
    >>> pk = kid_rsa_public_key(a, b, A, B)
    >>> encipher_kid_rsa(pt, pk)
    161
    """
    n, e = puk
    # Kid RSA encryption is a single modular multiplication.
    return pt * e % n
def decipher_kid_rsa(ct, prk):
    """
    Here ``ct`` is the ciphertext and ``prk`` is the private key ``(n, d)``.

    Examples
    ========

    >>> from sympy.crypto.crypto import kid_rsa_public_key, kid_rsa_private_key, decipher_kid_rsa, encipher_kid_rsa
    >>> a, b, A, B = 3, 4, 5, 6
    >>> d = kid_rsa_private_key(a, b, A, B)
    >>> pt = 200
    >>> pk = kid_rsa_public_key(a, b, A, B)
    >>> prk = kid_rsa_private_key(a, b, A, B)
    >>> ct = encipher_kid_rsa(pt, pk)
    >>> decipher_kid_rsa(ct, prk)
    200
    """
    n, d = prk
    # Kid RSA decryption is a single modular multiplication.
    return ct * d % n
#################### Morse Code ######################################
def encode_morse(pt):
    """
    Encodes a plaintext into popular Morse Code with letters separated by "|"
    and words by "||".

    Characters with no Morse representation are silently dropped; any
    other unknown character raises ``KeyError``.

    References
    ==========

    .. [1] http://en.wikipedia.org/wiki/Morse_code

    Examples
    ========

    >>> from sympy.crypto.crypto import encode_morse
    >>> pt = 'ATTACK THE RIGHT FLANK'
    >>> encode_morse(pt)
    '.-|-|-|.-|-.-.|-.-||-|....|.||.-.|..|--.|....|-||..-.|.-..|.-|-.|-.-'
    """
    # Bug fix: "-" used to map to "-...-", which is the ITU code for "=";
    # the correct hyphen/minus code is "-....-".
    morse_encoding_map = {"A": ".-", "B": "-...",
                          "C": "-.-.", "D": "-..",
                          "E": ".", "F": "..-.",
                          "G": "--.", "H": "....",
                          "I": "..", "J": ".---",
                          "K": "-.-", "L": ".-..",
                          "M": "--", "N": "-.",
                          "O": "---", "P": ".--.",
                          "Q": "--.-", "R": ".-.",
                          "S": "...", "T": "-",
                          "U": "..-", "V": "...-",
                          "W": ".--", "X": "-..-",
                          "Y": "-.--", "Z": "--..",
                          "0": "-----", "1": ".----",
                          "2": "..---", "3": "...--",
                          "4": "....-", "5": ".....",
                          "6": "-....", "7": "--...",
                          "8": "---..", "9": "----.",
                          ".": ".-.-.-", ",": "--..--",
                          ":": "---...", ";": "-.-.-.",
                          "?": "..--..", "-": "-....-",
                          "_": "..--.-", "(": "-.--.",
                          ")": "-.--.-", "'": ".----.",
                          "=": "-...-", "+": ".-.-.",
                          "/": "-..-.", "@": ".--.-.",
                          "$": "...-..-", "!": "-.-.--"}
    # Characters with no Morse code are stripped before encoding.
    # (Explicitly escaped backslash: the old "[\]" relied on Python
    # keeping an unrecognized escape sequence verbatim.)
    unusable_chars = "\"#%&*<>[\\]^`{|}~"
    for ch in unusable_chars:
        pt = pt.replace(ch, "")
    pt = pt.upper()
    # Letters joined with "|", words joined with "||".
    return "||".join(
        "|".join(morse_encoding_map[letter] for letter in word)
        for word in pt.split(" ")
    )
def decode_morse(mc):
    """
    Decodes a Morse Code with letters separated by "|"
    and words by "||" into plaintext.

    Returns the string "Invalid Morse Code" if any letter group is
    not a recognized Morse code.

    References
    ==========

    .. [1] http://en.wikipedia.org/wiki/Morse_code

    Examples
    ========

    >>> from sympy.crypto.crypto import decode_morse
    >>> mc = '--|---|...-|.||.|.-|...|-'
    >>> decode_morse(mc)
    'MOVE EAST'
    """
    # Bug fixes relative to the original table:
    #   * "1" is ".----" (the old entry "----" was not valid Morse, so
    #     decoding an encoded "1" failed);
    #   * "-" is "-....-" (previously the key "-...-" appeared twice and
    #     the later "=" entry silently overrode "-").
    morse_decoding_map = {".-": "A", "-...": "B",
                          "-.-.": "C", "-..": "D",
                          ".": "E", "..-.": "F",
                          "--.": "G", "....": "H",
                          "..": "I", ".---": "J",
                          "-.-": "K", ".-..": "L",
                          "--": "M", "-.": "N",
                          "---": "O", ".--.": "P",
                          "--.-": "Q", ".-.": "R",
                          "...": "S", "-": "T",
                          "..-": "U", "...-": "V",
                          ".--": "W", "-..-": "X",
                          "-.--": "Y", "--..": "Z",
                          "-----": "0", ".----": "1",
                          "..---": "2", "...--": "3",
                          "....-": "4", ".....": "5",
                          "-....": "6", "--...": "7",
                          "---..": "8", "----.": "9",
                          ".-.-.-": ".", "--..--": ",",
                          "---...": ":", "-.-.-.": ";",
                          "..--..": "?", "-....-": "-",
                          "..--.-": "_", "-.--.": "(",
                          "-.--.-": ")", ".----.": "'",
                          "-...-": "=", ".-.-.": "+",
                          "-..-.": "/", ".--.-.": "@",
                          "...-..-": "$", "-.-.--": "!"}
    characterstring = []
    # Tolerate a trailing word separator; endswith also avoids the old
    # IndexError on inputs shorter than two characters.
    if mc.endswith("||"):
        mc = mc[:-2]
    for word in mc.split("||"):
        characterword = []
        for letter in word.split("|"):
            try:
                characterword.append(morse_decoding_map[letter])
            except KeyError:
                return "Invalid Morse Code"
        characterstring.append("".join(characterword))
    return " ".join(characterstring)
#################### LFSRs ##########################################
def lfsr_sequence(key, fill, n):
    r"""
    This function creates an lfsr sequence.

    INPUT:

        ``key``: a list of finite field elements,
        `[c_0, c_1, \ldots, c_k].`

        ``fill``: the list of the initial terms of the lfsr
        sequence, `[x_0, x_1, \ldots, x_k].`

        ``n``: number of terms of the sequence that the
        function returns.

    OUTPUT:

        The lfsr sequence defined by `x_{n+1} = c_k x_n + \ldots + c_0 x_{n-k}`, for
        `n \leq k`.

    Notes
    =====

    S. Golomb [G]_ gives a list of three statistical properties a
    sequence of numbers `a = \{a_n\}_{n=1}^\infty`, `a_n \in \{0,1\}`,
    should display to be considered "random": balance, low
    autocorrelation, and the proportional-runs property.

    References
    ==========

    .. [G] Solomon Golomb, Shift register sequences, Aegean Park Press, Laguna Hills, Ca, 1967

    Examples
    ========

    >>> from sympy.crypto.crypto import lfsr_sequence
    >>> from sympy.polys.domains import FF
    >>> F = FF(2)
    >>> fill = [F(1), F(1), F(0), F(1)]
    >>> key = [F(1), F(0), F(0), F(1)]
    >>> lfsr_sequence(key, fill, 10)
    [1 mod 2, 1 mod 2, 0 mod 2, 1 mod 2, 0 mod 2, 1 mod 2, 1 mod 2, 0 mod 2, 0 mod 2, 1 mod 2]

    """
    if not isinstance(key, list):
        raise TypeError("key must be a list")
    if not isinstance(fill, list):
        raise TypeError("fill must be a list")
    # Characteristic of the field, read from the first key coefficient;
    # all entries are assumed to live in the same finite field.
    p = key[0].mod
    F = FF(p)
    s = fill
    k = len(fill)
    L = []
    for i in range(n):
        s0 = s[:]
        # the oldest register element is the next output term
        L.append(s[0])
        # shift the register and append the new feedback element
        s = s[1:k]
        # NOTE: the comprehension variable i shadows the loop index i;
        # harmless in Python 3 since comprehensions have their own scope.
        x = sum([int(key[i]*s0[i]) for i in range(k)])
        s.append(F(x))
    return L  # use [x.to_int() for x in L] for int version
def lfsr_autocorrelation(L, P, k):
    """
    This function computes the lsfr autocorrelation function.

    INPUT:

        ``L``: is a periodic sequence of elements of `GF(2)`.
        ``L`` must have length larger than ``P``.

        ``P``: the period of ``L``

        ``k``: an integer (`0 < k < p`)

    OUTPUT:

        the ``k``-th value of the autocorrelation of the LFSR ``L``

    Examples
    ========

    >>> from sympy.crypto.crypto import lfsr_sequence, lfsr_autocorrelation
    >>> from sympy.polys.domains import FF
    >>> F = FF(2)
    >>> fill = [F(1), F(1), F(0), F(1)]
    >>> key = [F(1), F(0), F(0), F(1)]
    >>> s = lfsr_sequence(key, fill, 20)
    >>> lfsr_autocorrelation(s, 15, 7)
    -1/15
    >>> lfsr_autocorrelation(s, 15, 0)
    1

    """
    if not isinstance(L, list):
        raise TypeError("L (=%s) must be a list" % L)
    P = int(P)
    k = int(k)
    L0 = L[:P]  # slices makes a copy
    # wrap the first k terms around so index i + k never overruns
    L1 = L0 + L0[:k]
    # (-1)**(a_i + a_{i+k}) is +1 when the bits agree, -1 otherwise
    L2 = [(-1)**(L1[i].to_int() + L1[i + k].to_int()) for i in range(P)]
    tot = sum(L2)
    # average over one full period, as an exact rational
    return Rational(tot, P)
def lfsr_connection_polynomial(s):
    """
    This function computes the lsfr connection polynomial.

    INPUT:

        ``s``: a sequence of elements of even length, with entries in a finite field

    OUTPUT:

        ``C(x)``: the connection polynomial of a minimal LFSR yielding ``s``.

    This implements the algorithm in section 3 of J. L. Massey's article [M]_
    (the Berlekamp-Massey algorithm).

    References
    ==========

    .. [M] James L. Massey, "Shift-Register Synthesis and BCH Decoding."
        IEEE Trans. on Information Theory, vol. 15(1), pp. 122-127, Jan 1969.

    Examples
    ========

    >>> from sympy.crypto.crypto import lfsr_sequence, lfsr_connection_polynomial
    >>> from sympy.polys.domains import FF
    >>> F = FF(2)
    >>> fill = [F(1), F(1), F(0), F(1)]
    >>> key = [F(1), F(0), F(0), F(1)]
    >>> s = lfsr_sequence(key, fill, 20)
    >>> lfsr_connection_polynomial(s)
    x**4 + x + 1
    >>> fill = [F(1), F(0), F(0), F(1)]
    >>> key = [F(1), F(1), F(0), F(1)]
    >>> s = lfsr_sequence(key, fill, 20)
    >>> lfsr_connection_polynomial(s)
    x**3 + 1
    >>> fill = [F(1), F(0), F(1)]
    >>> key = [F(1), F(1), F(0)]
    >>> s = lfsr_sequence(key, fill, 20)
    >>> lfsr_connection_polynomial(s)
    x**3 + x**2 + 1
    >>> fill = [F(1), F(0), F(1)]
    >>> key = [F(1), F(0), F(1)]
    >>> s = lfsr_sequence(key, fill, 20)
    >>> lfsr_connection_polynomial(s)
    x**3 + x + 1

    """
    # Initialization of the Berlekamp-Massey state:
    #   C = current connection polynomial, L = current LFSR length,
    #   B = copy of C before the last length change, b = discrepancy at
    #   that change, m = steps since that change, N = current position.
    p = s[0].mod
    F = FF(p)
    x = Symbol("x")
    C = 1*x**0
    B = 1*x**0
    m = 1
    b = 1*x**0
    L = 0
    N = 0
    while N < len(s):
        if L > 0:
            dC = Poly(C).degree()
            r = min(L + 1, dC + 1)
            # coefficients c_0 .. c_dC of C(x)
            coeffsC = [C.subs(x, 0)] + [C.coeff(x**i) for i in range(1, dC + 1)]
            # discrepancy between s[N] and the LFSR's prediction of it
            d = (s[N].to_int() + sum([coeffsC[i]*s[N - i].to_int() for i in range(1, r)])) % p
        if L == 0:
            d = s[N].to_int()*x**0
        if d == 0:
            # prediction already correct: just advance
            m += 1
            N += 1
        if d > 0:
            if 2*L > N:
                # register long enough: correct C without changing L
                # (b**(p-2) % p is b**(-1) mod p by Fermat's little theorem)
                C = (C - d*((b**(p - 2)) % p)*x**m*B).expand()
                m += 1
                N += 1
            else:
                # length change: save old C as the new B and grow L
                T = C
                C = (C - d*((b**(p - 2)) % p)*x**m*B).expand()
                L = N + 1 - L
                m = 1
                b = d
                B = T
                N += 1
    dC = Poly(C).degree()
    coeffsC = [C.subs(x, 0)] + [C.coeff(x**i) for i in range(1, dC + 1)]
    # reassemble C(x) with all coefficients reduced mod p
    return sum([coeffsC[i] % p*x**i for i in range(dC + 1) if coeffsC[i] is not None])
#################### ElGamal #############################
def elgamal_private_key(digit=10):
    r"""
    Return three number tuple as private key.

    Elgamal encryption is based on the mathematical problem
    called the Discrete Logarithm Problem (DLP). For example,

    `a^{b} \equiv c \pmod p`

    In general, if ``a`` and ``b`` are known, ``c`` is easily
    calculated. If ``b`` is unknown, it is hard to use
    ``a`` and ``c`` to get ``b``.

    Parameters
    ==========

    digit : Key length in binary

    Returns
    =======

    (p, r, d) : p = prime number, r = primitive root, d = random number

    Examples
    ========

    >>> from sympy.crypto.crypto import elgamal_private_key
    >>> from sympy.ntheory import is_primitive_root, isprime
    >>> a, b, _ = elgamal_private_key()
    >>> isprime(a)
    True
    >>> is_primitive_root(b, a)
    True

    """
    # smallest prime above 2**digit, a primitive root mod p, and a
    # random secret exponent d in [2, p)
    p = nextprime(2**digit)
    return p, primitive_root(p), randrange(2, p)
def elgamal_public_key(prk):
    """
    Return three number tuple as public key.

    Parameters
    ==========

    prk : Tuple (p, r, d) generated by ``elgamal_private_key``

    Returns
    =======

    (p, r, e = r**d mod p) : d is a random number in private key.

    Examples
    ========

    >>> from sympy.crypto.crypto import elgamal_public_key
    >>> elgamal_public_key((1031, 14, 636))
    (1031, 14, 212)

    """
    # p and r are published unchanged; only e = r**d (mod p) is derived
    return prk[0], prk[1], pow(prk[1], prk[2], prk[0])
def encipher_elgamal(m, puk):
    r"""
    Encrypt message with public key.

    ``m`` is the plaintext message as an int; ``puk`` is the public
    key `(p, r, e)`. A random number ``a`` between ``2`` and ``p`` is
    drawn, and the encrypted message is the pair `(c_{1}, c_{2})`:

    `c_{1} \equiv r^{a} \pmod p`

    `c_{2} \equiv m e^{a} \pmod p`

    Parameters
    ==========

    m : int of encoded message

    puk : public key

    Returns
    =======

    (c1, c2) : Encipher into two number

    Raises
    ======

    ValueError : if ``m`` is not less than the prime ``p``

    Examples
    ========

    >>> from sympy.crypto.crypto import encipher_elgamal
    >>> encipher_elgamal(100, (1031, 14, 212))  # doctest: +SKIP
    (835, 271)
    """
    from random import randrange  # local import keeps the function self-contained
    p, r, e = puk
    if m > p:
        # Bug fix: the ValueError used to be constructed but never raised,
        # silently letting oversized messages through.
        raise ValueError('Message {} should be less than prime {}'.format(m, p))
    a = randrange(2, p)  # ephemeral secret exponent
    return pow(r, a, p), m * pow(e, a, p) % p
def decipher_elgamal(ct, prk):
    r"""
    Decrypt message with private key.

    `ct = (c_{1}, c_{2})`

    `prk = (p, r, d)`

    The plaintext is recovered as

    `c_{2} \cdot (c_{1}^{d})^{-1} \equiv \frac{1}{r^{ad}} m e^{a} \equiv m \pmod p`

    Examples
    ========

    >>> from sympy.crypto.crypto import decipher_elgamal
    >>> decipher_elgamal((835, 271), (1031, 14, 636))
    100
    """
    p = prk[0]
    d = prk[2]
    c1, c2 = ct
    # Performance/robustness fix: the old code computed the exact integer
    # power ct[0] ** prk[2] (astronomically large) before inverting with
    # igcdex.  Reducing modulo p as we go and inverting with the built-in
    # pow(x, -1, p) (Python 3.8+) yields the identical residue.
    u = pow(pow(c1, d, p), -1, p)
    return u * c2 % p
| bsd-3-clause |
FRCDiscord/Dozer | dozer/sources/RSSSources.py | 1 | 8500 | """Given an arbitrary RSS feed, get new posts from it"""
import re
import datetime
import xml.etree.ElementTree
import aiohttp
import discord
from .AbstractSources import Source
def clean_html(raw_html):
    """Clean all HTML tags.
    From https://stackoverflow.com/questions/9662346/python-code-to-remove-html-tags-from-a-string"""
    # Non-greedy match strips each <...> tag in a single substitution pass.
    return re.sub('<.*?>', '', raw_html)
class RSSSource(Source):
    """Given an arbitrary RSS feed, get new posts from it.

    Subclasses override ``url`` (the feed endpoint) and ``base_url``
    (the human-facing site) plus the naming/description attributes.
    """
    url = None  # feed URL; subclasses must override
    color = discord.colour.Color.blurple()
    # Formats tried, in order, by datetime.strptime() for <pubDate> values.
    date_formats = ["%a, %d %b %Y %H:%M:%S %z",
                    "%a, %d %b %Y %H:%M:%S %Z"]
    base_url = None  # human-facing site URL; subclasses must override
    read_more_str = "...\n Read More"

    def __init__(self, aiohttp_session: aiohttp.ClientSession, bot):
        super().__init__(aiohttp_session, bot)
        # GUIDs of items already seen, so only new posts are reported.
        self.guids_seen = set()

    async def first_run(self):
        """Fetch the current posts in the feed and add them to the guids_seen set"""
        response = await self.fetch()
        self.parse(response, True)

    async def get_new_posts(self):
        """Fetch the current posts in the feed, parse them for data and generate embeds/strings for them"""
        response = await self.fetch()
        items = self.parse(response)
        new_posts = {
            'source': {
                'embed': [],
                'plain': []
            }
        }
        for item in items:
            data = self.get_data(item)
            new_posts['source']['embed'].append(self.generate_embed(data))
            new_posts['source']['plain'].append(self.generate_plain_text(data))
        return new_posts

    async def fetch(self):
        """Use aiohttp to get the source feed"""
        response = await self.http_session.get(url=self.url)
        return await response.text()

    def parse(self, response, first_time=False):
        """Use xml ElementTrees to get individual Elements for new posts.

        When ``first_time`` is True, only record the GUIDs; nothing is
        reported as new.
        """
        new_items = set()
        root = xml.etree.ElementTree.fromstring(response)
        channel = root[0]
        for child in channel:
            if child.tag == 'item':
                guid = child.find('guid')
                if first_time:
                    self.guids_seen.add(guid.text)
                    continue
                if self.determine_if_new(guid.text):
                    new_items.add(child)
        return new_items

    def determine_if_new(self, guid):
        """Given a RSS item's guid, determine if this item is new or not. Store GUID if new."""
        if guid not in self.guids_seen:
            self.guids_seen.add(guid)
            return True
        else:
            return False

    def get_data(self, item):
        """Given a xml Element, extract it into readable data"""
        types = {
            'title': 'title',
            'url': 'url',
            '{http://purl.org/dc/elements/1.1/}creator': 'author',
            'description': 'description'
        }
        data = {}
        for key, value in types.items():
            element = item.find(key)
            if element is not None:
                data[value] = element.text
            else:
                data[value] = None
        if data['url'] is None:
            # Standard RSS uses <link>; fall back to a permalink <guid>.
            if item.find('link') is not None:
                data['url'] = item.find('link').text
            elif item.find('guid').attrib['isPermaLink'] == 'true':
                data['url'] = item.find('guid').text
        date_string = item.find('pubDate')
        if date_string is not None:
            formatted = False
            for date_format in self.date_formats:
                try:
                    data['date'] = datetime.datetime.strptime(date_string.text, date_format)
                    formatted = True
                except ValueError:
                    continue
            if not formatted:
                # Bug fix: this previously assigned data['data'], leaving
                # data['date'] unset and crashing generate_embed() later.
                data['date'] = datetime.datetime.now()
        else:
            data['date'] = datetime.datetime.now()
        # Robustness: some feeds omit <description>; treat it as empty
        # instead of letting clean_html(None) raise TypeError.
        desc = clean_html(data['description'] or "")
        #length = 1024 - len(self.read_more_str)
        length = 500
        if len(desc) >= length:
            data['description'] = desc[0:length] + self.read_more_str
        else:
            data['description'] = desc
        return data

    def generate_embed(self, data):
        """Given a dictionary of data, generate a discord.Embed using that data"""
        embed = discord.Embed()
        embed.title = f"New Post From {self.full_name}!"
        embed.colour = self.color
        embed.description = f"[{data['title']}]({data['url']})"
        embed.url = self.base_url
        embed.add_field(name="Description", value=data['description'])
        embed.set_author(name=data['author'])
        embed.timestamp = data['date']
        return embed

    def generate_plain_text(self, data):
        """Given a dictionary of data, generate a string using that data"""
        return f"New Post from {self.full_name} from {data['author']}:\n" \
               f"{data['title']}\n" \
               f">>> {data['description']}\n" \
               f"Read more at {data['url']}"
class FRCBlogPosts(RSSSource):
    """Official blog posts from the FIRST Robotics Competition."""
    url = "https://www.firstinspires.org/robotics/frc/blog-rss"
    base_url = "https://www.firstinspires.org/robotics/frc/blog/"
    full_name = "FRC Blog Posts"
    short_name = "frc"
    description = "Official blog posts from the FIRST Robotics Competition"
    color = discord.colour.Color.dark_blue()


class TBABlog(RSSSource):
    """Posts from The Blue Alliance's Blog."""
    url = "https://blog.thebluealliance.com/feed/"
    base_url = "https://blog.thebluealliance.com/"
    full_name = "The Blue Alliance Blog"
    short_name = "tba-blog"
    description = "Posts from The Blue Alliance's Blog"
    color = discord.colour.Color.dark_blue()
class CDLatest(RSSSource):
    """Latest forum posts from Chief Delphi."""
    url = "https://www.chiefdelphi.com/latest.rss"
    base_url = "https://www.chiefdelphi.com/latest"
    full_name = "Chief Delphi"
    short_name = "cd"
    # Bug fix: the description was copy-pasted from FRCBlogPosts and
    # claimed these were official FRC blog posts; it is shown to users
    # when listing sources, so describe Chief Delphi correctly.
    description = "Latest forum posts from Chief Delphi"
    color = discord.colour.Color.orange()
class FRCQA(RSSSource):
    """Answers from the official FIRST Robotics Competition Q&A system."""
    url = "https://frc-qa.firstinspires.org/rss/answers.rss"
    base_url = "https://frc-qa.firstinspires.org/"
    full_name = "FRC Q&A Answers"
    short_name = "frc-qa"
    description = "Answers from the official FIRST Robotics Competition Q&A system"
    color = discord.colour.Color.dark_blue()


class FTCBlogPosts(RSSSource):
    """Official blog posts from the FIRST Tech Challenge (Blogspot feed)."""
    url = "http://firsttechchallenge.blogspot.com//feeds/posts/default"
    base_url = "http://firsttechchallenge.blogspot.com/"
    full_name = "FTC Blog Posts"
    short_name = "ftc"
    description = "Official blog posts from the FIRST Tech Challenge"
    color = discord.colour.Color.orange()


class FTCForum(RSSSource):
    """The official FTC Forum posts."""
    url = "https://ftcforum.firstinspires.org/external?type=rss2&nodeid=1"
    base_url = "https://ftcforum.firstinspires.org/"
    full_name = "FTC Forum Posts"
    short_name = "ftcforum"
    description = "The official FTC Forum Posts"
    color = discord.colour.Color.orange()


class JVNBlog(RSSSource):
    """Blog posts by John V Neun, 148 Head Engineer."""
    url = "https://johnvneun.com/blog?format=rss"
    base_url = "https://johnvneun.com/"
    full_name = "JVN's Blog"
    short_name = "jvn"
    aliases = ['148', 'robowranglers']
    description = "Blog posts by John V Neun, 148 Head Engineer"
    # 000000 is simply the integer 0, i.e. black
    color = discord.colour.Color(value=000000)


class SpectrumBlog(RSSSource):
    """Blog Posts from team 3847, Spectrum."""
    url = "http://spectrum3847.org/feed/"
    base_url = "http://spectrum3847.org/category/blog/"
    full_name = "Spectrum Blog"
    short_name = "spectrum"
    aliases = ['3847']
    description = "Blog Posts from team 3847, Spectrum"
    color = discord.colour.Color.purple()


class TestSource(RSSSource):
    """A source for testing. Make sure to disable this before committing."""
    url = "http://lorem-rss.herokuapp.com/feed?interval=1"
    base_url = "http://lorem-rss.herokuapp.com"
    full_name = "Test Source"
    short_name = "test"
    description = "Test Source Please Ignore"
    disabled = True
| gpl-3.0 |
tempbottle/pycapnp | test/test_schema.py | 3 | 1639 | import pytest
import capnp
import os
this_dir = os.path.dirname(__file__)
@pytest.fixture
def addressbook():
    """Load the addressbook.capnp schema shipped next to the tests."""
    return capnp.load(os.path.join(this_dir, 'addressbook.capnp'))


@pytest.fixture
def annotations():
    """Load the annotations.capnp schema shipped next to the tests."""
    return capnp.load(os.path.join(this_dir, 'annotations.capnp'))


def test_basic_schema(addressbook):
    """The first declared field of Person is its id."""
    assert addressbook.Person.schema.fieldnames[0] == 'id'


def test_list_schema(addressbook):
    """List-of-struct schemas expose the element struct's node id."""
    peopleField = addressbook.AddressBook.schema.fields['people']
    personType = peopleField.schema.elementType
    assert personType.node.id == addressbook.Person.schema.node.id
    # A ListSchema built directly from the struct agrees with the field's.
    personListSchema = capnp.ListSchema(addressbook.Person)
    assert personListSchema.elementType.node.id == addressbook.Person.schema.node.id


def test_annotations(annotations):
    """Text, struct, and list annotation values are readable from nodes."""
    assert annotations.schema.node.annotations[0].value.text == 'TestFile'
    annotation = annotations.TestAnnotationOne.schema.node.annotations[0]
    assert annotation.value.text == 'Test'
    annotation = annotations.TestAnnotationTwo.schema.node.annotations[0]
    assert annotation.value.struct.as_struct(annotations.AnnotationStruct).test == 100
    # List-of-struct annotation values.
    annotation = annotations.TestAnnotationThree.schema.node.annotations[0]
    annotation_list = annotation.value.list.as_list(capnp.ListSchema(annotations.AnnotationStruct))
    assert annotation_list[0].test == 100
    assert annotation_list[1].test == 101
    # List-of-primitive annotation values.
    annotation = annotations.TestAnnotationFour.schema.node.annotations[0]
    annotation_list = annotation.value.list.as_list(capnp.ListSchema(capnp.types.UInt16))
    assert annotation_list[0] == 200
    assert annotation_list[1] == 201
| bsd-2-clause |
vakhet/rathena-utils | dev/foo.py | 1 | 6608 | from re import search, findall, split, match
def valid(line):
    """Return True when *line* is non-empty and contains no blacklisted token from INVALID."""
    if not line:
        return False
    return all(token not in line for token in INVALID)
def rathena_db_parse(table, schema, file, db):
    """Parse a rAthena *db.txt flat file into an SQLite table.

    table  -- table name
    schema -- list of (column_name, column_type) pairs, e.g.
              [('id', 'INTEGER PRIMARY KEY'), ('name', 'TEXT')]
    file   -- readable file handle positioned at the start of the db file
    db     -- sqlite3 connection (or cursor) supporting execute()/executescript()

    Rows whose comma-separated field count does not match the schema are
    skipped.  Fixes vs. the previous version: the SQL typo "IF EXIST",
    treating ``schema`` as a dict (``schema.keys()``) although it is
    documented as a list of tuples, and interpolating raw values into the
    INSERT statement (broken for any text value and injection-prone) -
    values are now bound as parameters.
    """
    # (Re)create the target table from the schema.
    columns = ', '.join('{} {}'.format(col_name, col_type)
                        for col_name, col_type in schema)
    db.executescript(
        'DROP TABLE IF EXISTS {0}; CREATE TABLE {0} ({1})'.format(table, columns))
    # One parameterized INSERT reused for every row.
    placeholders = ', '.join('?' for _ in schema)
    insert = 'INSERT OR IGNORE INTO {} VALUES ({})'.format(table, placeholders)
    for line in file.readlines():
        fields = [field.strip() for field in line.split(',')]
        if len(fields) != len(schema):
            # malformed or comment line: skip it
            continue
        db.execute(insert, fields)
def read_iteminfo_lua(file, db):
    """Parse a client-side iteminfo.lua file into the item_desc table.

    file -- path to iteminfo.lua (cp1251-encoded client file)
    db   -- sqlite3 connection/cursor with a 9-column item_desc table

    Walks the file line by line, accumulating the current item dict
    between its "[<id>] = {" opening line and its closing "}" line.
    NOTE(review): tab depth is assumed to distinguish scalar fields
    (2 tabs) from description-list entries (3 tabs) - confirm against
    an actual iteminfo.lua before changing this.
    """
    with open(file, 'r', encoding='cp1251', errors='ignore') as fh:
        for line in fh.readlines()[1:-1]:
            if line[0] == '}':
                # closing brace of the outermost table: done
                break
            if line[1] == '[':
                # "[<id>] = {" starts a new item entry
                item = {}
                item['id'] = int(search('\d+', line)[0])
                continue
            elif line[1] == '}':
                # end of one item: normalize the numeric fields, join the
                # description lists with "___" separators, and store the row
                item['slotCount'] = int(item['slotCount'])
                item['ClassNum'] = int(item['ClassNum'])
                item['unidentifiedDescriptionName'] = ''.join(i+'___'
                    for i in item['unidentifiedDescriptionName']).rstrip('___')
                item['identifiedDescriptionName'] = ''.join(i+'___'
                    for i in item['identifiedDescriptionName']).rstrip('___')
                try:
                    db.execute('''INSERT INTO item_desc VALUES
                        (?, ?, ?, ?, ?, ?, ?, ?, ?)''',
                        tuple(item.values()))
                except Exception:
                    # dump the offending item for debugging, then abort
                    print('count:', len(item), '\n', item, '\n========')
                    quit()
                continue
            tab = len(findall('\t', line))
            if tab == 2 and search('},$', line) is None:
                # scalar field line: "<name> = <value>"
                s = search('\t+(.+) = (.+)', line)
                if s[2][0] == '"':
                    item[s[1]] = s[2][:-1].strip('"')
                elif s[2][0] == '{':
                    # start of a multi-line list value
                    item[s[1]] = []
                else:
                    item[s[1]] = s[2].rstrip(',')
                continue
            elif tab == 3:
                # list element belonging to the most recent field.
                # NOTE(review): relies on `s` surviving from a previous
                # iteration; a 3-tab line before any 2-tab line would raise
                # NameError - confirm the input always nests correctly.
                item[s[1]].append(search('"(.*)"', line)[1])
                continue
def read_translate(file, db):
    """Load a translation definition file into the variables/scripts tables.

    The file has sections headed "[variable]", "[script]" and "[other]";
    only the first two are consumed here.

    file -- path to the UTF-8 translation file
    db   -- sqlite3 connection/cursor with variables and scripts tables
    """
    with open(file, 'r', encoding='utf8', errors='ignore') as fh:
        line = ''
        # skip ahead to the [variable] section header
        while not line.startswith('[variable]'):
            line = fh.readline()
        # [variable] section: "NAME translation" pairs, one per line
        while not line.startswith('[script]'):
            line = fh.readline()
            if match('[A-Z]', line):
                variable = line.strip().split(maxsplit=1)
                db.execute("INSERT OR IGNORE INTO variables VALUES (?, ?)",
                           tuple(variable))
        # [script] section: "name,args; translation" entries.
        # NOTE: `split` here is re.split imported at module top.
        while not line.startswith('[other]'):
            line = fh.readline()
            if match('[a-z]', line):
                script, translate = split(';[\s]*', line, maxsplit=1)
                name, variables = split(',', script, maxsplit=1)
                db.execute("INSERT OR IGNORE INTO scripts VALUES (?, ?, ?)",
                           (name, variables, translate.strip()))
def translate(script):
    """Translate an item script using the ``scripts`` lookup table.

    ``script`` is a "name,variables" string; returns the translated text,
    or the input unchanged when no usable translation exists.
    """
    global db
    name, var = script.split(',', maxsplit=1)
    # BUG FIX: parameters must be a sequence.  Passing the bare string made
    # the DB-API treat each character as a separate binding, so the lookup
    # failed for every name longer than one character.
    db.execute("""SELECT var, translate FROM scripts WHERE name=?""",
               (name,))
    row = db.fetchone()
    try:
        # NOTE(review): ``*var`` splats the raw string into single
        # characters; presumably the variables should be split on ','
        # first -- confirm against the data (the except keeps this safe).
        translate = row[1].format(*var)
    except Exception:
        # No row found, or the format failed: fall back to the original.
        translate = script
    return translate
# Substrings whose presence marks an item script as not safely translatable
# (engine calls, randomness, comments...).  Presumably checked by callers
# before invoking translate() -- confirm against the call sites.
INVALID = ('@', 'getrefine', 'readparam', 'readParam', 'autobonus', 'getskill',
           'getgroupitem', 'getrandgroupitem', 'callfunc', 'getitem',
           'rentitem', 'monster ', 'pet ', 'Roulette', 'specialeffect',
           'mercenary_create', 'getcharid', 'rand(', 'itemheal', 'produce',
           'if(', 'if (', '/*', 'min('
           )
# Schema reset script: drops and recreates all four tables, so any existing
# data is discarded.  Executed as a multi-statement script (sqlite
# executescript-style) by the caller.
DB_INIT = ("""
DROP TABLE IF EXISTS itemdb;
DROP TABLE IF EXISTS item_desc;
DROP TABLE IF EXISTS variables;
DROP TABLE IF EXISTS scripts;
CREATE TABLE itemdb (
ID INTEGER PRIMARY KEY,
AegisName TEXT,
Name TEXT,
Type INTEGER,
Buy INTEGER,
Sell INTEGER,
Weight INTEGER,
ATK INTEGER,
MATK INTEGER,
DEF INTEGER,
Range INTEGER,
Slots INTEGER,
Job INTEGER,
Class INTEGER,
Gender INTEGER,
Loc INTEGER,
wLV INTEGER,
eLV INTEGER,
maxLevel INTEGER,
Refineable INTEGER,
View INTEGER,
Script TEXT,
OnEquip_Script TEXT,
OnUnequip_Script TEXT
);
CREATE TABLE item_desc (
ID INTEGER PRIMARY KEY,
unidentifiedDisplayName TEXT,
unidentifiedResourceName TEXT,
unidentifiedDescriptionName TEXT,
identifiedDisplayName TEXT,
identifiedResourceName TEXT,
identifiedDescriptionName TEXT,
slotCount INTEGER,
ClassNum INTEGER
);
CREATE TABLE variables (
original TEXT UNIQUE,
translate TEXT
);
CREATE TABLE scripts (
name TEXT UNIQUE,
var TEXT,
translate TEXT
)
""")
| mit |
ErBa508/data-science-from-scratch | code/statistics.py | 60 | 5779 | from __future__ import division
from collections import Counter
from linear_algebra import sum_of_squares, dot
import math
num_friends = [100,49,41,40,25,21,21,19,19,18,18,16,15,15,15,15,14,14,13,13,13,13,12,12,11,10,10,10,10,10,10,10,10,10,10,10,10,10,10,10,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,8,8,8,8,8,8,8,8,8,8,8,8,8,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1]
def make_friend_counts_histogram(plt):
    """Plot a bar chart of how many people have each friend count (0-100)."""
    counts_by_value = Counter(num_friends)
    friend_values = range(101)
    people_per_value = [counts_by_value[value] for value in friend_values]
    plt.bar(friend_values, people_per_value)
    plt.axis([0, 101, 0, 25])
    plt.title("Histogram of Friend Counts")
    plt.xlabel("# of friends")
    plt.ylabel("# of people")
    plt.show()
# Simple order statistics computed directly from the sorted data.
num_points = len(num_friends) # 204
largest_value = max(num_friends) # 100
smallest_value = min(num_friends) # 1
sorted_values = sorted(num_friends)
smallest_value = sorted_values[0] # 1
second_smallest_value = sorted_values[1] # 1
second_largest_value = sorted_values[-2] # 49
# this isn't right if you don't from __future__ import division
def mean(x):
    """Return the arithmetic mean of x (true division required)."""
    total = sum(x)
    return total / len(x)
def median(v):
    """Return the 'middle-most' value of v: the middle element when the
    length is odd, the average of the two middle elements when even."""
    ordered = sorted(v)
    mid, odd = divmod(len(ordered), 2)
    if odd:
        return ordered[mid]
    # Even length: average the two values straddling the midpoint.
    return (ordered[mid - 1] + ordered[mid]) / 2
def quantile(x, p):
    """Return the pth-percentile value in x (by sorted position)."""
    ordered = sorted(x)
    position = int(p * len(ordered))
    return ordered[position]
def mode(x):
    """returns a list, might be more than one mode"""
    counts = Counter(x)
    max_count = max(counts.values())
    # .items() works on both Python 2 and 3; the old .iteritems() call is
    # Python-2 only and crashed this function under Python 3.
    return [x_i for x_i, count in counts.items()
            if count == max_count]
# "range" already means something in Python, so we'll use a different name
def data_range(x):
    """Return the spread of x: largest value minus smallest value."""
    lowest, highest = min(x), max(x)
    return highest - lowest
def de_mean(x):
    """translate x by subtracting its mean (so the result has mean 0)"""
    center = sum(x) / len(x)
    return [value - center for value in x]
def variance(x):
    """assumes x has at least two elements (sample variance, n - 1)"""
    n = len(x)
    center = sum(x) / n
    squared_deviations = sum((value - center) ** 2 for value in x)
    return squared_deviations / (n - 1)
def standard_deviation(x):
    """Return the square root of the sample variance of x."""
    spread = variance(x)
    return math.sqrt(spread)
def interquartile_range(x):
    """Return the spread of the middle 50% of x: Q3 minus Q1."""
    q1 = quantile(x, 0.25)
    q3 = quantile(x, 0.75)
    return q3 - q1
####
#
# CORRELATION
#
#####
daily_minutes = [1,68.77,51.25,52.08,38.36,44.54,57.13,51.4,41.42,31.22,34.76,54.01,38.79,47.59,49.1,27.66,41.03,36.73,48.65,28.12,46.62,35.57,32.98,35,26.07,23.77,39.73,40.57,31.65,31.21,36.32,20.45,21.93,26.02,27.34,23.49,46.94,30.5,33.8,24.23,21.4,27.94,32.24,40.57,25.07,19.42,22.39,18.42,46.96,23.72,26.41,26.97,36.76,40.32,35.02,29.47,30.2,31,38.11,38.18,36.31,21.03,30.86,36.07,28.66,29.08,37.28,15.28,24.17,22.31,30.17,25.53,19.85,35.37,44.6,17.23,13.47,26.33,35.02,32.09,24.81,19.33,28.77,24.26,31.98,25.73,24.86,16.28,34.51,15.23,39.72,40.8,26.06,35.76,34.76,16.13,44.04,18.03,19.65,32.62,35.59,39.43,14.18,35.24,40.13,41.82,35.45,36.07,43.67,24.61,20.9,21.9,18.79,27.61,27.21,26.61,29.77,20.59,27.53,13.82,33.2,25,33.1,36.65,18.63,14.87,22.2,36.81,25.53,24.62,26.25,18.21,28.08,19.42,29.79,32.8,35.99,28.32,27.79,35.88,29.06,36.28,14.1,36.63,37.49,26.9,18.58,38.48,24.48,18.95,33.55,14.24,29.04,32.51,25.63,22.22,19,32.73,15.16,13.9,27.2,32.01,29.27,33,13.74,20.42,27.32,18.23,35.35,28.48,9.08,24.62,20.12,35.26,19.92,31.02,16.49,12.16,30.7,31.22,34.65,13.13,27.51,33.2,31.57,14.1,33.42,17.44,10.12,24.42,9.82,23.39,30.93,15.03,21.67,31.09,33.29,22.61,26.89,23.48,8.38,27.81,32.35,23.84]
def covariance(x, y):
    """Return the sample covariance of the paired data x and y."""
    n = len(x)
    mean_x = sum(x) / n
    mean_y = sum(y) / n
    # dot product of the de-meaned series, divided by n - 1
    cross = sum((a - mean_x) * (b - mean_y) for a, b in zip(x, y))
    return cross / (n - 1)
def correlation(x, y):
    """Return the Pearson correlation of x and y (0 when either series
    has no variation)."""
    stdev_x = standard_deviation(x)
    stdev_y = standard_deviation(y)
    if stdev_x == 0 or stdev_y == 0:
        return 0  # if no variation, correlation is zero
    return covariance(x, y) / stdev_x / stdev_y
# Drop the single 100-friend outlier (and the paired daily_minutes entry)
# so the correlation below is not dominated by it.
outlier = num_friends.index(100) # index of outlier

num_friends_good = [x
                    for i, x in enumerate(num_friends)
                    if i != outlier]

daily_minutes_good = [x
                      for i, x in enumerate(daily_minutes)
                      if i != outlier]
if __name__ == "__main__":
    # Demo / smoke test: print summary statistics for the sample data.
    # NOTE: Python 2 print statements -- this module targets Python 2.
    print "num_points", len(num_friends)
    print "largest value", max(num_friends)
    print "smallest value", min(num_friends)
    print "second_smallest_value", sorted_values[1]
    print "second_largest_value", sorted_values[-2]
    print "mean(num_friends)", mean(num_friends)
    print "median(num_friends)", median(num_friends)
    print "quantile(num_friends, 0.10)", quantile(num_friends, 0.10)
    print "quantile(num_friends, 0.25)", quantile(num_friends, 0.25)
    print "quantile(num_friends, 0.75)", quantile(num_friends, 0.75)
    print "quantile(num_friends, 0.90)", quantile(num_friends, 0.90)
    print "mode(num_friends)", mode(num_friends)
    print "data_range(num_friends)", data_range(num_friends)
    print "variance(num_friends)", variance(num_friends)
    print "standard_deviation(num_friends)", standard_deviation(num_friends)
    print "interquartile_range(num_friends)", interquartile_range(num_friends)

    print "covariance(num_friends, daily_minutes)", covariance(num_friends, daily_minutes)
    print "correlation(num_friends, daily_minutes)", correlation(num_friends, daily_minutes)
    print "correlation(num_friends_good, daily_minutes_good)", correlation(num_friends_good, daily_minutes_good)
| unlicense |
pombreda/syzygy | third_party/numpy/files/numpy/distutils/mingw32ccompiler.py | 76 | 19091 | """
Support code for building Python extensions on Windows.
# NT stuff
# 1. Make sure libpython<version>.a exists for gcc. If not, build it.
# 2. Force windows to use gcc (we're struggling with MSVC and g77 support)
# 3. Force windows to use g77
"""
import os
import subprocess
import sys
import subprocess
import re
# Overwrite certain distutils.ccompiler functions:
import numpy.distutils.ccompiler
if sys.version_info[0] < 3:
import log
else:
from numpy.distutils import log
# NT stuff
# 1. Make sure libpython<version>.a exists for gcc. If not, build it.
# 2. Force windows to use gcc (we're struggling with MSVC and g77 support)
# --> this is done in numpy/distutils/ccompiler.py
# 3. Force windows to use g77
import distutils.cygwinccompiler
from distutils.version import StrictVersion
from numpy.distutils.ccompiler import gen_preprocess_options, gen_lib_options
from distutils.errors import DistutilsExecError, CompileError, UnknownFileError
from distutils.unixccompiler import UnixCCompiler
from distutils.msvccompiler import get_build_version as get_build_msvc_version
from numpy.distutils.misc_util import msvc_runtime_library, get_build_architecture
# Useful to generate table of symbols from a dll
_START = re.compile(r'\[Ordinal/Name Pointer\] Table')
_TABLE = re.compile(r'^\s+\[([\s*[0-9]*)\] ([a-zA-Z0-9_]*)')
# the same as cygwin plus some additional parameters
class Mingw32CCompiler(distutils.cygwinccompiler.CygwinCCompiler):
    """ A modified MingW32 compiler compatible with an MSVC built Python.
    """

    compiler_type = 'mingw32'

    def __init__ (self,
                  verbose=0,
                  dry_run=0,
                  force=0):

        distutils.cygwinccompiler.CygwinCCompiler.__init__ (self,
                                                            verbose,dry_run, force)

        # we need to support 3.2 which doesn't match the standard
        # get_versions methods regex
        if self.gcc_version is None:
            import re
            # Ask gcc directly for its version when the base class failed
            # to parse it.
            p = subprocess.Popen(['gcc', '-dumpversion'], shell=True,
                                 stdout=subprocess.PIPE)
            out_string = p.stdout.read()
            p.stdout.close()
            result = re.search('(\d+\.\d+)',out_string)
            if result:
                self.gcc_version = StrictVersion(result.group(1))

        # A real mingw32 doesn't need to specify a different entry point,
        # but cygwin 2.91.57 in no-cygwin-mode needs it.
        if self.gcc_version <= "2.91.57":
            entry_point = '--entry _DllMain@12'
        else:
            entry_point = ''

        if self.linker_dll == 'dllwrap':
            # Commented out '--driver-name g++' part that fixes weird
            # g++.exe: g++: No such file or directory
            # error (mingw 1.0 in Enthon24 tree, gcc-3.4.5).
            # If the --driver-name part is required for some environment
            # then make the inclusion of this part specific to that environment.
            self.linker = 'dllwrap' # --driver-name g++'
        elif self.linker_dll == 'gcc':
            self.linker = 'g++'

        # **changes: eric jones 4/11/01
        # 1. Check for import library on Windows. Build if it doesn't exist.
        build_import_library()

        # **changes: eric jones 4/11/01
        # 2. increased optimization and turned off all warnings
        # 3. also added --driver-name g++
        #self.set_executables(compiler='gcc -mno-cygwin -O2 -w',
        # compiler_so='gcc -mno-cygwin -mdll -O2 -w',
        # linker_exe='gcc -mno-cygwin',
        # linker_so='%s --driver-name g++ -mno-cygwin -mdll -static %s'
        # % (self.linker, entry_point))

        # MS_WIN64 should be defined when building for amd64 on windows, but
        # python headers define it only for MS compilers, which has all kind of
        # bad consequences, like using Py_ModuleInit4 instead of
        # Py_ModuleInit4_64, etc... So we add it here
        if get_build_architecture() == 'AMD64':
            if self.gcc_version < "4.":
                self.set_executables(
                    compiler='gcc -g -DDEBUG -DMS_WIN64 -mno-cygwin -O0 -Wall',
                    compiler_so='gcc -g -DDEBUG -DMS_WIN64 -mno-cygwin -O0 -Wall -Wstrict-prototypes',
                    linker_exe='gcc -g -mno-cygwin',
                    linker_so='gcc -g -mno-cygwin -shared')
            else:
                # gcc-4 series releases do not support -mno-cygwin option
                self.set_executables(
                    compiler='gcc -g -DDEBUG -DMS_WIN64 -O0 -Wall',
                    compiler_so='gcc -g -DDEBUG -DMS_WIN64 -O0 -Wall -Wstrict-prototypes',
                    linker_exe='gcc -g',
                    linker_so='gcc -g -shared')
        else:
            if self.gcc_version <= "3.0.0":
                self.set_executables(compiler='gcc -mno-cygwin -O2 -w',
                                     compiler_so='gcc -mno-cygwin -mdll -O2 -w -Wstrict-prototypes',
                                     linker_exe='g++ -mno-cygwin',
                                     linker_so='%s -mno-cygwin -mdll -static %s'
                                     % (self.linker, entry_point))
            elif self.gcc_version < "4.":
                self.set_executables(compiler='gcc -mno-cygwin -O2 -Wall',
                                     compiler_so='gcc -mno-cygwin -O2 -Wall -Wstrict-prototypes',
                                     linker_exe='g++ -mno-cygwin',
                                     linker_so='g++ -mno-cygwin -shared')
            else:
                # gcc-4 series releases do not support -mno-cygwin option
                self.set_executables(compiler='gcc -O2 -Wall',
                                     compiler_so='gcc -O2 -Wall -Wstrict-prototypes',
                                     linker_exe='g++ ',
                                     linker_so='g++ -shared')
        # added for python2.3 support
        # we can't pass it through set_executables because pre 2.2 would fail
        self.compiler_cxx = ['g++']

        # Maybe we should also append -mthreads, but then the finished
        # dlls need another dll (mingwm10.dll see Mingw32 docs)
        # (-mthreads: Support thread-safe exception handling on `Mingw32')

        # no additional libraries needed
        #self.dll_libraries=[]
        return

    # __init__ ()

    def link(self,
             target_desc,
             objects,
             output_filename,
             output_dir,
             libraries,
             library_dirs,
             runtime_library_dirs,
             export_symbols = None,
             debug=0,
             extra_preargs=None,
             extra_postargs=None,
             build_temp=None,
             target_lang=None):
        """Link, adding the MSVC runtime library when required and
        dispatching to the base-class link() matching the gcc version."""
        # Include the appropiate MSVC runtime library if Python was built
        # with MSVC >= 7.0 (MinGW standard is msvcrt)
        runtime_library = msvc_runtime_library()
        if runtime_library:
            if not libraries:
                libraries = []
            libraries.append(runtime_library)
        args = (self,
                target_desc,
                objects,
                output_filename,
                output_dir,
                libraries,
                library_dirs,
                runtime_library_dirs,
                None, #export_symbols, we do this in our def-file
                debug,
                extra_preargs,
                extra_postargs,
                build_temp,
                target_lang)
        if self.gcc_version < "3.0.0":
            func = distutils.cygwinccompiler.CygwinCCompiler.link
        else:
            func = UnixCCompiler.link
        # Pass only as many positional args as the chosen link() accepts
        # (the two base classes have different signatures).
        if sys.version_info[0] >= 3:
            func(*args[:func.__code__.co_argcount])
        else:
            func(*args[:func.im_func.func_code.co_argcount])
        return

    def object_filenames (self,
                          source_filenames,
                          strip_dir=0,
                          output_dir=''):
        """Map source file names to object file names, additionally
        accepting Windows resource files (.rc/.res)."""
        if output_dir is None: output_dir = ''
        obj_names = []
        for src_name in source_filenames:
            # use normcase to make sure '.rc' is really '.rc' and not '.RC'
            (base, ext) = os.path.splitext (os.path.normcase(src_name))

            # added these lines to strip off windows drive letters
            # without it, .o files are placed next to .c files
            # instead of the build directory
            drv,base = os.path.splitdrive(base)
            if drv:
                base = base[1:]

            if ext not in (self.src_extensions + ['.rc','.res']):
                raise UnknownFileError(
                      "unknown file type '%s' (from '%s')" % \
                      (ext, src_name))
            if strip_dir:
                base = os.path.basename (base)
            if ext == '.res' or ext == '.rc':
                # these need to be compiled to object files
                obj_names.append (os.path.join (output_dir,
                                                base + ext + self.obj_extension))
            else:
                obj_names.append (os.path.join (output_dir,
                                                base + self.obj_extension))
        return obj_names

    # object_filenames ()
def find_python_dll():
    """Locate pythonXY.dll for the running interpreter.

    Searches the interpreter's lib directory and system32; raises
    ValueError when the DLL is not found (e.g. registered only in SxS).
    """
    major, minor = sys.version_info[0], sys.version_info[1]
    dllname = 'python%d%d.dll' % (major, minor)
    print ("Looking for %s" % dllname)

    # Candidate directories: the python install's lib dir, then system32
    # when SYSTEMROOT is set.  A DLL only in SxS cannot be found this way.
    candidates = [os.path.join(sys.prefix, 'lib')]
    if 'SYSTEMROOT' in os.environ:
        candidates.append(os.path.join(os.environ['SYSTEMROOT'], 'system32'))

    for directory in candidates:
        candidate = os.path.join(directory, dllname)
        if os.path.exists(candidate):
            return candidate

    raise ValueError("%s not found in %s" % (dllname, candidates))
def dump_table(dll):
    """Return objdump's header/export listing for *dll* as a list of lines."""
    proc = subprocess.Popen(["objdump.exe", "-p", dll],
                            stdout=subprocess.PIPE)
    return proc.stdout.readlines()
def generate_def(dll, dfile):
    """Given a dll file location, get all its exported symbols and dump them
    into the given def file.

    The .def file will be overwritten.  Raises ValueError when objdump's
    output contains no symbol table."""
    dump = dump_table(dll)
    for i in range(len(dump)):
        if _START.match(dump[i]):
            break
    else:
        # BUG FIX: the old code checked ``i == len(dump)`` after the loop,
        # but a completed for loop leaves i at len(dump) - 1 (and an empty
        # dump leaves i unbound), so a missing table was never detected.
        # for/else runs exactly when no break fired.
        raise ValueError("Symbol table not found")
    # Collect "[ordinal] name" rows until the first non-matching line.
    syms = []
    for j in range(i+1, len(dump)):
        m = _TABLE.match(dump[j])
        if m:
            syms.append((int(m.group(1).strip()), m.group(2)))
        else:
            break

    if len(syms) == 0:
        log.warn('No symbols found in %s' % dll)

    # with-statement guarantees the handle is closed even if a write fails
    # (the old code leaked it on error).
    with open(dfile, 'w') as d:
        d.write('LIBRARY %s\n' % os.path.basename(dll))
        d.write(';CODE PRELOAD MOVEABLE DISCARDABLE\n')
        d.write(';DATA PRELOAD SINGLE\n')
        d.write('\nEXPORTS\n')
        for s in syms:
            #d.write('@%d %s\n' % (s[0], s[1]))
            d.write('%s\n' % s[1])
def build_import_library():
    """Build the gcc import library matching the running Python.

    No-op on non-Windows platforms; dispatches on the build architecture.
    """
    if os.name != 'nt':
        return

    arch = get_build_architecture()
    if arch == 'AMD64':
        return _build_import_library_amd64()
    if arch == 'Intel':
        return _build_import_library_x86()
    raise ValueError("Unhandled arch %s" % arch)
def _build_import_library_amd64():
    """Create libpythonXY.a for gcc/AMD64 from the python DLL's exports.

    Skips the build when the import library already exists."""
    dll_file = find_python_dll()
    out_name = "libpython%d%d.a" % tuple(sys.version_info[:2])
    out_file = os.path.join(sys.prefix, 'libs', out_name)
    if os.path.isfile(out_file):
        log.debug('Skip building import library: "%s" exists' % (out_file))
        return

    def_name = "python%d%d.def" % tuple(sys.version_info[:2])
    def_file = os.path.join(sys.prefix, 'libs', def_name)

    log.info('Building import library (arch=AMD64): "%s" (from %s)' %
             (out_file, dll_file))

    generate_def(dll_file, def_file)

    cmd = ['dlltool', '-d', def_file, '-l', out_file]
    # BUG FIX: Popen() returned immediately without waiting, so the link
    # step could race ahead of dlltool and miss the import library; wait
    # for completion and surface a non-zero exit status.
    subprocess.check_call(cmd)
def _build_import_library_x86():
    """ Build the import libraries for Mingw32-gcc on Windows
    """
    lib_name = "python%d%d.lib" % tuple(sys.version_info[:2])
    lib_file = os.path.join(sys.prefix,'libs',lib_name)
    out_name = "libpython%d%d.a" % tuple(sys.version_info[:2])
    out_file = os.path.join(sys.prefix,'libs',out_name)
    if not os.path.isfile(lib_file):
        log.warn('Cannot build import library: "%s" not found' % (lib_file))
        return
    if os.path.isfile(out_file):
        log.debug('Skip building import library: "%s" exists' % (out_file))
        return
    log.info('Building import library (ARCH=x86): "%s"' % (out_file))

    from numpy.distutils import lib2def

    # Derive a .def file from the MSVC import library via nm.
    def_name = "python%d%d.def" % tuple(sys.version_info[:2])
    def_file = os.path.join(sys.prefix,'libs',def_name)
    nm_cmd = '%s %s' % (lib2def.DEFAULT_NM, lib_file)
    nm_output = lib2def.getnm(nm_cmd)
    dlist, flist = lib2def.parse_nm(nm_output)
    # NOTE(review): the file handle passed here is never closed explicitly;
    # relies on refcounting -- confirm lib2def.output_def does not need it
    # flushed before dlltool runs below.
    lib2def.output_def(dlist, flist, lib2def.DEF_HEADER, open(def_file, 'w'))

    dll_name = "python%d%d.dll" % tuple(sys.version_info[:2])
    args = (dll_name,def_file,out_file)
    cmd = 'dlltool --dllname %s --def %s --output-lib %s' % args
    status = os.system(cmd)
    # for now, fail silently
    if status:
        log.warn('Failed to build import library for gcc. Linking will fail.')
    #if not success:
    # msg = "Couldn't find import library, and failed to build it."
    # raise DistutilsPlatformError, msg
    return
#=====================================
# Dealing with Visual Studio MANIFESTS
#=====================================
# Functions to deal with visual studio manifests. Manifest are a mechanism to
# enforce strong DLL versioning on windows, and has nothing to do with
# distutils MANIFEST. manifests are XML files with version info, and used by
# the OS loader; they are necessary when linking against a DLL not in the
# system path; in particular, official python 2.6 binary is built against the
# MS runtime 9 (the one from VS 2008), which is not available on most windows
# systems; python 2.6 installer does install it in the Win SxS (Side by side)
# directory, but this requires the manifest for this to work. This is a big
# mess, thanks MS for a wonderful system.
# XXX: ideally, we should use exactly the same version as used by python. I
# submitted a patch to get this version, but it was only included for python
# 2.6.1 and above. So for versions below, we use a "best guess".
# Maps an MSVC runtime major version string ('90', '80') to the full
# assembly version string embedded in manifests; populated on Windows only.
_MSVCRVER_TO_FULLVER = {}
if sys.platform == 'win32':
    try:
        import msvcrt
        if hasattr(msvcrt, "CRT_ASSEMBLY_VERSION"):
            _MSVCRVER_TO_FULLVER['90'] = msvcrt.CRT_ASSEMBLY_VERSION
        else:
            # Python < 2.6.1 does not expose the CRT version: best guess.
            _MSVCRVER_TO_FULLVER['90'] = "9.0.21022.8"
        # I took one version in my SxS directory: no idea if it is the good
        # one, and we can't retrieve it from python
        _MSVCRVER_TO_FULLVER['80'] = "8.0.50727.42"
    except ImportError:
        # If we are here, means python was not built with MSVC. Not sure what to do
        # in that case: manifest building will fail, but it should not be used in
        # that case anyway
        log.warn('Cannot import msvcrt: using manifest will not be possible')
def msvc_manifest_xml(maj, min):
    """Given a major and minor version of the MSVCR, returns the
    corresponding XML file.

    Raises ValueError when the version is not in _MSVCRVER_TO_FULLVER."""
    try:
        fullver = _MSVCRVER_TO_FULLVER[str(maj * 10 + min)]
    except KeyError:
        raise ValueError("Version %d,%d of MSVCRT not supported yet" \
                         % (maj, min))
    # Don't be fooled, it looks like an XML, but it is not. In particular, it
    # should not have any space before starting, and its size should be
    # divisible by 4, most likely for alignement constraints when the xml is
    # embedded in the binary...
    # This template was copied directly from the python 2.6 binary (using
    # strings.exe from mingw on python.exe).
    template = """\
<assembly xmlns="urn:schemas-microsoft-com:asm.v1" manifestVersion="1.0">
  <trustInfo xmlns="urn:schemas-microsoft-com:asm.v3">
    <security>
      <requestedPrivileges>
        <requestedExecutionLevel level="asInvoker" uiAccess="false"></requestedExecutionLevel>
      </requestedPrivileges>
    </security>
  </trustInfo>
  <dependency>
    <dependentAssembly>
      <assemblyIdentity type="win32" name="Microsoft.VC%(maj)d%(min)d.CRT" version="%(fullver)s" processorArchitecture="*" publicKeyToken="1fc8b3b9a1e18e3b"></assemblyIdentity>
    </dependentAssembly>
  </dependency>
</assembly>"""

    return template % {'fullver': fullver, 'maj': maj, 'min': min}
def manifest_rc(name, type='dll'):
    """Return the rc file used to generate the res file which will be embedded
    as manifest for given manifest file name, of given type ('dll' or
    'exe').

    Parameters
    ----------
    name: str
        name of the manifest file to embed
    type: str ('dll', 'exe')
        type of the binary which will embed the manifest"""
    # Windows resource type ids: 2 for a DLL manifest, 1 for an EXE one.
    rc_ids = {'dll': 2, 'exe': 1}
    try:
        rctype = rc_ids[type]
    except KeyError:
        raise ValueError("Type %s not supported" % type)

    return """\
#include "winuser.h"
%d RT_MANIFEST %s""" % (rctype, name)
def check_embedded_msvcr_match_linked(msver):
    """msver is the ms runtime version used for the MANIFEST.

    Raises ValueError when the linked msvcr major version differs."""
    # check msvcr major version are the same for linking and
    # embedding
    msvcv = msvc_runtime_library()
    if msvcv:
        # NOTE(review): [5:6] takes one character, so 'msvcr90' -> 9 but
        # 'msvcr100' would yield 1 -- confirm only single-digit runtime
        # versions are expected here.
        maj = int(msvcv[5:6])
        if not maj == int(msver):
            raise ValueError(
                  "Discrepancy between linked msvcr " \
                  "(%d) and the one about to be embedded " \
                  "(%d)" % (int(msver), maj))
def configtest_name(config):
    """Return the base name (without extension) of config's temporary
    C test source file."""
    source_path = config._gen_temp_sourcefile("yo", [], "c")
    filename = os.path.basename(source_path)
    return os.path.splitext(filename)[0]
def manifest_name(config):
    """Return the manifest file name for config's test executable."""
    # configtest name plus the platform executable suffix
    root = configtest_name(config)
    suffix = config.compiler.exe_extension
    return root + suffix + ".manifest"
def rc_name(config):
    """Return the .rc file name for config's test executable."""
    base = configtest_name(config)
    return base + ".rc"
def generate_manifest(config):
    """Write a Visual Studio manifest for config's test executable so it
    can load the MSVC runtime Python itself was linked against.

    No-op when Python was not built with MSVC."""
    msver = get_build_msvc_version()
    if msver is not None:
        if msver >= 8:
            check_embedded_msvcr_match_linked(msver)
        # Split e.g. 9.0 into major=9, minor=0 for the manifest template.
        ma = int(msver)
        mi = int((msver - ma) * 10)
        # Write the manifest file
        manxml = msvc_manifest_xml(ma, mi)
        man = open(manifest_name(config), "w")
        config.temp_files.append(manifest_name(config))
        man.write(manxml)
        man.close()
        # # Write the rc file
        # manrc = manifest_rc(manifest_name(self), "exe")
        # rc = open(rc_name(self), "w")
        # self.temp_files.append(manrc)
        # rc.write(manrc)
        # rc.close()
| apache-2.0 |
goblincoding/ninja-ide | ninja_ide/gui/dialogs/add_to_project.py | 7 | 4094 | # -*- coding: utf-8 -*-
#
# This file is part of NINJA-IDE (http://ninja-ide.org).
#
# NINJA-IDE is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# any later version.
#
# NINJA-IDE is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with NINJA-IDE; If not, see <http://www.gnu.org/licenses/>.
from __future__ import absolute_import
from PyQt4.QtGui import QDialog
from PyQt4.QtGui import QListWidget
from PyQt4.QtGui import QTreeView
from PyQt4.QtGui import QVBoxLayout
from PyQt4.QtGui import QHBoxLayout
from PyQt4.QtGui import QPushButton
from PyQt4.QtGui import QAbstractItemView
from PyQt4.QtGui import QFileSystemModel
from PyQt4.QtGui import QHeaderView
from PyQt4.QtCore import QDir
from PyQt4.QtCore import SIGNAL
from ninja_ide import translations
class AddToProject(QDialog):
    """Dialog to let the user choose one of the folders from the opened proj"""

    def __init__(self, projects, parent=None):
        super(AddToProject, self).__init__(parent)
        #pathProjects must be a list
        self._projects = projects
        self.setWindowTitle(translations.TR_ADD_FILE_TO_PROJECT)
        # Folder chosen by the user; empty until _select_path() runs.
        self.pathSelected = ''
        vbox = QVBoxLayout(self)

        hbox = QHBoxLayout()
        # Left pane: one entry per open project.
        self._list = QListWidget()
        for project in self._projects:
            self._list.addItem(project.name)
        self._list.setCurrentRow(0)
        # Right pane: folder tree of the currently selected project.
        self._tree = QTreeView()
        #self._tree.header().setHidden(True)
        self._tree.setSelectionMode(QTreeView.SingleSelection)
        self._tree.setAnimated(True)
        self.load_tree(self._projects[0])

        hbox.addWidget(self._list)
        hbox.addWidget(self._tree)
        vbox.addLayout(hbox)

        hbox2 = QHBoxLayout()
        btnAdd = QPushButton(translations.TR_ADD_HERE)
        btnCancel = QPushButton(translations.TR_CANCEL)
        hbox2.addWidget(btnCancel)
        hbox2.addWidget(btnAdd)
        vbox.addLayout(hbox2)

        self.connect(btnCancel, SIGNAL("clicked()"), self.close)
        self.connect(btnAdd, SIGNAL("clicked()"), self._select_path)
        # BUG FIX: the old signature named QTreeWidgetItem, but self._list
        # is a QListWidget whose currentItemChanged signal carries
        # QListWidgetItem pointers -- with the wrong types the old-style
        # connection never fired (the former FIXME about _project_changed
        # not being called).
        self.connect(self._list,
            SIGNAL("currentItemChanged(QListWidgetItem*, QListWidgetItem*)"),
            self._project_changed)

    def _project_changed(self, item, previous):
        """Reload the folder tree when another project is selected."""
        for each_project in self._projects:
            if each_project.name == item.text():
                self.load_tree(each_project)
                break

    def load_tree(self, project):
        """Load the tree view on the right based on the project selected."""
        qfsm = QFileSystemModel()
        qfsm.setRootPath(project.path)
        load_index = qfsm.index(qfsm.rootPath())
        # Show only directories, filtered by the project's extensions.
        qfsm.setFilter(QDir.AllDirs | QDir.NoDotAndDotDot)
        qfsm.setNameFilterDisables(False)
        pext = ["*{0}".format(x) for x in project.extensions]
        qfsm.setNameFilters(pext)
        self._tree.setModel(qfsm)
        self._tree.setRootIndex(load_index)
        t_header = self._tree.header()
        t_header.setHorizontalScrollMode(QAbstractItemView.ScrollPerPixel)
        t_header.setResizeMode(0, QHeaderView.Stretch)
        t_header.setStretchLastSection(False)
        t_header.setClickable(True)
        self._tree.hideColumn(1)  # Size
        self._tree.hideColumn(2)  # Type
        self._tree.hideColumn(3)  # Modification date
        #FIXME: Changing the name column's title requires some magic
        #Please look at the project tree

    def _select_path(self):
        """Set pathSelected to the folder selected in the tree."""
        path = self._tree.model().filePath(self._tree.currentIndex())
        if path:
            self.pathSelected = path
        self.close()
luxus/home-assistant | homeassistant/components/light/hyperion.py | 4 | 3572 | """
Support for Hyperion remotes.
For more details about this platform, please refer to the documentation at
https://home-assistant.io/components/light.hyperion/
"""
import json
import logging
import socket
from homeassistant.components.light import ATTR_RGB_COLOR, Light
from homeassistant.const import CONF_HOST
_LOGGER = logging.getLogger(__name__)
REQUIREMENTS = []
def setup_platform(hass, config, add_devices_callback, discovery_info=None):
    """Setup a Hyperion server remote."""
    host = config.get(CONF_HOST, None)
    port = config.get("port", 19444)
    device = Hyperion(host, port)
    # Register the entity only when the server answered the setup probe.
    if not device.setup():
        return False
    add_devices_callback([device])
    return True
class Hyperion(Light):
    """Representation of a Hyperion remote."""

    def __init__(self, host, port):
        """Initialize the light."""
        self._host = host
        self._port = port
        # Until setup() resolves the server hostname, use the host address.
        self._name = host
        self._is_available = True
        # Last color pushed to the server; also reported as current state.
        self._rgb_color = [255, 255, 255]

    @property
    def name(self):
        """Return the hostname of the server."""
        return self._name

    @property
    def rgb_color(self):
        """Return last RGB color value set."""
        return self._rgb_color

    @property
    def is_on(self):
        """Return true if the device is online."""
        return self._is_available

    def turn_on(self, **kwargs):
        """Turn the lights on."""
        if self._is_available:
            if ATTR_RGB_COLOR in kwargs:
                self._rgb_color = kwargs[ATTR_RGB_COLOR]

            self.json_request({"command": "color", "priority": 128,
                               "color": self._rgb_color})

    def turn_off(self, **kwargs):
        """Disconnect the remote."""
        self.json_request({"command": "clearall"})

    def update(self):
        """Ping the remote."""
        # just see if the remote port is open
        self._is_available = self.json_request()

    def setup(self):
        """Get the hostname of the remote."""
        response = self.json_request({"command": "serverinfo"})
        if response:
            self._name = response["info"]["hostname"]
            return True
        return False

    def json_request(self, request=None, wait_for_response=False):
        """Communicate with the JSON server.

        With no request this is a pure connectivity probe returning a bool;
        otherwise returns the decoded JSON reply, or False on timeout.
        """
        sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        sock.settimeout(5)

        try:
            sock.connect((self._host, self._port))
        except OSError:
            sock.close()
            return False

        if not request:
            # No communication needed, simple presence detection returns True
            sock.close()
            return True

        sock.send(bytearray(json.dumps(request) + "\n", "utf-8"))
        try:
            buf = sock.recv(4096)
        except socket.timeout:
            # Something is wrong, assume it's offline
            sock.close()
            return False

        # Read until a newline or timeout
        buffering = True
        while buffering:
            if "\n" in str(buf, "utf-8"):
                # Only the first line of the reply is decoded.
                response = str(buf, "utf-8").split("\n")[0]
                buffering = False
            else:
                try:
                    more = sock.recv(4096)
                except socket.timeout:
                    more = None
                if not more:
                    # Connection closed or timed out: decode what we have.
                    buffering = False
                    response = str(buf, "utf-8")
                else:
                    buf += more

        sock.close()
        return json.loads(response)
| mit |
phillxnet/rockstor-core | src/rockstor/storageadmin/tests/test_snapshot.py | 2 | 12564 | """
Copyright (c) 2012-2013 RockStor, Inc. <http://rockstor.com>
This file is part of RockStor.
RockStor is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published
by the Free Software Foundation; either version 2 of the License,
or (at your option) any later version.
RockStor is distributed in the hope that it will be useful, but
WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
from rest_framework import status
from rest_framework.test import APITestCase
import mock
from mock import patch
from storageadmin.models import Snapshot, Share, Pool
from storageadmin.tests.test_api import APITestMixin
"""Using fixture fix5.json where hard coded data to pre-populate database
before the tests run. Created pool1 Created share1, share2 using pool1 Created
snapshot snap1 for share1 with uvisible false Created snapshot snap2 for share2
with uvisible True
"""
class SnapshotTests(APITestMixin, APITestCase):
    """API tests for the share snapshot endpoints.

    Pre-populated from fixture fix5.json: pool1 with share1/share2, snap1
    on share1 (uvisible False) and snap2 on share2 (uvisible True).
    """
    fixtures = ['fix5.json']
    BASE_URL = '/api/shares'
    @classmethod
    def setUpClass(cls):
        """Patch every btrfs helper the snapshot view calls, so the tests
        never touch a real filesystem."""
        super(SnapshotTests, cls).setUpClass()

        # post mocks
        cls.patch_add_snap = patch('storageadmin.views.snapshot.add_snap')
        cls.mock_add_snap = cls.patch_add_snap.start()
        cls.mock_add_snap.return_value = 'out', 'err', 0

        cls.patch_share_id = patch('storageadmin.views.snapshot.share_id')
        cls.mock_share_id = cls.patch_share_id.start()
        cls.mock_share_id.return_value = 1

        cls.patch_qgroup_assign = patch('storageadmin.views.snapshot.'
                                        'qgroup_assign')
        cls.mock_qgroup_assign = cls.patch_qgroup_assign.start()
        # cls.mock_qgroup_assign.return_value = 1
        cls.mock_qgroup_assign.return_value = True

        # Changed from share_usage to volume_usage.  Potential issue:
        # volume_usage returns either 2 or 4 values -- with 2 parameters
        # (pool, volume_id) it returns 2 values, but with 3 parameters
        # (pool, volume_id, pvolume_id) it returns 4 values when the last
        # parameter is != None.
        cls.patch_volume_usage = patch('storageadmin.views.snapshot.'
                                       'volume_usage')
        cls.mock_volume_usage = cls.patch_volume_usage.start()
        cls.mock_volume_usage.return_value = 16, 16

        cls.patch_mount_snap = patch('storageadmin.views.snapshot.mount_snap')
        cls.mock_mount_snap = cls.patch_mount_snap.start()
        cls.mock_mount_snap.return_value = 'out', 'err', 0

        cls.patch_remove_snap = patch('storageadmin.views.snapshot.'
                                      'remove_snap')
        cls.mock_remove_snap = cls.patch_remove_snap.start()
        cls.mock_remove_snap.return_value = True

        cls.patch_create_clone = patch('storageadmin.views.snapshot.'
                                       'create_clone')
        cls.mock_create_clone = cls.patch_create_clone.start()
    @classmethod
    def tearDownClass(cls):
        # The mixin's tearDownClass stops the patchers started above.
        super(SnapshotTests, cls).tearDownClass()
    def test_get(self):
        """
        Test GET request
        1. Get base URL
        """
        # Listing all snapshots should always succeed with HTTP 200.
        response = self.client.get('/api/snapshots')
        self.assertEqual(response.status_code, status.HTTP_200_OK,
                         msg=response.data)
def test_post_requests_1(self):
"""
invalid snapshot post operation via invalid share name
"""
# Invalid share name
data = {'snapshot-name': 'snap3', 'shares': 'invalid', 'writable':
'rw', 'uvisible': 'invalid'}
snap_name = 'snap3'
share_name = 'invalid'
share_id = 99999 # invalid share id for invalid share name.
response = self.client.post(
'{}/{}/snapshots/{}'.format(self.BASE_URL, share_id, snap_name),
data=data, sname=share_name, snap_name=snap_name)
self.assertEqual(response.status_code,
status.HTTP_500_INTERNAL_SERVER_ERROR,
msg=response.data)
e_msg = "Share with id ({}) does not exist.".format(share_id)
self.assertEqual(response.data[0], e_msg)
@mock.patch('storageadmin.views.snapshot.Share')
@mock.patch('storageadmin.views.share.Pool')
@mock.patch('storageadmin.views.snapshot.NFSExport')
def test_post_requests_2(self, mock_nfs, mock_pool, mock_share):
"""
1. Create snapshot providing invalid uvisible bool type
2. Create snapshot providing invalid writable bool type
3. happy path to create snapshot
2. Create a snapshot with duplicate name
"""
temp_pool = Pool(id=1, name='rockstor_rockstor', size=88025459)
mock_pool.objects.get.return_value = temp_pool
temp_share = Share(id=3, name='share1', pool=temp_pool, size=8025459)
mock_share.objects.get.return_value = temp_share
# mock_snapshot.objects.get.side_effect = Snapshot.DoesNotExist
# Invalid uvisible bool type
data = {'snapshot-name': 'snap3', 'shares': 'share1',
'writable': False, 'uvisible': 'invalid'}
snap_name = 'snap3'
share_name = 'share1'
share_id = 3 # from fix5.json
response = self.client.post(
'%s/%s/snapshots/%s' % (self.BASE_URL, share_id, snap_name),
data=data, sname=share_name, snap_name=snap_name)
self.assertEqual(response.status_code,
status.HTTP_500_INTERNAL_SERVER_ERROR,
msg=response.data)
e_msg = "Element 'uvisible' must be a boolean, not (<type 'unicode'>)."
self.assertEqual(response.data[0], e_msg)
# Invalid writable bool type
data = {'snapshot-name': 'snap3', 'shares': 'share1',
'writable': 'invalid', 'uvisible': True}
snap_name = 'snap3'
share = 'share1'
share_id = 3 # from fix5.json and above mocking object
mock_nfs.objects.filter(share=share).exists.return_value = True
response = self.client.post(
'{}/{}/snapshots/{}'.format(self.BASE_URL, share_id, snap_name),
data=data, sname=share, snap_name=snap_name)
self.assertEqual(response.status_code,
status.HTTP_500_INTERNAL_SERVER_ERROR,
msg=response.data)
# TODO consider changing tested code to unify quota types to single
# as per "Invalid uvisible bool type" to remove need for escaping here.
e_msg = ('Element "writable" must be a boolean, not '
'(<type \'unicode\'>).')
self.assertEqual(response.data[0], e_msg)
# # Happy Path creating a snapshot by name snap3
data = {'snapshot-name': 'snap3', 'shares': 'share1',
'writable': False, 'uvisible': False}
snap_name = 'snap3'
share = 'share1'
share_id = 3 # from fix5.json and above mocking object
mock_nfs.objects.filter(share=share).exists.return_value = True
response = self.client.post(
'{}/{}/snapshots/{}'.format(self.BASE_URL, share_id, snap_name),
data=data, sname=share, snap_name=snap_name)
self.assertEqual(response.status_code,
status.HTTP_200_OK, msg=response.data)
# # Create duplicate snapshot by name snap3
data = {'snapshot-name': 'snap2', 'shares': 'share2',
'writable': True, 'uvisible': True}
snap_name = 'snap3'
share_name = 'share1'
share_id = 3 # from fix5.json
response = self.client.post(
'{}/{}/snapshots/{}'.format(self.BASE_URL, share_id, snap_name),
data=data, sname=share_name, snap_name=snap_name)
self.assertEqual(response.status_code,
status.HTTP_500_INTERNAL_SERVER_ERROR,
msg=response.data)
e_msg = ('Snapshot ({}) already exists for the '
'share ({}).').format(snap_name, share_name)
self.assertEqual(response.data[0], e_msg)
@mock.patch('storageadmin.views.share_command.Snapshot')
@mock.patch('storageadmin.views.snapshot.Share')
@mock.patch('storageadmin.views.share.Pool')
def test_clone_command(self, mock_pool, mock_share, mock_snapshot):
temp_pool = Pool(id=1, name='rockstor_rockstor', size=88025459)
mock_pool.objects.get.return_value = temp_pool
temp_share = Share(id=4, name='share2', pool=temp_pool, size=8025459)
mock_share.objects.get.return_value = temp_share
mock_snapshot.objects.get.side_effect = Snapshot.DoesNotExist
data = {'name': 'clonesnap2'}
snap_name = 'clonesnap2'
share = 'share2'
share_id = 4 # from fix5.json
response = self.client.post(
'{}/{}/snapshots/{}'.format(self.BASE_URL, share_id, snap_name),
data=data, sname=share, snap_name=snap_name, command='clone')
self.assertEqual(response.status_code,
status.HTTP_200_OK, msg=response.data)
@mock.patch('storageadmin.views.share_command.Snapshot')
@mock.patch('storageadmin.views.snapshot.Share')
@mock.patch('storageadmin.views.share.Pool')
def test_delete_requests(self, mock_pool, mock_share, mock_snapshot):
"""
1. Delete snapshot that does not exist
2. Delete snapshot with no name specified
"""
temp_pool = Pool(id=1, name='rockstor_rockstor', size=88025459)
mock_pool.objects.get.return_value = temp_pool
temp_share = Share(id=3, name='share1', pool=temp_pool, size=8025459)
mock_share.objects.get.return_value = temp_share
mock_snapshot.objects.get.side_effect = Snapshot.DoesNotExist
# # Delete snapshot that does not exists
snap_name = 'snap3'
share_name = 'share1'
share_id = 3 # from fix5.json
response = self.client.delete(
'{}/{}/snapshots/{}'.format(self.BASE_URL, share_id, snap_name))
self.assertEqual(response.status_code,
status.HTTP_500_INTERNAL_SERVER_ERROR,
msg=response.data)
e_msg = 'Snapshot name (snap3) does not exist.'
self.assertEqual(response.data[0], e_msg)
temp_share2 = Share(id=4, name='share2', pool=temp_pool, size=8025459)
mock_share.objects.get.return_value = temp_share2
# Delete without snapshot name
share_name = 'share1'
share_id = 3 # from fix5.json
response = self.client.delete('{}/{}/snapshots'.format(self.BASE_URL,
share_id))
self.assertEqual(response.status_code,
status.HTTP_200_OK, msg=response.data)
# # Delete snapshot happy path
# # creating a snapshot just for the next test.
# # TODO: replace this repeat post test with a proper mock of a snapshot
# # ie attempted to use:
#
# # temp_snap = Snapshot(id=2, name='snap2', share=temp_share2,
# # snap_type='admin')
# # mock_snapshot.objects.get.return_value = temp_snap
# # mock_snapshot.objects.filter(share='share2', name='snap2'
# # ).exists.return_value = True
# # but received:
# # 'Snapshot name (snap2) does not exist.'
#
# data = {'snapshot-name': 'snap2', 'shares': 'share2',
# 'writable': False, 'uvisible': False}
# snap_name = 'snap2'
# share = 'share2'
# share_id = 4
# response = self.client.post(
# '{}/{}/snapshots/{}'.format(self.BASE_URL, share_id, snap_name),
# data=data, sname=share, snap_name=snap_name)
# self.assertEqual(response.status_code,
# status.HTTP_200_OK, msg=response.data)
# # now move to our happy path delete test of just created 'snap2'
# response = self.client.delete(
# '{}/{}/snapshots/{}'.format(self.BASE_URL, share_id, snap_name))
# self.assertEqual(response.status_code,
# status.HTTP_200_OK, msg=response.data)
| gpl-3.0 |
srimai/odoo | openerp/addons/base/res/res_bank.py | 242 | 10554 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2009 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import fields, osv
from openerp.tools.translate import _
class Bank(osv.osv):
    # A bank (financial institution); referenced by partner bank accounts.
    _description='Bank'
    _name = 'res.bank'
    _order = 'name'
    _columns = {
        'name': fields.char('Name', required=True),
        'street': fields.char('Street'),
        'street2': fields.char('Street2'),
        'zip': fields.char('Zip', change_default=True, size=24),
        'city': fields.char('City'),
        'state': fields.many2one("res.country.state", 'Fed. State',
            domain="[('country_id', '=', country)]"),
        'country': fields.many2one('res.country', 'Country'),
        'email': fields.char('Email'),
        'phone': fields.char('Phone'),
        'fax': fields.char('Fax'),
        'active': fields.boolean('Active'),
        'bic': fields.char('Bank Identifier Code', size=64,
            help="Sometimes called BIC or Swift."),
    }
    _defaults = {
        'active': lambda *a: 1,
    }

    def name_get(self, cr, uid, ids, context=None):
        # Display each bank as "<BIC> - <name>" when a BIC is set,
        # otherwise just the bank name.
        names = []
        for record in self.browse(cr, uid, ids, context):
            if record.bic:
                display_name = record.bic + ' - ' + record.name
            else:
                display_name = record.name
            names.append((record.id, display_name))
        return names
class res_partner_bank_type(osv.osv):
    """A bank account type (e.g. plain account vs. IBAN).

    Each type declares which extra fields apply to accounts of that type
    (field_ids) and how such accounts are rendered (format_layout).
    """
    _description='Bank Account Type'
    _name = 'res.partner.bank.type'
    _order = 'name'
    _columns = {
        'name': fields.char('Name', required=True, translate=True),
        'code': fields.char('Code', size=64, required=True),
        # Per-type field constraints; consumed by res_partner_bank.fields_get.
        'field_ids': fields.one2many('res.partner.bank.type.field', 'bank_type_id', 'Type Fields'),
        # %-style template used by res_partner_bank._prepare_name_get.
        'format_layout': fields.text('Format Layout', translate=True)
    }
    _defaults = {
        'format_layout': lambda *args: "%(bank_name)s: %(acc_number)s"
    }
class res_partner_bank_type_fields(osv.osv):
    """One field-level constraint belonging to a bank account type.

    Records name a field of res.partner.bank and mark it required and/or
    readonly (with an optional max size) for accounts of the parent type.
    """
    _description='Bank type fields'
    _name = 'res.partner.bank.type.field'
    _order = 'name'
    _columns = {
        # Name of the res.partner.bank field this constraint applies to.
        'name': fields.char('Field Name', required=True, translate=True),
        'bank_type_id': fields.many2one('res.partner.bank.type', 'Bank Type', required=True, ondelete='cascade'),
        'required': fields.boolean('Required'),
        'readonly': fields.boolean('Readonly'),
        'size': fields.integer('Max. Size'),
    }
class res_partner_bank(osv.osv):
    '''Bank Accounts'''
    # NOTE: the class docstring above is assigned to _description below and
    # is therefore user-visible; do not reword it casually.
    _name = "res.partner.bank"
    _rec_name = "acc_number"
    _description = __doc__
    _order = 'sequence'

    def _bank_type_get(self, cr, uid, context=None):
        """Selection provider for the 'state' column.

        Returns one (code, name) pair per res.partner.bank.type record.
        """
        bank_type_obj = self.pool.get('res.partner.bank.type')
        result = []
        type_ids = bank_type_obj.search(cr, uid, [])
        bank_types = bank_type_obj.browse(cr, uid, type_ids, context=context)
        for bank_type in bank_types:
            result.append((bank_type.code, bank_type.name))
        return result

    def _default_value(self, cursor, user, field, context=None):
        """Default a field from the 'address' one2many commands in context.

        Prefers the address of type 'default'; falls back to the last
        address with no type. Returns False for relational fields and ''
        for text fields when no address information is available.
        """
        if context is None: context = {}
        if field in ('country_id', 'state_id'):
            value = False
        else:
            value = ''
        if not context.get('address'):
            return value
        for address in self.pool.get('res.partner').resolve_2many_commands(
                cursor, user, 'address', context['address'], ['type', field], context=context):
            if address.get('type') == 'default':
                return address.get(field, value)
            elif not address.get('type'):
                value = address.get(field, value)
        return value

    _columns = {
        'name': fields.char('Bank Account'), # to be removed in v6.2 ?
        'acc_number': fields.char('Account Number', size=64, required=True),
        'bank': fields.many2one('res.bank', 'Bank'),
        'bank_bic': fields.char('Bank Identifier Code', size=16),
        'bank_name': fields.char('Bank Name'),
        'owner_name': fields.char('Account Owner Name'),
        'street': fields.char('Street'),
        'zip': fields.char('Zip', change_default=True, size=24),
        'city': fields.char('City'),
        'country_id': fields.many2one('res.country', 'Country',
            change_default=True),
        'state_id': fields.many2one("res.country.state", 'Fed. State',
            change_default=True, domain="[('country_id','=',country_id)]"),
        'company_id': fields.many2one('res.company', 'Company',
            ondelete='cascade', help="Only if this bank account belong to your company"),
        'partner_id': fields.many2one('res.partner', 'Account Owner', ondelete='cascade', select=True, domain=['|',('is_company','=',True),('parent_id','=',False)]),
        # Despite its name, 'state' holds the bank account *type* code
        # (see _bank_type_get), not a workflow state.
        'state': fields.selection(_bank_type_get, 'Bank Account Type', required=True,
            change_default=True),
        'sequence': fields.integer('Sequence'),
        'footer': fields.boolean("Display on Reports", help="Display this bank account on the footer of printed documents like invoices and sales orders.")
    }

    # Most defaults are pulled from the partner address passed in context;
    # see _default_value above.
    _defaults = {
        'owner_name': lambda obj, cursor, user, context: obj._default_value(
            cursor, user, 'name', context=context),
        'street': lambda obj, cursor, user, context: obj._default_value(
            cursor, user, 'street', context=context),
        'city': lambda obj, cursor, user, context: obj._default_value(
            cursor, user, 'city', context=context),
        'zip': lambda obj, cursor, user, context: obj._default_value(
            cursor, user, 'zip', context=context),
        'country_id': lambda obj, cursor, user, context: obj._default_value(
            cursor, user, 'country_id', context=context),
        'state_id': lambda obj, cursor, user, context: obj._default_value(
            cursor, user, 'state_id', context=context),
        'name': '/'
    }

    def fields_get(self, cr, uid, allfields=None, context=None, write_access=True, attributes=None):
        """Extend fields_get with per-type 'states' attributes.

        For each bank account type, the readonly/required flags declared on
        res.partner.bank.type.field records are injected so the client can
        adapt the form to the selected account type.
        """
        res = super(res_partner_bank, self).fields_get(cr, uid, allfields=allfields, context=context, write_access=write_access, attributes=attributes)
        bank_type_obj = self.pool.get('res.partner.bank.type')
        type_ids = bank_type_obj.search(cr, uid, [])
        types = bank_type_obj.browse(cr, uid, type_ids)
        for type in types:
            for field in type.field_ids:
                if field.name in res:
                    res[field.name].setdefault('states', {})
                    res[field.name]['states'][type.code] = [
                        ('readonly', field.readonly),
                        ('required', field.required)]
        return res

    def _prepare_name_get(self, cr, uid, bank_dicts, context=None):
        """ Format the name of a res.partner.bank.
            This function is designed to be inherited to add replacement fields.
            :param bank_dicts: a list of res.partner.bank dicts, as returned by the method read()
            :return: [(id, name), ...], as returned by the method name_get()
        """
        # prepare a mapping {code: format_layout} for all bank types
        bank_type_obj = self.pool.get('res.partner.bank.type')
        bank_types = bank_type_obj.browse(cr, uid, bank_type_obj.search(cr, uid, []), context=context)
        bank_code_format = dict((bt.code, bt.format_layout) for bt in bank_types)
        res = []
        for data in bank_dicts:
            name = data['acc_number']
            if data['state'] and bank_code_format.get(data['state']):
                try:
                    if not data.get('bank_name'):
                        data['bank_name'] = _('BANK')
                    # replace falsy values by '' so the %-layout never
                    # renders 'False'
                    data = dict((k, v or '') for (k, v) in data.iteritems())
                    name = bank_code_format[data['state']] % data
                except Exception:
                    raise osv.except_osv(_("Formating Error"), _("Invalid Bank Account Type Name format."))
            res.append((data.get('id', False), name))
        return res

    def name_get(self, cr, uid, ids, context=None):
        """Delegate display-name computation to _prepare_name_get."""
        if not len(ids):
            return []
        bank_dicts = self.read(cr, uid, ids, self.fields_get_keys(cr, uid, context=context), context=context)
        return self._prepare_name_get(cr, uid, bank_dicts, context=context)

    def onchange_company_id(self, cr, uid, ids, company_id, context=None):
        """Prefill owner fields from the company's partner; mark for footer."""
        result = {}
        if company_id:
            c = self.pool.get('res.company').browse(cr, uid, company_id, context=context)
            if c.partner_id:
                r = self.onchange_partner_id(cr, uid, ids, c.partner_id.id, context=context)
                r['value']['partner_id'] = c.partner_id.id
                r['value']['footer'] = 1
                result = r
        return result

    def onchange_bank_id(self, cr, uid, ids, bank_id, context=None):
        """Copy name and BIC from the selected res.bank record."""
        result = {}
        if bank_id:
            bank = self.pool.get('res.bank').browse(cr, uid, bank_id, context=context)
            result['bank_name'] = bank.name
            result['bank_bic'] = bank.bic
        return {'value': result}

    def onchange_partner_id(self, cr, uid, ids, partner_id, context=None):
        """Prefill owner name/address fields from the selected partner."""
        result = {}
        if partner_id is not False:
            # be careful: partner_id may be a NewId
            part = self.pool['res.partner'].browse(cr, uid, [partner_id], context=context)
            result['owner_name'] = part.name
            result['street'] = part.street or False
            result['city'] = part.city or False
            result['zip'] = part.zip or False
            result['country_id'] = part.country_id.id
            result['state_id'] = part.state_id.id
        return {'value': result}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
denovator/mochafac | lib/setuptools/tests/test_dist_info.py | 452 | 2615 | """Test .dist-info style distributions.
"""
import os
import shutil
import tempfile
import unittest
import textwrap
# 'ast' only exists on Python >= 2.6; when the import fails, the
# conditional-dependency test below is skipped via "'ast' not in globals()".
try:
    import ast
except ImportError:
    # Narrowed from a bare "except:" which also swallowed SystemExit and
    # KeyboardInterrupt; only a missing module should be tolerated here.
    pass
import pkg_resources
from setuptools.tests.py26compat import skipIf
def DALS(s):
    """Dedent And Left-Strip: normalize an indented triple-quoted literal."""
    dedented = textwrap.dedent(s)
    return dedented.lstrip()
class TestDistInfo(unittest.TestCase):
    """Tests reading project metadata from .dist-info style directories."""

    def test_distinfo(self):
        """Both fixture dists are found; version comes from filename or METADATA."""
        dists = {}
        for d in pkg_resources.find_distributions(self.tmpdir):
            dists[d.project_name] = d
        assert len(dists) == 2, dists

        unversioned = dists['UnversionedDistribution']
        versioned = dists['VersionedDistribution']

        assert versioned.version == '2.718' # from filename
        assert unversioned.version == '0.3' # from METADATA

    @skipIf('ast' not in globals(),
        "ast is used to test conditional dependencies (Python >= 2.6)")
    def test_conditional_dependencies(self):
        """Requires-Dist with an extra marker only applies to that extra."""
        requires = [pkg_resources.Requirement.parse('splort==4'),
                    pkg_resources.Requirement.parse('quux>=1.1')]

        for d in pkg_resources.find_distributions(self.tmpdir):
            self.assertEqual(d.requires(), requires[:1])
            self.assertEqual(d.requires(extras=('baz',)), requires)
            self.assertEqual(d.extras, ['baz'])

    def setUp(self):
        """Create one versioned and one unversioned .dist-info fixture dir."""
        self.tmpdir = tempfile.mkdtemp()
        versioned = os.path.join(self.tmpdir,
                                 'VersionedDistribution-2.718.dist-info')
        os.mkdir(versioned)
        # 'with' replaces the original open/try/finally so the file is
        # closed even if the write raises.
        with open(os.path.join(versioned, 'METADATA'), 'w+') as metadata_file:
            metadata_file.write(DALS(
                """
                Metadata-Version: 1.2
                Name: VersionedDistribution
                Requires-Dist: splort (4)
                Provides-Extra: baz
                Requires-Dist: quux (>=1.1); extra == 'baz'
                """))
        unversioned = os.path.join(self.tmpdir,
                                   'UnversionedDistribution.dist-info')
        os.mkdir(unversioned)
        with open(os.path.join(unversioned, 'METADATA'), 'w+') as metadata_file:
            metadata_file.write(DALS(
                """
                Metadata-Version: 1.2
                Name: UnversionedDistribution
                Version: 0.3
                Requires-Dist: splort (==4)
                Provides-Extra: baz
                Requires-Dist: quux (>=1.1); extra == 'baz'
                """))

    def tearDown(self):
        shutil.rmtree(self.tmpdir)
| apache-2.0 |
sfairhur/pycbc | pycbc/distributions/qnm.py | 8 | 10576 | # Copyright (C) 2018 Miriam Cabero, Collin Capano
# This program is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by the
# Free Software Foundation; either version 3 of the License, or (at your
# option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General
# Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
import re
import numpy
import pycbc
from pycbc import conversions, boundaries
from . import uniform, bounded
class UniformF0Tau(uniform.Uniform):
    """A distribution uniform in QNM frequency and damping time.

    Constraints may be placed to exclude frequencies and damping times
    corresponding to specific masses and spins.

    To ensure a properly normalized pdf that accounts for the constraints
    on final mass and spin, a renormalization factor is calculated upon
    initialization. This is calculated numerically: f0 and tau are drawn
    randomly, then the norm is scaled by the fraction of points that yield
    final masses and spins within the constraints. The `norm_tolerance` keyword
    argument sets the error on the estimate of the norm from this numerical
    method. If this value is too large, such that no points are found in
    the allowed region, a ValueError is raised.

    Parameters
    ----------
    f0 : tuple or boundaries.Bounds
        The range of QNM frequencies (in Hz).
    tau : tuple or boundaries.Bounds
        The range of QNM damping times (in s).
    final_mass : tuple or boundaries.Bounds, optional
        The range of final masses to allow. Default is [0,inf).
    final_spin : tuple or boundaries.Bounds, optional
        The range of final spins to allow. Must be in [-0.996, 0.996], which
        is the default.
    rdfreq : str, optional
        Use the given string as the name for the f0 parameter. Default is 'f0'.
    damping_time : str, optional
        Use the given string as the name for the tau parameter. Default is
        'tau'.
    norm_tolerance : float, optional
        The tolerance on the estimate of the normalization. Default is 1e-3.
    norm_seed : int, optional
        Seed to use for the random number generator when estimating the norm.
        Default is 0. After the norm is estimated, the random number generator
        is set back to the state it was in upon initialization.

    Examples
    --------
    Create a distribution:

    >>> dist = UniformF0Tau(f0=(10., 2048.), tau=(1e-4,1e-2))

    Check that all random samples drawn from the distribution yield final
    masses > 1:

    >>> from pycbc import conversions
    >>> samples = dist.rvs(size=1000)
    >>> (conversions.final_mass_from_f0_tau(samples['f0'],
    samples['tau']) > 1.).all()
    True

    Create a distribution with tighter bounds on final mass and spin:

    >>> dist = UniformF0Tau(f0=(10., 2048.), tau=(1e-4,1e-2),
    final_mass=(20., 200.), final_spin=(0,0.996))

    Check that all random samples drawn from the distribution are in the
    final mass and spin constraints:

    >>> samples = dist.rvs(size=1000)
    >>> (conversions.final_mass_from_f0_tau(samples['f0'],
    samples['tau']) >= 20.).all()
    True
    >>> (conversions.final_mass_from_f0_tau(samples['f0'],
    samples['tau']) < 200.).all()
    True
    >>> (conversions.final_spin_from_f0_tau(samples['f0'],
    samples['tau']) >= 0.).all()
    True
    >>> (conversions.final_spin_from_f0_tau(samples['f0'],
    samples['tau']) < 0.996).all()
    True
    """

    name = 'uniform_f0_tau'

    def __init__(self, f0=None, tau=None, final_mass=None, final_spin=None,
                 rdfreq='f0', damping_time='tau', norm_tolerance=1e-3,
                 norm_seed=0):
        if f0 is None:
            raise ValueError("must provide a range for f0")
        if tau is None:
            raise ValueError("must provide a range for tau")
        self.rdfreq = rdfreq
        self.damping_time = damping_time
        parent_args = {rdfreq: f0, damping_time: tau}
        super(UniformF0Tau, self).__init__(**parent_args)
        if final_mass is None:
            final_mass = (0., numpy.inf)
        if final_spin is None:
            final_spin = (-0.996, 0.996)
        self.final_mass_bounds = boundaries.Bounds(
            min_bound=final_mass[0], max_bound=final_mass[1])
        self.final_spin_bounds = boundaries.Bounds(
            min_bound=final_spin[0], max_bound=final_spin[1])
        # Re-normalize to account for cuts: we'll do this by just sampling
        # a large number of f0, tau points, and seeing how many are in the
        # desired range.
        # preserve the current random state
        s = numpy.random.get_state()
        numpy.random.seed(norm_seed)
        nsamples = int(1./norm_tolerance**2)
        draws = super(UniformF0Tau, self).rvs(size=nsamples)
        # reset the random state
        numpy.random.set_state(s)
        num_in = self._constraints(draws).sum()
        # if num_in is 0, then the requested tolerance is too large
        if num_in == 0:
            raise ValueError("the normalization is smaller than the "
                             "norm_tolerance; try again with a smaller "
                             "norm_tolerance")
        self._lognorm += numpy.log(num_in) - numpy.log(nsamples)
        self._norm = numpy.exp(self._lognorm)

    def __contains__(self, params):
        isin = super(UniformF0Tau, self).__contains__(params)
        if isin:
            isin &= self._constraints(params)
        return isin

    def _constraints(self, params):
        """Return a boolean (array) that is True where the f0/tau values map
        to a final mass and spin inside the configured bounds."""
        f0 = params[self.rdfreq]
        tau = params[self.damping_time]
        # check if we need to specify a particular mode (l,m) != (2,2)
        if re.match(r'f_\d{3}', self.rdfreq):
            # strip('f_') removes the leading "f_" here since the remaining
            # characters are digits; only the first two digits (l, m) are used
            mode = self.rdfreq.strip('f_')
            l, m = int(mode[0]), int(mode[1])
        else:
            l, m = 2, 2
        # temporarily silence invalid warnings... these will just be ruled out
        # automatically
        orig = numpy.seterr(invalid='ignore')
        mf = conversions.final_mass_from_f0_tau(f0, tau, l=l, m=m)
        sf = conversions.final_spin_from_f0_tau(f0, tau, l=l, m=m)
        isin = (self.final_mass_bounds.__contains__(mf)) & (
                self.final_spin_bounds.__contains__(sf))
        numpy.seterr(**orig)
        return isin

    def rvs(self, size=1):
        """Draw random samples from this distribution.

        Parameters
        ----------
        size : int, optional
            The number of draws to do. Default is 1.

        Returns
        -------
        array
            A structured array of the random draws.
        """
        size = int(size)
        dtype = [(p, float) for p in self.params]
        arr = numpy.zeros(size, dtype=dtype)
        # rejection-sample: keep drawing until `size` points satisfy the
        # final mass/spin constraints
        remaining = size
        keepidx = 0
        while remaining:
            draws = super(UniformF0Tau, self).rvs(size=remaining)
            mask = self._constraints(draws)
            addpts = mask.sum()
            arr[keepidx:keepidx+addpts] = draws[mask]
            keepidx += addpts
            remaining = size - keepidx
        return arr

    @classmethod
    def from_config(cls, cp, section, variable_args):
        """Initialize this class from a config file.

        Bounds on ``f0``, ``tau``, ``final_mass`` and ``final_spin`` should
        be specified by providing ``min-{param}`` and ``max-{param}``. If
        the ``f0`` or ``tau`` param should be renamed, ``rdfreq`` and
        ``damping_time`` should be provided; these must match
        ``variable_args``. If ``rdfreq`` and ``damping_time`` are not
        provided, ``variable_args`` are expected to be ``f0`` and ``tau``.

        Only ``min/max-f0`` and ``min/max-tau`` need to be provided.

        Example:

        .. code-block:: ini

            [{section}-f0+tau]
            name = uniform_f0_tau
            min-f0 = 10
            max-f0 = 2048
            min-tau = 0.0001
            max-tau = 0.010
            min-final_mass = 10

        Parameters
        ----------
        cp : pycbc.workflow.WorkflowConfigParser
            WorkflowConfigParser instance to read.
        section : str
            The name of the section to read.
        variable_args : str
            The name of the variable args. These should be separated by
            ``pycbc.VARARGS_DELIM``.

        Returns
        -------
        UniformF0Tau :
            This class initialized with the parameters provided in the config
            file.
        """
        tag = variable_args
        variable_args = set(variable_args.split(pycbc.VARARGS_DELIM))
        # get f0 and tau
        f0 = bounded.get_param_bounds_from_config(cp, section, tag, 'f0')
        tau = bounded.get_param_bounds_from_config(cp, section, tag, 'tau')
        # see if f0 and tau should be renamed
        if cp.has_option_tag(section, 'rdfreq', tag):
            rdfreq = cp.get_opt_tag(section, 'rdfreq', tag)
        else:
            rdfreq = 'f0'
        if cp.has_option_tag(section, 'damping_time', tag):
            damping_time = cp.get_opt_tag(section, 'damping_time', tag)
        else:
            damping_time = 'tau'
        # check that they match whats in the variable args
        if not variable_args == set([rdfreq, damping_time]):
            raise ValueError("variable args do not match rdfreq and "
                             "damping_time names")
        # get the final mass and spin values, if provided
        final_mass = bounded.get_param_bounds_from_config(
            cp, section, tag, 'final_mass')
        final_spin = bounded.get_param_bounds_from_config(
            cp, section, tag, 'final_spin')
        extra_opts = {}
        if cp.has_option_tag(section, 'norm_tolerance', tag):
            extra_opts['norm_tolerance'] = float(
                cp.get_opt_tag(section, 'norm_tolerance', tag))
        if cp.has_option_tag(section, 'norm_seed', tag):
            extra_opts['norm_seed'] = int(
                cp.get_opt_tag(section, 'norm_seed', tag))
        return cls(f0=f0, tau=tau,
                   final_mass=final_mass, final_spin=final_spin,
                   rdfreq=rdfreq, damping_time=damping_time,
                   **extra_opts)
| gpl-3.0 |
Kongsea/tensorflow | tensorflow/python/kernel_tests/aggregate_ops_test.py | 53 | 5017 | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for aggregate_ops."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.core.framework import tensor_pb2
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import tensor_shape
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import logging_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.platform import test
class AddNTest(test.TestCase):
  """Tests for the AddN op (math_ops.add_n) across dtypes and input counts."""

  # AddN special-cases adding the first M inputs to make (N - M) divisible by 8,
  # after which it adds the remaining (N - M) tensors 8 at a time in a loop.
  # Test N in [1, 10] so we check each special-case from 1 to 9 and one
  # iteration of the loop.
  _MAX_N = 10

  def _supported_types(self):
    """Return the list of dtypes to test on the available device.

    The GPU list omits the integer types exercised on CPU.
    """
    if test.is_gpu_available():
      return [dtypes.float16, dtypes.float32, dtypes.float64, dtypes.complex64,
              dtypes.complex128]
    return [dtypes.int8, dtypes.int16, dtypes.int32, dtypes.int64,
            dtypes.float16, dtypes.float32, dtypes.float64, dtypes.complex64,
            dtypes.complex128]

  def _buildData(self, shape, dtype):
    """Return random numpy test data of the given shape and dtype."""
    data = np.random.randn(*shape).astype(dtype.as_numpy_dtype)
    # For complex types, add an index-dependent imaginary component so we can
    # tell we got the right value.
    if dtype.is_complex:
      return data + 10j * data
    return data

  def testAddN(self):
    """add_n over 1.._MAX_N tensors matches a NumPy reference sum."""
    np.random.seed(12345)
    with self.test_session(use_gpu=True) as sess:
      for dtype in self._supported_types():
        for count in range(1, self._MAX_N + 1):
          data = [self._buildData((2, 2), dtype) for _ in range(count)]
          actual = sess.run(math_ops.add_n(data))
          expected = np.sum(np.vstack(
              [np.expand_dims(d, 0) for d in data]), axis=0)
          # float16 accumulates more rounding error, hence the looser tol.
          tol = 5e-3 if dtype == dtypes.float16 else 5e-7
          self.assertAllClose(expected, actual, rtol=tol, atol=tol)

  def testUnknownShapes(self):
    """add_n works when input shapes are unknown (placeholder inputs)."""
    np.random.seed(12345)
    with self.test_session(use_gpu=True) as sess:
      for dtype in self._supported_types():
        data = self._buildData((2, 2), dtype)
        for count in range(1, self._MAX_N + 1):
          data_ph = array_ops.placeholder(dtype=dtype)
          actual = sess.run(math_ops.add_n([data_ph] * count), {data_ph: data})
          expected = np.sum(np.vstack([np.expand_dims(data, 0)] * count),
                            axis=0)
          tol = 5e-3 if dtype == dtypes.float16 else 5e-7
          self.assertAllClose(expected, actual, rtol=tol, atol=tol)

  def testVariant(self):
    """Smoke test: add_n executes over DT_VARIANT tensors holding ints."""

    def create_constant_variant(value):
      # Build a scalar DT_VARIANT constant wrapping an int payload.
      return constant_op.constant(
          tensor_pb2.TensorProto(
              dtype=dtypes.variant.as_datatype_enum,
              tensor_shape=tensor_shape.TensorShape([]).as_proto(),
              variant_val=[
                  tensor_pb2.VariantTensorDataProto(
                      # Match registration in variant_op_registry.cc
                      type_name=b"int",
                      metadata=np.array(value, dtype=np.int32).tobytes())
              ]))

    # TODO(ebrevdo): Re-enable use_gpu=True once non-DMA Variant
    # copying between CPU and GPU is supported.
    with self.test_session(use_gpu=False):
      variant_const_3 = create_constant_variant(3)
      variant_const_4 = create_constant_variant(4)
      variant_const_5 = create_constant_variant(5)
      # 3 + 3 + 5 + 4 = 15.
      result = math_ops.add_n((variant_const_3, variant_const_3,
                               variant_const_5, variant_const_4))

      # Smoke test -- ensure this executes without trouble.
      # Right now, non-numpy-compatible objects cannot be returned from a
      # session.run call; similarly, objects that can't be converted to
      # native numpy types cannot be passed to ops.convert_to_tensor.
      # For now, run the test and examine the output to see that the result is
      # equal to 15.
      result_op = logging_ops.Print(
          result, [variant_const_3, variant_const_4, variant_const_5, result],
          message=("Variants stored an int: c(3), c(4), c(5), "
                   "add_n(c(3), c(3), c(5), c(4)): ")).op
      result_op.run()
if __name__ == "__main__":
  # Allow running this file directly: delegates to the TensorFlow test runner.
  test.main()
| apache-2.0 |
racker/protobuf | python/google/protobuf/reflection.py | 260 | 5864 | # Protocol Buffers - Google's data interchange format
# Copyright 2008 Google Inc. All rights reserved.
# http://code.google.com/p/protobuf/
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# This code is meant to work on Python 2.4 and above only.
"""Contains a metaclass and helper functions used to create
protocol message classes from Descriptor objects at runtime.
Recall that a metaclass is the "type" of a class.
(A class is to a metaclass what an instance is to a class.)
In this case, we use the GeneratedProtocolMessageType metaclass
to inject all the useful functionality into the classes
output by the protocol compiler at compile-time.
The upshot of all this is that the real implementation
details for ALL pure-Python protocol buffers are *here in
this file*.
"""
__author__ = 'robinson@google.com (Will Robinson)'
from google.protobuf.internal import api_implementation
from google.protobuf import descriptor as descriptor_mod
_FieldDescriptor = descriptor_mod.FieldDescriptor
# Select the message implementation backend once at import time: the C++
# accelerated extension when configured, otherwise the pure-Python fallback.
# Both modules expose the same NewMessage/InitMessage interface, so the rest
# of this module is backend-agnostic.
if api_implementation.Type() == 'cpp':
  from google.protobuf.internal import cpp_message
  _NewMessage = cpp_message.NewMessage
  _InitMessage = cpp_message.InitMessage
else:
  from google.protobuf.internal import python_message
  _NewMessage = python_message.NewMessage
  _InitMessage = python_message.InitMessage
class GeneratedProtocolMessageType(type):
  """Metaclass for protocol message classes created at runtime from Descriptors.

  We add implementations for all methods described in the Message class.  We
  also create properties to allow getting/setting all fields in the protocol
  message.  Finally, we create slots to prevent users from accidentally
  "setting" nonexistent fields in the protocol message, which then wouldn't get
  serialized / deserialized properly.

  The protocol compiler currently uses this metaclass to create protocol
  message classes at runtime.  Clients can also manually create their own
  classes at runtime, as in this example:

  mydescriptor = Descriptor(.....)
  class MyProtoClass(Message):
    __metaclass__ = GeneratedProtocolMessageType
    DESCRIPTOR = mydescriptor
  myproto_instance = MyProtoClass()
  myproto.foo_field = 23
  ...
  """

  # Must be consistent with the protocol-compiler code in
  # proto2/compiler/internal/generator.*.
  _DESCRIPTOR_KEY = 'DESCRIPTOR'

  def __new__(cls, name, bases, dictionary):
    """Custom allocation for runtime-generated class types.

    We override __new__ because this is apparently the only place
    where we can meaningfully set __slots__ on the class we're creating(?).
    (The interplay between metaclasses and slots is not very well-documented).

    Args:
      name: Name of the class (ignored, but required by the
        metaclass protocol).
      bases: Base classes of the class we're constructing.
        (Should be message.Message.)  We ignore this field, but
        it's required by the metaclass protocol.
      dictionary: The class dictionary of the class we're
        constructing.  dictionary[_DESCRIPTOR_KEY] must contain
        a Descriptor object describing this protocol message
        type.

    Returns:
      Newly-allocated class.
    """
    descriptor = dictionary[GeneratedProtocolMessageType._DESCRIPTOR_KEY]
    # Let the selected backend (cpp_message or python_message) inject field
    # properties and Message-method implementations into the class dictionary
    # before the type object is created.
    _NewMessage(descriptor, dictionary)
    superclass = super(GeneratedProtocolMessageType, cls)
    new_class = superclass.__new__(cls, name, bases, dictionary)
    # Remember the generated class on the descriptor so reflective code can
    # map a Descriptor back to its concrete Python class.
    setattr(descriptor, '_concrete_class', new_class)
    return new_class

  def __init__(cls, name, bases, dictionary):
    """Here we perform the majority of our work on the class.

    We add enum getters, an __init__ method, implementations
    of all Message methods, and properties for all fields
    in the protocol type.

    Args:
      name: Name of the class (ignored, but required by the
        metaclass protocol).
      bases: Base classes of the class we're constructing.
        (Should be message.Message.)  We ignore this field, but
        it's required by the metaclass protocol.
      dictionary: The class dictionary of the class we're
        constructing.  dictionary[_DESCRIPTOR_KEY] must contain
        a Descriptor object describing this protocol message
        type.
    """
    descriptor = dictionary[GeneratedProtocolMessageType._DESCRIPTOR_KEY]
    # Backend hook that finishes wiring the freshly created class (enum
    # getters, __init__, Message API implementations).
    _InitMessage(descriptor, cls)
    superclass = super(GeneratedProtocolMessageType, cls)
    superclass.__init__(name, bases, dictionary)
| bsd-3-clause |
cinashp/whatsappbot | yowsup/layers/coder/tokendictionary.py | 14 | 11699 | class TokenDictionary:
def __init__(self):
self.dictionary = [
'',
'',
'',
'account',
'ack',
'action',
'active',
'add',
'after',
'all',
'allow',
'apple',
'audio',
'auth',
'author',
'available',
'bad-protocol',
'bad-request',
'before',
'bits',
'body',
'broadcast',
'cancel',
'category',
'challenge',
'chat',
'clean',
'code',
'composing',
'config',
'contacts',
'count',
'create',
'creation',
'debug',
'default',
'delete',
'delivery',
'delta',
'deny',
'digest',
'dirty',
'duplicate',
'elapsed',
'enable',
'encoding',
'encrypt',
'error',
'event',
'expiration',
'expired',
'fail',
'failure',
'false',
'favorites',
'feature',
'features',
'feature-not-implemented',
'field',
'file',
'filehash',
'first',
'free',
'from',
'g.us',
'gcm',
'get',
'google',
'group',
'groups',
'groups_v2',
'http://etherx.jabber.org/streams',
'http://jabber.org/protocol/chatstates',
'ib',
'id',
'image',
'img',
'index',
'internal-server-error',
'ip',
'iq',
'item-not-found',
'item',
'jabber:iq:last',
'jabber:iq:privacy',
'jabber:x:event',
'jid',
'kind',
'last',
'leave',
'list',
'max',
'mechanism',
'media',
'message_acks',
'message',
'method',
'microsoft',
'mimetype',
'missing',
'modify',
'msg',
'mute',
'name',
'nokia',
'none',
'not-acceptable',
'not-allowed',
'not-authorized',
'notification',
'notify',
'off',
'offline',
'order',
'owner',
'owning',
'p_o',
'p_t',
'paid',
'participant',
'participants',
'participating',
'paused',
'picture',
'pin',
'ping',
'pkmsg',
'platform',
'port',
'presence',
'preview',
'probe',
'prop',
'props',
'qcount',
'query',
'raw',
'read',
'readreceipts',
'reason',
'receipt',
'relay',
'remote-server-timeout',
'remove',
'request',
'required',
'resource-constraint',
'resource',
'response',
'result',
'retry',
'rim',
's_o',
's_t',
's.us',
's.whatsapp.net',
'seconds',
'server-error',
'server',
'service-unavailable',
'set',
'show',
'silent',
'size',
'skmsg',
'stat',
'state',
'status',
'stream:error',
'stream:features',
'subject',
'subscribe',
'success',
'sync',
't',
'text',
'timeout',
'timestamp',
'tizen',
'to',
'true',
'type',
'unavailable',
'unsubscribe',
'upgrade',
'uri',
'url',
'urn:ietf:params:xml:ns:xmpp-sasl',
'urn:ietf:params:xml:ns:xmpp-stanzas',
'urn:ietf:params:xml:ns:xmpp-streams',
'urn:xmpp:ping',
'urn:xmpp:whatsapp:account',
'urn:xmpp:whatsapp:dirty',
'urn:xmpp:whatsapp:mms',
'urn:xmpp:whatsapp:push',
'urn:xmpp:whatsapp',
'user',
'user-not-found',
'v',
'value',
'version',
'voip',
'w:g',
'w:p:r',
'w:p',
'w:profile:picture',
'w',
'wait',
'WAUTH-2',
'xmlns:stream',
'xmlns',
'1',
'chatstate',
'crypto',
'phash',
'enc',
'class',
'off_cnt',
'w:g2',
'promote',
'demote',
'creator',
'background',
'backoff',
'chunked',
'context',
'full',
'in',
'interactive',
'out',
'registration',
'sid',
'urn:xmpp:whatsapp:sync',
'flt',
's16',
'u8',
]
self.secondaryDictionary = [
'adpcm',
'amrnb',
'amrwb',
'mp3',
'pcm',
'qcelp',
'wma',
'h263',
'h264',
'jpeg',
'mpeg4',
'wmv',
'audio/3gpp',
'audio/aac',
'audio/amr',
'audio/mp4',
'audio/mpeg',
'audio/ogg',
'audio/qcelp',
'audio/wav',
'audio/webm',
'audio/x-caf',
'audio/x-ms-wma',
'image/gif',
'image/jpeg',
'image/png',
'video/3gpp',
'video/avi',
'video/mp4',
'video/mpeg',
'video/quicktime',
'video/x-flv',
'video/x-ms-asf',
'302',
'400',
'401',
'402',
'403',
'404',
'405',
'406',
'407',
'409',
'410',
'500',
'501',
'503',
'504',
'abitrate',
'acodec',
'app_uptime',
'asampfmt',
'asampfreq',
'clear',
'conflict',
'conn_no_nna',
'cost',
'currency',
'duration',
'extend',
'fps',
'g_notify',
'g_sound',
'gone',
'google_play',
'hash',
'height',
'invalid',
'jid-malformed',
'latitude',
'lc',
'lg',
'live',
'location',
'log',
'longitude',
'max_groups',
'max_participants',
'max_subject',
'mode',
'napi_version',
'normalize',
'orighash',
'origin',
'passive',
'password',
'played',
'policy-violation',
'pop_mean_time',
'pop_plus_minus',
'price',
'pricing',
'redeem',
'Replaced by new connection',
'resume',
'signature',
'sound',
'source',
'system-shutdown',
'username',
'vbitrate',
'vcard',
'vcodec',
'video',
'width',
'xml-not-well-formed',
'checkmarks',
'image_max_edge',
'image_max_kbytes',
'image_quality',
'ka',
'ka_grow',
'ka_shrink',
'newmedia',
'library',
'caption',
'forward',
'c0',
'c1',
'c2',
'c3',
'clock_skew',
'cts',
'k0',
'k1',
'login_rtt',
'm_id',
'nna_msg_rtt',
'nna_no_off_count',
'nna_offline_ratio',
'nna_push_rtt',
'no_nna_con_count',
'off_msg_rtt',
'on_msg_rtt',
'stat_name',
'sts',
'suspect_conn',
'lists',
'self',
'qr',
'web',
'w:b',
'recipient',
'w:stats',
'forbidden',
'max_list_recipients',
'en-AU',
'en-GB',
'es-MX',
'pt-PT',
'zh-Hans',
'zh-Hant',
'relayelection',
'relaylatency',
'interruption',
'Bell.caf',
'Boing.caf',
'Glass.caf',
'Harp.caf',
'TimePassing.caf',
'Tri-tone.caf',
'Xylophone.caf',
'aurora.m4r',
'bamboo.m4r',
'chord.m4r',
'circles.m4r',
'complete.m4r',
'hello.m4r',
'input.m4r',
'keys.m4r',
'note.m4r',
'popcorn.m4r',
'pulse.m4r',
'synth.m4r',
'Apex.m4r',
'Beacon.m4r',
'Bulletin.m4r',
'By The Seaside.m4r',
'Chimes.m4r',
'Circuit.m4r',
'Constellation.m4r',
'Cosmic.m4r',
'Crystals.m4r',
'Hillside.m4r',
'Illuminate.m4r',
'Night Owl.m4r',
'Opening.m4r',
'Playtime.m4r',
'Presto.m4r',
'Radar.m4r',
'Radiate.m4r',
'Ripples.m4r',
'Sencha.m4r',
'Signal.m4r',
'Silk.m4r',
'Slow Rise.m4r',
'Stargaze.m4r',
'Summit.m4r',
'Twinkle.m4r',
'Uplift.m4r',
'Waves.m4r',
'eligible',
'planned',
'current',
'future',
'disable',
'expire',
'start',
'stop',
'accuracy',
'speed',
'bearing',
'recording',
'key',
'identity',
'w:gp2',
'admin',
'locked',
'unlocked',
'new',
'battery',
'archive',
'adm',
'plaintext_size',
'plaintext_disabled',
'plaintext_reenable_threshold',
'compressed_size',
'delivered',
'everyone',
'transport',
'mspes',
'e2e_groups',
'e2e_images',
'encr_media',
'encrypt_v2',
'encrypt_image',
'encrypt_sends_push',
'force_long_connect',
'audio_opus',
'video_max_edge',
'call-id',
'call',
'preaccept',
'accept',
'offer',
'reject',
'busy',
'te',
'terminate',
'begin',
'end',
'opus',
'rtt',
'token',
'priority',
'p2p',
'rate',
'amr',
'ptt',
'srtp',
'os',
'browser',
'encrypt_group_gen2'
]
def getToken(self, index, secondary = False):
targetDict = self.dictionary
if secondary:
targetDict = self.secondaryDictionary
elif index > 236 and index < (236 + len(self.secondaryDictionary)):
targetDict = self.secondaryDictionary
index = index - 237
if index < 0 or index > len(targetDict) - 1:
return None
return targetDict[index]
def getIndex(self, token):
if token in self.dictionary:
return (self.dictionary.index(token), False)
elif token in self.secondaryDictionary:
return (self.secondaryDictionary.index(token), True)
return None
| gpl-3.0 |
chromaway/ngcccbase | ngcccbase/p2ptrade/tests/test_ewctrl.py | 4 | 4017 |
import unittest
from coloredcoinlib import (
ZeroSelectError, SimpleColorValue, UNCOLORED_MARKER, ColorDefinition
)
from ngcccbase.pwallet import PersistentWallet
from ngcccbase.txcons import InsufficientFundsError, RawTxSpec
from ngcccbase.wallet_controller import WalletController
from ngcccbase.p2ptrade.ewctrl import EWalletController, OperationalETxSpec
from ngcccbase.p2ptrade.protocol_objects import ETxSpec, MyEOffer
class TestEWalletController(unittest.TestCase):
    """Exercises EWalletController ETx-spec creation and reply handling.

    Requires a testnet wallet that already owns the 'testobc' asset.
    """

    def setUp(self):
        self.pwallet = PersistentWallet(None, True)
        self.pwallet.init_model()
        self.model = self.pwallet.get_model()
        adm = self.model.get_asset_definition_manager()
        # make sure you have the asset 'testobc' in your testnet.wallet !!
        self.asset = adm.get_asset_by_moniker('testobc')
        self.color_spec = self.asset.get_color_set().get_data()[0]
        self.wc = WalletController(self.model)
        self.ewc = EWalletController(self.model, self.wc)

        # Stub out transaction broadcasting so tests never hit the network.
        def null(a):
            pass
        self.wc.publish_tx = null

    def _assert_valid_targets(self, etx):
        """Check that every (address, color_spec, value) target is well formed.

        Shared by both directions of test_tx_spec (previously duplicated
        inline loops).
        """
        for address, color_spec, value in etx.targets:
            # Addresses come back as unicode strings.
            self.assertTrue(isinstance(address, type(u"unicode")))
            # TODO check address is correct format
            # A color spec is either empty/uncolored (1 part) or of the
            # colon-separated 4-part form.
            self.assertTrue(isinstance(color_spec, type("str")))
            color_spec_parts = len(color_spec.split(":"))
            self.assertTrue(color_spec_parts == 4 or color_spec_parts == 1)
            # Values are plain integer amounts.
            self.assertTrue(isinstance(value, type(10)))

    def test_resolve_color_spec(self):
        self.cd = self.ewc.resolve_color_spec('')
        self.assertRaises(KeyError, self.ewc.resolve_color_spec, 'nonexistent')
        self.assertTrue(isinstance(self.cd, ColorDefinition))
        self.assertEqual(self.cd.get_color_id(), 0)

    def test_select_inputs(self):
        # An absurdly large uncolored amount can never be funded.
        cv = SimpleColorValue(colordef=UNCOLORED_MARKER, value=10000000000000)
        self.assertRaises(InsufficientFundsError, self.ewc.select_inputs, cv)

    def test_tx_spec(self):
        alice_cv = {'color_spec': self.color_spec, 'value': 10}
        bob_cv = {'color_spec': "", 'value': 500}
        alice_offer = MyEOffer(None, alice_cv, bob_cv)

        # Bob proposes the exchange transaction and Alice replies.
        bob_etx = self.ewc.make_etx_spec(bob_cv, alice_cv)
        self.assertTrue(isinstance(bob_etx, ETxSpec))
        self._assert_valid_targets(bob_etx)
        signed = self.ewc.make_reply_tx(bob_etx, alice_cv, bob_cv)
        self.assertTrue(isinstance(signed, RawTxSpec))
        self.ewc.publish_tx(signed, alice_offer)

        # The mirror-image exchange initiated from Alice's side.
        alice_etx = self.ewc.make_etx_spec(alice_cv, bob_cv)
        self.assertTrue(isinstance(alice_etx, ETxSpec))
        self._assert_valid_targets(alice_etx)
        signed = self.ewc.make_reply_tx(alice_etx, bob_cv, alice_cv)
        self.assertTrue(isinstance(signed, RawTxSpec))

        # Coin-selection limits of the operational spec.
        oets = OperationalETxSpec(self.model, self.ewc)
        oets.set_our_value_limit(bob_cv)
        oets.prepare_inputs(alice_etx)
        zero = SimpleColorValue(colordef=UNCOLORED_MARKER, value=0)
        self.assertRaises(ZeroSelectError, oets.select_coins, zero)
        toomuch = SimpleColorValue(colordef=UNCOLORED_MARKER, value=10000000000000)
        self.assertRaises(InsufficientFundsError, oets.select_coins, toomuch)
if __name__ == '__main__':
unittest.main()
| mit |
arpho/mmasgis5 | Albero/progressBar.py | 2 | 1482 | from Ui_progressBar import *
from PyQt4 import QtCore,QtGui
from PyQt4.QtGui import QMainWindow
from PyQt4.QtCore import pyqtSignature
class ProgressBarWindow(QMainWindow, Ui_ProgressBar):
    """Wrapper around the generated Ui_ProgressBar window.

    Shows up to two progress bars: progressBar_1 (top) reports overall
    progress and progressBar_2 (bottom) reports the current partial step.
    (Original Italian docstrings translated; the maximum-setter docstrings
    were swapped relative to the widgets actually updated and have been
    corrected to match the code.)
    """
    def __init__(self,visible1, visible2,parent=None):
        """
        @param bool: True to show the overall-progress bar (progressBar_1)
        @param bool: True to show the partial-progress bar (progressBar_2)
        """
        QMainWindow.__init__(self,parent)
        #self.w=Ui_ProgressBar()
        self.setupUi(self)
        self.progressBar_1.setVisible(visible1)
        self.progressBar_2.setVisible(visible2)
        #self.progressBar_1.setMaximum(100)
        #self.progressBar_1.setValue(40)
    def setMaximumPartial(self,max):
        """
        Set the maximum value of the partial-progress bar (progressBar_2).
        @param int:
        """
        self.progressBar_2.setMaximum(max)
    def setMaximumOverall(self,max):
        """
        Set the maximum value of the overall-progress bar (progressBar_1).
        @param int:
        """
        self.progressBar_1.setMaximum(max)
    def setValueOverall(self,v):
        """Set the value of the top progress bar, reporting overall progress.
        @param int:
        """
        self.progressBar_1.setValue(v)
    def setValueParticular(self,v):
        """Set the value of the bottom progress bar, reporting partial progress.
        @param int:
        """
        self.progressBar_2.setValue(v)
    def setValueLabel(self,txt):
        """Set the text of the label shown in the window.
        @param string
        """
        self.label.setText(txt)
| mit |
Trust-Code/addons-yelizariev | res_users_signature/res_users_signature_models.py | 16 | 7872 | from openerp.osv import fields as old_fields
from openerp import api,models,fields,tools
try:
from openerp.addons.email_template.email_template import mako_template_env
except ImportError:
try:
from openerp.addons.mail.mail_template import mako_template_env
except ImportError:
pass
from openerp.loglevels import ustr
from email.mime.text import MIMEText
from email.mime.base import MIMEBase
from email.mime.multipart import MIMEMultipart
from email.charset import Charset
from email.header import Header
from email.utils import formatdate, make_msgid, COMMASPACE, parseaddr
from email import Encoders
import openerp.tools as tools
from openerp.tools.translate import _
from openerp.tools import html2text
import openerp.tools as tools
import re
import base64
from openerp.addons.base.ir.ir_mail_server import encode_rfc2822_address_header, encode_header, encode_header_param
class res_users(models.Model):
    # Extends res.users with a template-driven HTML email signature.
    _inherit = 'res.users'

    # Template used to (re)generate the signature; empty means the user
    # edits the signature by hand.
    signature_id = fields.Many2one('res.users.signature', string='Signature template', help='Keep empty to edit signature manually')
    # Old-API column override: disable HTML sanitizing so rendered markup
    # is stored verbatim.
    _columns = {
        'signature': old_fields.html('Signature', sanitize=False)
    }

    @api.one
    @api.onchange('signature_id')
    def render_signature_id(self):
        """Render the selected mako template with this user and store it."""
        if not self.signature_id:
            # No template selected: leave the manually edited signature alone.
            return
        mako = mako_template_env.from_string(tools.ustr(self.signature_id.template))
        html = mako.render({'user':self})
        # Only write when the rendered output actually changed.
        if html != self.signature:
            self.signature = html

    @api.one
    def write(self, vals):
        """Re-render the signature whenever the user's company changes."""
        res = super(res_users, self).write(vals)
        if any([k in vals for k in ['company_id']]):
            self.render_signature_id()
        return res
class res_users_signature(models.Model):
    # A reusable signature template shared by several users; saving a
    # template re-renders the signature of every linked user.
    _name = 'res.users.signature'

    name = fields.Char('Name')
    comment = fields.Text('Internal note')
    # NOTE: the help text below is shown to administrators in the UI and is
    # rendered by mako at runtime -- kept verbatim.
    template = fields.Html('Template', sanitize=False, help='''You can use variables:
* ${user.name}
* ${user.function} (job position)
* ${user.partner_id.company_id.name} (company in a partner form)
* ${user.company_id.name} (current company)
* ${user.email}
* ${user.phone}
* ${user.mobile}
* etc. (contact your administrator for further information)
You can use control structures:
% if user.mobile
Mobile: ${user.mobile}
% endif
''')
    user_ids = fields.One2many('res.users', 'signature_id', string='Users')

    @api.one
    def write(self, vals):
        """Propagate any template change to all users using this template."""
        res = super(res_users_signature, self).write(vals)
        self.action_update_signature()
        return res

    @api.one
    def action_update_signature(self):
        """Re-render the signature of every user linked to this template."""
        self.user_ids.render_signature_id()
class res_partner(models.Model):
    # Partner data (company name, phone, ...) may appear in rendered
    # signatures, so changes must refresh the signatures of related users.
    _inherit = 'res.partner'

    @api.one
    def write(self, vals):
        """Refresh the rendered signatures of users linked to this partner."""
        res = super(res_partner, self).write(vals)
        if self.user_ids:
            self.user_ids.render_signature_id()
        return res
class ir_mail_server(models.Model):
    # Overrides build_email to extract inline base64 PNG images from the
    # body and re-attach them as CID-referenced attachments, so signatures
    # with embedded images survive mail clients that strip data: URIs.
    _inherit = "ir.mail_server"

    def build_email(self, email_from, email_to, subject, body, email_cc=None, email_bcc=None, reply_to=False,
                    attachments=None, message_id=None, references=None, object_id=False, subtype='plain', headers=None,
                    body_alternative=None, subtype_alternative='plain'):
        """Copy-pasted from openerp/addons/base/ir/ir_mail_server.py::build_email,
        extended with a pre-pass that rewrites inline
        ``"data:image/png;base64,..."`` images in *body* into ``cid:`` links
        backed by generated attachments named ``__image-N__``.
        """
        # --- inline-image extraction pre-pass (the local addition) ---
        # NOTE(review): this scan assumes *body* is a string; a None body
        # would crash here before the later 'if not body' guard -- confirm
        # callers always pass a string.
        ftemplate = '__image-%s__'
        fcounter = 0
        attachments = attachments or []
        pattern = re.compile(r'"data:image/png;base64,[^"]*"')
        pos = 0
        new_body = ''
        while True:
            match = pattern.search(body, pos)
            if not match:
                break
            s = match.start()
            e = match.end()
            # Strip the quoted data-URI prefix and the closing quote to get
            # the raw base64 payload.
            data = body[s+len('"data:image/png;base64,'):e-1]
            new_body += body[pos:s]
            fname = ftemplate % fcounter
            fcounter += 1
            attachments.append( (fname, base64.b64decode(data)) )
            # Replace the data URI with a Content-ID reference to the new
            # attachment.
            new_body += '"cid:%s"' % fname
            pos = e
        new_body += body[pos:]
        body = new_body
        # --- below is the stock OpenERP implementation ---
        email_from = email_from or tools.config.get('email_from')
        assert email_from, "You must either provide a sender address explicitly or configure "\
                           "a global sender address in the server configuration or with the "\
                           "--email-from startup parameter."
        # Note: we must force all strings to 8-bit utf-8 when crafting message,
        # or use encode_header() for headers, which does it automatically.
        headers = headers or {} # need valid dict later
        if not email_cc: email_cc = []
        if not email_bcc: email_bcc = []
        if not body: body = u''
        email_body_utf8 = ustr(body).encode('utf-8')
        email_text_part = MIMEText(email_body_utf8, _subtype=subtype, _charset='utf-8')
        msg = MIMEMultipart()
        if not message_id:
            if object_id:
                message_id = tools.generate_tracking_message_id(object_id)
            else:
                message_id = make_msgid()
        msg['Message-Id'] = encode_header(message_id)
        if references:
            msg['references'] = encode_header(references)
        msg['Subject'] = encode_header(subject)
        msg['From'] = encode_rfc2822_address_header(email_from)
        del msg['Reply-To']
        if reply_to:
            msg['Reply-To'] = encode_rfc2822_address_header(reply_to)
        else:
            msg['Reply-To'] = msg['From']
        msg['To'] = encode_rfc2822_address_header(COMMASPACE.join(email_to))
        if email_cc:
            msg['Cc'] = encode_rfc2822_address_header(COMMASPACE.join(email_cc))
        if email_bcc:
            msg['Bcc'] = encode_rfc2822_address_header(COMMASPACE.join(email_bcc))
        msg['Date'] = formatdate()
        # Custom headers may override normal headers or provide additional ones
        for key, value in headers.iteritems():
            msg[ustr(key).encode('utf-8')] = encode_header(value)
        if subtype == 'html' and not body_alternative and html2text:
            # Always provide alternative text body ourselves if possible.
            text_utf8 = tools.html2text(email_body_utf8.decode('utf-8')).encode('utf-8')
            alternative_part = MIMEMultipart(_subtype="alternative")
            alternative_part.attach(MIMEText(text_utf8, _charset='utf-8', _subtype='plain'))
            alternative_part.attach(email_text_part)
            msg.attach(alternative_part)
        elif body_alternative:
            # Include both alternatives, as specified, within a multipart/alternative part
            alternative_part = MIMEMultipart(_subtype="alternative")
            body_alternative_utf8 = ustr(body_alternative).encode('utf-8')
            alternative_body_part = MIMEText(body_alternative_utf8, _subtype=subtype_alternative, _charset='utf-8')
            alternative_part.attach(alternative_body_part)
            alternative_part.attach(email_text_part)
            msg.attach(alternative_part)
        else:
            msg.attach(email_text_part)
        if attachments:
            for (fname, fcontent) in attachments:
                filename_rfc2047 = encode_header_param(fname)
                part = MIMEBase('application', "octet-stream")
                # The default RFC2231 encoding of Message.add_header() works in Thunderbird but not GMail
                # so we fix it by using RFC2047 encoding for the filename instead.
                part.set_param('name', filename_rfc2047)
                part.add_header('Content-Disposition', 'attachment', filename=filename_rfc2047)
                # Content-ID lets the HTML body reference this attachment via
                # the cid: links generated in the pre-pass above.
                part.add_header('Content-ID', '<%s>' % filename_rfc2047) # NEW STUFF
                part.set_payload(fcontent)
                Encoders.encode_base64(part)
                msg.attach(part)
        return msg
| lgpl-3.0 |
ryfeus/lambda-packs | Tensorflow/source/tensorflow/contrib/boosted_trees/lib/learner/batch/base_split_handler.py | 53 | 6483 | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Base class for creating split nodes using one or more features."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import abc
from tensorflow.contrib.boosted_trees.python.ops import batch_ops_utils
from tensorflow.python.ops import control_flow_ops
class BaseSplitHandler(object):
  """Abstract Base class defining split handlers interface."""

  __metaclass__ = abc.ABCMeta  # Python 2 style abstract-base-class marker.

  def __init__(self,
               l1_regularization,
               l2_regularization,
               tree_complexity_regularization,
               min_node_weight,
               feature_column_group_id,
               gradient_shape,
               hessian_shape,
               multiclass_strategy,
               name=None):
    """Constructor for BaseSplitHandler.

    Args:
      l1_regularization: L1 regularization applied for this split handler.
      l2_regularization: L2 regularization applied for this split handler.
      tree_complexity_regularization: Tree complexity regularization applied
          for this split handler.
      min_node_weight: Minimum sum of weights of examples in each partition to
          be considered for splitting.
      feature_column_group_id: Feature column group index.
      gradient_shape: A TensorShape, containing shape of gradients.
      hessian_shape: A TensorShape, containing shape of hessians.
      multiclass_strategy: Strategy describing how to treat multiclass problems.
      name: An optional handler name.
    """
    self._l1_regularization = l1_regularization
    self._l2_regularization = l2_regularization
    self._tree_complexity_regularization = tree_complexity_regularization
    self._min_node_weight = min_node_weight
    self._feature_column_group_id = feature_column_group_id
    self._name = name or ""  # Empty string when no name is supplied.
    self._multiclass_strategy = multiclass_strategy
    self._hessian_shape = hessian_shape
    self._gradient_shape = gradient_shape

  def scheduled_reads(self):
    """Returns the list of `ScheduledOp`s required for update_stats."""
    # Subclasses that batch accumulator reads override this; the base class
    # schedules nothing.
    return []

  @abc.abstractmethod
  def update_stats(self, stamp_token, example_partition_ids, gradients,
                   hessians, empty_gradients, empty_hessians, weights,
                   is_active, scheduled_reads):
    """Updates the state for this split handler.

    Args:
      stamp_token: An int32 scalar tensor containing the current stamp token.
      example_partition_ids: A dense tensor, containing an int32 for each
        example which is the partition id that the example ends up in.
      gradients: A dense tensor of gradients.
      hessians: A dense tensor of hessians.
      empty_gradients: A dense empty tensor of the same shape (for dimensions >
        0) as gradients.
      empty_hessians: A dense empty tensor of the same shape (for dimensions >
        0) as hessians.
      weights: A dense float32 tensor with a weight for each example.
      is_active: A boolean tensor that says if this handler is active or not.
          One value for the current layer and one value for the next layer.
      scheduled_reads: List of results from the scheduled reads.

    Returns:
      A tuple of the op that updates the stats for this handler and a list of
      `ScheduledOp`s.
    """

  def update_stats_sync(self, stamp_token, example_partition_ids, gradients,
                        hessians, empty_gradients, empty_hessians, weights,
                        is_active):
    """Updates the state for this split handler running the scheduled I/O.

    Args:
      stamp_token: An int32 scalar tensor containing the current stamp token.
      example_partition_ids: A dense tensor, containing an int32 for each
        example which is the partition id that the example ends up in.
      gradients: A dense tensor of gradients.
      hessians: A dense tensor of hessians.
      empty_gradients: A dense empty tensor of the same shape (for dimensions >
        0) as gradients.
      empty_hessians: A dense empty tensor of the same shape (for dimensions >
        0) as hessians.
      weights: A dense float32 tensor with a weight for each example.
      is_active: A boolean tensor that says if this handler is active or not.
          One value for the current layer and one value for the next layer.

    Returns:
      Op that updates the stats for this handler.
    """
    # Convenience wrapper: run this handler's scheduled reads, feed the
    # results to update_stats, then run the scheduled updates it produced,
    # and group everything into one op.
    handler_reads = {self: self.scheduled_reads()}
    handler_results = batch_ops_utils.run_handler_scheduled_ops(
        handler_reads, stamp_token, None)
    update_1, scheduled_updates = self.update_stats(
        stamp_token, example_partition_ids, gradients, hessians,
        empty_gradients, empty_hessians, weights, is_active,
        handler_results[self])
    update_2 = batch_ops_utils.run_handler_scheduled_ops({
        self: scheduled_updates
    }, stamp_token, None)
    return control_flow_ops.group(update_1, *update_2[self])

  @abc.abstractmethod
  def make_splits(self, stamp_token, next_stamp_token, class_id):
    """Create the best split using the accumulated stats and flush the state.

    This should only be called by the master.

    Args:
      stamp_token: An int32 scalar tensor containing the current stamp token.
      next_stamp_token: An int32 scalar tensor containing the stamp token for
        the next iteration.
      class_id: what class id the handler gathers stats for (for tree per class
        strategy). When class_id=-1, the strategy is not tree per class.

    Returns:
      A tuple (are_splits_ready, partition_id, gain, split_info) where
      are_splits_ready is a scalar boolean tensor, partition_id is a rank 1,
      int32 tensor, gain is a rank 1 float32 tensor and split_info is a rank 1
      string tensor containing serialized SplitInfo protos.
    """
| mit |
ssdongdongwang/arisgames | zxing-master/cpp/scons/scons-local-2.0.0.final.0/SCons/Tool/hplink.py | 34 | 2388 | """SCons.Tool.hplink
Tool-specific initialization for the HP linker.
There normally shouldn't be any need to import this module directly.
It will usually be imported through the generic SCons.Tool.Tool()
selection method.
"""
#
# Copyright (c) 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010 The SCons Foundation
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
__revision__ = "src/engine/SCons/Tool/hplink.py 5023 2010/06/14 22:05:46 scons"
import os
import os.path
import SCons.Util
import link
# Absolute path to the aCC front end used for linking, or None if not found.
ccLinker = None

# Search for the aCC compiler and linker front end under the standard
# HP-UX /opt install locations; the first readable match wins.
try:
    dirs = os.listdir('/opt')
except (IOError, OSError):
    # Not being able to read the directory because it doesn't exist
    # (IOError) or isn't readable (OSError) is okay.
    dirs = []

for dir in dirs:
    linker = '/opt/' + dir + '/bin/aCC'
    if os.path.exists(linker):
        ccLinker = linker
        break
def generate(env):
    """
    Add Builders and construction variables for the HP aCC linker to
    an Environment.

    (The original docstring said "Visual Age linker" -- a copy/paste from
    the AIX tool; this module configures the HP-UX linker.)
    """
    link.generate(env)
    # '+s' and '+vnocompatwarnings' are HP ld options passed through the
    # compiler driver via -Wl; '-b' requests a shared library and '.sl' is
    # the HP-UX shared-library suffix.
    env['LINKFLAGS'] = SCons.Util.CLVar('-Wl,+s -Wl,+vnocompatwarnings')
    env['SHLINKFLAGS'] = SCons.Util.CLVar('$LINKFLAGS -b')
    env['SHLIBSUFFIX'] = '.sl'
def exists(env):
    """Return the detected aCC path if the HP linker is available, else None."""
    return ccLinker
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4:
| mit |
ClaudioNahmad/Servicio-Social | Parametros/CosmoMC/prerrequisitos/plc-2.0/build/pyfits-3.2.2/lib/pyfits/scripts/fitsdiff.py | 3 | 9985 | import glob
import logging
import optparse
import os
import sys
import textwrap
import pyfits.diff
from pyfits.util import fill
log = logging.getLogger('fitsdiff')
USAGE = """
Compare two FITS image files and report the differences in header keywords and
data.
fitsdiff [options] filename1 filename2
where filename1 filename2 are the two files to be compared. They may also be
wild cards, in such cases, they must be enclosed by double or single quotes, or
they may be directory names. If both are directory names, all files in each of
the directories will be included; if only one is directory name, then the
directory name will be prefixed to the file name(s) specified by the other
argument. for example::
fitsdiff "*.fits" "/machine/data1"
will compare all FITS files in the current directory to the corresponding files
in the directory /machine/data1.
""".strip()
EPILOG = """
If the two files are identical within the specified conditions, it will report
"No difference is found." If the value(s) of -c and -k takes the form
'@filename', list is in the text file 'filename', and each line in that text
file contains one keyword.
Example
-------
fitsdiff -k filename,filtnam1 -n 5 -d 1.e-6 test1.fits test2
This command will compare files test1.fits and test2.fits, report maximum of 5
different pixels values per extension, only report data values larger than
1.e-6 relative to each other, and will neglect the different values of keywords
FILENAME and FILTNAM1 (or their very existence).
fitsdiff commandline arguments can also be set using the environment variable
FITSDIFF_SETTINGS. If the FITSDIFF_SETTINGS environment variable is present,
each argument present will override the corresponding argument on the
commandline. This environment variable exists to make it easier to change the
behavior of fitsdiff on a global level, such as in a set of regression tests.
""".strip()
class HelpFormatter(optparse.TitledHelpFormatter):
    """optparse formatter that word-wraps the epilog to the help width."""
    def format_epilog(self, epilog):
        # Surround the filled epilog with blank lines so it reads as its own
        # section after the option listing.
        return '\n%s\n' % fill(epilog, self.width)
def handle_options(argv=None):
    """Parse the fitsdiff command line.

    Parameters
    ----------
    argv : list of str, optional
        The argument list to parse; when `None`, optparse falls back to
        ``sys.argv[1:]``.

    Returns
    -------
    tuple
        ``(options, args)`` where ``args`` holds exactly the two
        file/directory/wildcard arguments to compare.  Exits with a usage
        error if any other number of positional arguments was given.
    """

    # This is a callback--less trouble than actually adding a new action type
    def store_list(option, opt, value, parser):
        setattr(parser.values, option.dest, [])

        # Accept either a comma-separated list or a filename (starting with @)
        # containing a value on each line
        if value and value[0] == '@':
            value = value[1:]
            if not os.path.exists(value):
                log.warning('%s argument %s does not exist' % (opt, value))
                return
            try:
                # Use a with block so the file handle is always closed
                # (previously the handle was leaked).
                with open(value, 'r') as listfile:
                    values = [v.strip() for v in listfile.readlines()]
                setattr(parser.values, option.dest, values)
            except IOError as e:
                log.warning('reading %s for %s failed: %s; ignoring this '
                            'argument' % (value, opt, e))
        else:
            setattr(parser.values, option.dest,
                    [v.strip() for v in value.split(',')])

    parser = optparse.OptionParser(usage=USAGE, epilog=EPILOG,
                                   formatter=HelpFormatter())

    parser.add_option(
        '-q', '--quiet', action='store_true',
        help='Produce no output and just return a status code.')

    parser.add_option(
        '-n', '--num-diffs', type='int', default=10, dest='numdiffs',
        metavar='INTEGER',
        help='Max number of data differences (image pixel or table element) '
             'to report per extension (default %default).')

    parser.add_option(
        '-d', '--difference-tolerance', type='float', default=0.,
        dest='tolerance', metavar='NUMBER',
        help='The relative tolerance for comparison of two numbers, '
             'specifically two floating point numbers. This applies to data '
             'in both images and tables, and to floating point keyword values '
             'in headers (default %default).')

    parser.add_option(
        '-b', '--no-ignore-blanks', action='store_false', dest='ignore_blanks',
        default=True,
        help="Don't ignore trailing blanks (whitespace) in string values. "
             "Otherwise trailing blanks both in header keywords/values and in "
             "table column values) are not treated as significant i.e. "
             "without this option 'ABC ' and 'ABC' are considered "
             "equivalent.")

    parser.add_option(
        '--no-ignore-blank-cards', action='store_false',
        dest='ignore_blank_cards', default=True,
        # "entirey" typo in the help text fixed to "entirely".
        help="Don't ignore entirely blank cards in headers. Normally fitsdiff "
             "does not consider blank cards when comparing headers, but this "
             "will ensure that even blank cards match up.")

    parser.add_option(
        '-o', '--output-file', metavar='FILE',
        help='Output results to this file; otherwise results are printed to '
             'stdout.')

    group = optparse.OptionGroup(parser, 'Header Comparison Options')

    group.add_option(
        '-k', '--ignore-keywords', action='callback', callback=store_list,
        nargs=1, type='str', default=[], dest='ignore_keywords',
        metavar='KEYWORDS',
        help='Comma-separated list of keywords not to be compared. Keywords '
             'may contain wildcard patterns. To exclude all keywords, use '
             '"*"; make sure to have double or single quotes around the '
             'asterisk.')

    group.add_option(
        '-c', '--ignore-comments', action='callback', callback=store_list,
        nargs=1, type='str', default=[], dest='ignore_comments',
        metavar='KEYWORDS',
        help='Comma-separated list of keywords whose comments will not be '
             'compared. Wildcards may be used as with --ignore-keywords.')

    parser.add_option_group(group)

    group = optparse.OptionGroup(parser, 'Table Comparison Options')

    group.add_option(
        '-f', '--ignore-fields', action='callback', callback=store_list,
        nargs=1, type='str', default=[], dest='ignore_fields',
        metavar='COLUMNS',
        help='Comma-separated list of fields (i.e. columns) not to be '
             'compared. All columns may be excluded using "*" as with '
             '--ignore-keywords.')

    parser.add_option_group(group)

    options, args = parser.parse_args(argv)

    # Determine which filenames to compare
    if len(args) != 2:
        parser.error('\n' + textwrap.fill(
            'fitsdiff requires two arguments; see `fitsdiff --help` for more '
            'details.', parser.formatter.width))

    return options, args
def setup_logging(outfile=None):
    """Attach handlers to the fitsdiff logger.

    Warnings and errors are written to stderr; normal INFO-level report
    output goes to ``outfile`` when given, otherwise to the handler's
    default stream.
    """

    class OnlyLevel(logging.Filter):
        """Log only messages matching the specified level."""

        def __init__(self, name='', level=logging.NOTSET):
            logging.Filter.__init__(self, name)
            self.level = level

        def filter(self, rec):
            return rec.levelno == self.level

    log.setLevel(logging.INFO)

    # stderr gets WARNING and above, prefixed with the level name.
    stderr_handler = logging.StreamHandler(sys.stderr)
    stderr_handler.setFormatter(
        logging.Formatter('%(levelname)s: %(message)s'))
    stderr_handler.setLevel(logging.WARNING)
    log.addHandler(stderr_handler)

    if outfile is None:
        report_handler = logging.StreamHandler()
    else:
        report_handler = logging.FileHandler(outfile)

    # File output logs all messages, but stdout logs only INFO messages
    # (since errors are already logged to stderr)
    report_handler.addFilter(OnlyLevel(level=logging.INFO))
    report_handler.setFormatter(logging.Formatter('%(message)s'))
    log.addHandler(report_handler)
def match_files(paths):
    """Pair up the files to compare from the two path arguments.

    Each element of ``paths`` may be a wildcard pattern, a directory name,
    or a plain filename.  Returns an iterable of ``(file1, file2)`` tuples;
    extra unmatched files on the longer side are dropped with a warning.
    Exits the process (status 2) when an argument matches nothing.
    """
    filelists = []

    for path in paths:
        if glob.has_magic(path):
            files = [os.path.abspath(f) for f in glob.glob(path)]
            if not files:
                log.error(
                    'Wildcard pattern %r did not match any files.' % path)
                sys.exit(2)
            filelists.append(files)
        elif os.path.isdir(path):
            # os.listdir() returns bare filenames; join them with the
            # directory first, otherwise abspath() would (incorrectly)
            # resolve them against the current working directory.
            filelists.append([os.path.abspath(os.path.join(path, f))
                              for f in os.listdir(path)])
        elif os.path.isfile(path):
            filelists.append([path])
        else:
            log.error(
                '%r is not an existing file, directory, or wildcard pattern; '
                'see `fitsdiff --help` for more usage help.' % path)
            sys.exit(2)

    filelists[0].sort()
    filelists[1].sort()

    # If one side has more files than the other, warn about and drop the
    # extras so the two sorted lists can be zipped pairwise.
    for a, b in [(0, 1), (1, 0)]:
        if len(filelists[a]) > len(filelists[b]):
            for extra in filelists[a][len(filelists[b]):]:
                log.warning('%r has no match in %r' % (extra, paths[b]))
            filelists[a] = filelists[a][:len(filelists[b])]
            break

    return zip(*filelists)
def main():
    """Command-line entry point for fitsdiff; returns the exit status.

    Returns 0 when every compared pair was identical, 1 otherwise.
    """
    # Arguments from FITSDIFF_SETTINGS are prepended so they take
    # precedence over the corresponding command line arguments.
    argv = sys.argv[1:]
    if 'FITSDIFF_SETTINGS' in os.environ:
        argv = os.environ['FITSDIFF_SETTINGS'].split() + argv

    opts, args = handle_options(argv)

    if not opts.quiet:
        setup_logging(opts.output_file)

    files = match_files(args)

    # Decide where the report goes: nowhere (quiet), a file, or stdout.
    close_file = False
    if opts.quiet:
        out_file = None
    elif opts.output_file:
        out_file = open(opts.output_file, 'wb')
        close_file = True
    else:
        out_file = sys.stdout

    identical = []
    try:
        for left, right in files:
            # TODO: pass in any additional arguments here too
            diff = pyfits.diff.FITSDiff(
                left, right,
                ignore_keywords=opts.ignore_keywords,
                ignore_comments=opts.ignore_comments,
                ignore_fields=opts.ignore_fields,
                numdiffs=opts.numdiffs,
                tolerance=opts.tolerance,
                ignore_blanks=opts.ignore_blanks,
                ignore_blank_cards=opts.ignore_blank_cards)
            diff.report(fileobj=out_file)
            identical.append(diff.identical)
        # Exit status 0 only if every pair was identical.
        return int(not all(identical))
    finally:
        if close_file:
            out_file.close()
| gpl-3.0 |
jrha/aquilon | lib/python2.6/aquilon/worker/commands/search_host.py | 2 | 13235 | # -*- cpy-indent-level: 4; indent-tabs-mode: nil -*-
# ex: set expandtab softtabstop=4 shiftwidth=4:
#
# Copyright (C) 2008,2009,2010,2011,2012,2013 Contributor
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Contains the logic for `aq search host`."""
from sqlalchemy.orm import aliased, contains_eager
from sqlalchemy.sql import and_, or_
from aquilon.exceptions_ import NotFoundException
from aquilon.aqdb.model import (Host, Cluster, Archetype, Personality,
PersonalityGrnMap, HostGrnMap, HostLifecycle,
OperatingSystem, Service, Share, VirtualDisk,
Disk, Machine, Model, DnsRecord, ARecord, Fqdn,
DnsDomain, Interface, AddressAssignment,
NetworkEnvironment, Network, MetaCluster,
VirtualMachine, ClusterResource)
from aquilon.aqdb.model.dns_domain import parse_fqdn
from aquilon.worker.broker import BrokerCommand # pylint: disable=W0611
from aquilon.worker.formats.host import SimpleHostList
from aquilon.worker.dbwrappers.service_instance import get_service_instance
from aquilon.worker.dbwrappers.branch import get_branch_and_author
from aquilon.worker.dbwrappers.grn import lookup_grn
from aquilon.worker.dbwrappers.location import get_location
from aquilon.worker.dbwrappers.network import get_network_byip
from aquilon.worker.dbwrappers.user_principal import get_user_principal
class CommandSearchHost(BrokerCommand):
    """Broker command implementing `aq search host`."""

    required_parameters = []

    def render(self, session, logger, hostname, machine, archetype,
               buildstatus, personality, osname, osversion, service, instance,
               model, machine_type, vendor, serial, cluster,
               guest_on_cluster, guest_on_share, member_cluster_share,
               domain, sandbox, branch, sandbox_owner,
               dns_domain, shortname, mac, ip, networkip, network_environment,
               exact_location, server_of_service, server_of_instance, grn,
               eon_id, fullinfo, **arguments):
        """Build and execute the host search query.

        Every search option that was supplied contributes an additional
        filter (or join) to a single query over Host; all filters combine
        with AND semantics.  Returns the raw Host objects when ``fullinfo``
        is set, otherwise a SimpleHostList formatter wrapper.
        """
        dbnet_env = NetworkEnvironment.get_unique_or_default(session,
                                                             network_environment)

        q = session.query(Host)

        if machine:
            dbmachine = Machine.get_unique(session, machine, compel=True)
            q = q.filter_by(machine=dbmachine)

        # Add the machine definition and the primary name. Use aliases to make
        # sure the end result will be ordered by primary name.
        PriDns = aliased(DnsRecord)
        PriFqdn = aliased(Fqdn)
        PriDomain = aliased(DnsDomain)
        q = q.join(Machine,
                   (PriDns, PriDns.id == Machine.primary_name_id),
                   (PriFqdn, PriDns.fqdn_id == PriFqdn.id),
                   (PriDomain, PriFqdn.dns_domain_id == PriDomain.id))
        q = q.order_by(PriFqdn.name, PriDomain.name)
        q = q.options(contains_eager('machine'),
                      contains_eager('machine.primary_name', alias=PriDns),
                      contains_eager('machine.primary_name.fqdn', alias=PriFqdn),
                      contains_eager('machine.primary_name.fqdn.dns_domain',
                                     alias=PriDomain))
        q = q.reset_joinpoint()

        # Hardware-specific filters
        dblocation = get_location(session, **arguments)
        if dblocation:
            if exact_location:
                q = q.filter(Machine.location == dblocation)
            else:
                # Include hosts in any sub-location as well.
                childids = dblocation.offspring_ids()
                q = q.filter(Machine.location_id.in_(childids))

        if model or vendor or machine_type:
            subq = Model.get_matching_query(session, name=model, vendor=vendor,
                                            machine_type=machine_type,
                                            compel=True)
            q = q.filter(Machine.model_id.in_(subq))

        if serial:
            self.deprecated_option("serial", "Please use search machine --serial instead.",
                                   logger=logger, **arguments)
            q = q.filter(Machine.serial_no == serial)

        # DNS IP address related filters
        if mac or ip or networkip or hostname or dns_domain or shortname:
            # Inner joins are cheaper than outer joins, so make some effort to
            # use inner joins when possible
            if mac or ip or networkip:
                q = q.join(Interface)
            else:
                q = q.outerjoin(Interface)
            if ip or networkip:
                q = q.join(AddressAssignment, Network, from_joinpoint=True)
            else:
                q = q.outerjoin(AddressAssignment, Network, from_joinpoint=True)

        if mac:
            self.deprecated_option("mac", "Please use search machine "
                                   "--mac instead.", logger=logger,
                                   **arguments)
            q = q.filter(Interface.mac == mac)
        if ip:
            q = q.filter(AddressAssignment.ip == ip)
            q = q.filter(Network.network_environment == dbnet_env)
        if networkip:
            dbnetwork = get_network_byip(session, networkip, dbnet_env)
            q = q.filter(AddressAssignment.network == dbnetwork)

        dbdns_domain = None
        if hostname:
            (shortname, dbdns_domain) = parse_fqdn(session, hostname)
        if dns_domain:
            dbdns_domain = DnsDomain.get_unique(session, dns_domain, compel=True)

        if shortname or dbdns_domain:
            # Match either an auxiliary A record on an assigned address or
            # the host's primary name.
            ARecAlias = aliased(ARecord)
            ARecFqdn = aliased(Fqdn)
            q = q.outerjoin((ARecAlias,
                             and_(ARecAlias.ip == AddressAssignment.ip,
                                  ARecAlias.network_id == AddressAssignment.network_id)),
                            (ARecFqdn, ARecAlias.fqdn_id == ARecFqdn.id))
            if shortname:
                q = q.filter(or_(ARecFqdn.name == shortname,
                                 PriFqdn.name == shortname))
            if dbdns_domain:
                q = q.filter(or_(ARecFqdn.dns_domain == dbdns_domain,
                                 PriFqdn.dns_domain == dbdns_domain))
            q = q.reset_joinpoint()

        # Configuration branch (domain/sandbox) filters
        (dbbranch, dbauthor) = get_branch_and_author(session, logger,
                                                     domain=domain,
                                                     sandbox=sandbox,
                                                     branch=branch)
        if sandbox_owner:
            dbauthor = get_user_principal(session, sandbox_owner)

        if dbbranch:
            q = q.filter_by(branch=dbbranch)
        if dbauthor:
            q = q.filter_by(sandbox_author=dbauthor)

        if archetype:
            # Added to the searches as appropriate below.
            dbarchetype = Archetype.get_unique(session, archetype, compel=True)
        if personality and archetype:
            dbpersonality = Personality.get_unique(session,
                                                   archetype=dbarchetype,
                                                   name=personality,
                                                   compel=True)
            q = q.filter_by(personality=dbpersonality)
        elif personality:
            PersAlias = aliased(Personality)
            q = q.join(PersAlias).filter_by(name=personality)
            q = q.reset_joinpoint()
        elif archetype:
            PersAlias = aliased(Personality)
            q = q.join(PersAlias).filter_by(archetype=dbarchetype)
            q = q.reset_joinpoint()

        if buildstatus:
            dbbuildstatus = HostLifecycle.get_unique(session, buildstatus,
                                                     compel=True)
            q = q.filter_by(status=dbbuildstatus)

        if osname and osversion and archetype:
            # archetype was already resolved above
            dbos = OperatingSystem.get_unique(session, name=osname,
                                              version=osversion,
                                              archetype=dbarchetype,
                                              compel=True)
            q = q.filter_by(operating_system=dbos)
        elif osname or osversion:
            q = q.join('operating_system')
            if osname:
                q = q.filter_by(name=osname)
            if osversion:
                q = q.filter_by(version=osversion)
            q = q.reset_joinpoint()

        # Service client filters
        if service:
            dbservice = Service.get_unique(session, service, compel=True)
            if instance:
                dbsi = get_service_instance(session, dbservice, instance)
                q = q.filter(Host.services_used.contains(dbsi))
            else:
                q = q.join('services_used')
                q = q.filter_by(service=dbservice)
                q = q.reset_joinpoint()
        elif instance:
            q = q.join('services_used')
            q = q.filter_by(name=instance)
            q = q.reset_joinpoint()

        # Service server filters
        if server_of_service:
            dbserver_service = Service.get_unique(session, server_of_service,
                                                  compel=True)
            if server_of_instance:
                dbssi = get_service_instance(session, dbserver_service,
                                             server_of_instance)
                q = q.join('_services_provided')
                q = q.filter_by(service_instance=dbssi)
                q = q.reset_joinpoint()
            else:
                q = q.join('_services_provided', 'service_instance')
                q = q.filter_by(service=dbserver_service)
                q = q.reset_joinpoint()
        elif server_of_instance:
            q = q.join('_services_provided', 'service_instance')
            q = q.filter_by(name=server_of_instance)
            q = q.reset_joinpoint()

        # Cluster membership filters
        if cluster:
            dbcluster = Cluster.get_unique(session, cluster, compel=True)
            if isinstance(dbcluster, MetaCluster):
                q = q.join('_cluster', 'cluster', '_metacluster')
                q = q.filter_by(metacluster=dbcluster)
            else:
                q = q.filter_by(cluster=dbcluster)
            q = q.reset_joinpoint()
        if guest_on_cluster:
            # TODO: this does not handle metaclusters according to Wes
            dbcluster = Cluster.get_unique(session, guest_on_cluster,
                                           compel=True)
            q = q.join('machine', VirtualMachine, ClusterResource)
            q = q.filter_by(cluster=dbcluster)
            q = q.reset_joinpoint()
        if guest_on_share:
            #v2
            v2shares = session.query(Share.id).filter_by(name=guest_on_share).all()
            if not v2shares:
                raise NotFoundException("No shares found with name {0}."
                                        .format(guest_on_share))

            NasAlias = aliased(VirtualDisk)
            q = q.join('machine', 'disks', (NasAlias, NasAlias.id == Disk.id))
            q = q.filter(
                NasAlias.share_id.in_([s[0] for s in v2shares]))
            q = q.reset_joinpoint()

        if member_cluster_share:
            #v2
            v2shares = session.query(Share.id).filter_by(name=member_cluster_share).all()
            if not v2shares:
                # Bug fix: this error previously reported guest_on_share
                # even though member_cluster_share was the name searched.
                raise NotFoundException("No shares found with name {0}."
                                        .format(member_cluster_share))

            NasAlias = aliased(VirtualDisk)
            q = q.join('_cluster', 'cluster', 'resholder', VirtualMachine,
                       'machine', 'disks', (NasAlias, NasAlias.id == Disk.id))
            q = q.filter(
                NasAlias.share_id.in_([s[0] for s in v2shares]))
            q = q.reset_joinpoint()

        # GRN (ownership) filters: match hosts owned/mapped directly, or
        # via their personality.
        if grn or eon_id:
            dbgrn = lookup_grn(session, grn, eon_id, autoupdate=False)

            persq = session.query(Personality.id)
            persq = persq.outerjoin(PersonalityGrnMap)
            persq = persq.filter(or_(Personality.owner_eon_id == dbgrn.eon_id,
                                     PersonalityGrnMap.eon_id == dbgrn.eon_id))
            q = q.outerjoin(HostGrnMap)
            q = q.filter(or_(Host.owner_eon_id == dbgrn.eon_id,
                             HostGrnMap.eon_id == dbgrn.eon_id,
                             Host.personality_id.in_(persq.subquery())))
            q = q.reset_joinpoint()

        if fullinfo:
            return q.all()
        return SimpleHostList(q.all())
| apache-2.0 |
saisaizhang/Food | flask/lib/python2.7/site-packages/sqlalchemy/ext/automap.py | 33 | 39713 | # ext/automap.py
# Copyright (C) 2005-2014 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
"""Define an extension to the :mod:`sqlalchemy.ext.declarative` system
which automatically generates mapped classes and relationships from a database
schema, typically though not necessarily one which is reflected.
.. versionadded:: 0.9.1 Added :mod:`sqlalchemy.ext.automap`.
.. note::
The :mod:`sqlalchemy.ext.automap` extension should be considered
**experimental** as of 0.9.1. Featureset and API stability is
not guaranteed at this time.
It is hoped that the :class:`.AutomapBase` system provides a quick
and modernized solution to the problem that the very famous
`SQLSoup <https://sqlsoup.readthedocs.org/en/latest/>`_
also tries to solve, that of generating a quick and rudimentary object
model from an existing database on the fly. By addressing the issue strictly
at the mapper configuration level, and integrating fully with existing
Declarative class techniques, :class:`.AutomapBase` seeks to provide
a well-integrated approach to the issue of expediently auto-generating ad-hoc
mappings.
Basic Use
=========
The simplest usage is to reflect an existing database into a new model.
We create a new :class:`.AutomapBase` class in a similar manner as to how
we create a declarative base class, using :func:`.automap_base`.
We then call :meth:`.AutomapBase.prepare` on the resulting base class,
asking it to reflect the schema and produce mappings::
from sqlalchemy.ext.automap import automap_base
from sqlalchemy.orm import Session
from sqlalchemy import create_engine
Base = automap_base()
# engine, suppose it has two tables 'user' and 'address' set up
engine = create_engine("sqlite:///mydatabase.db")
# reflect the tables
Base.prepare(engine, reflect=True)
# mapped classes are now created with names by default
# matching that of the table name.
User = Base.classes.user
Address = Base.classes.address
session = Session(engine)
# rudimentary relationships are produced
session.add(Address(email_address="foo@bar.com", user=User(name="foo")))
session.commit()
# collection-based relationships are by default named
# "<classname>_collection"
print (u1.address_collection)
Above, calling :meth:`.AutomapBase.prepare` while passing along the
:paramref:`.AutomapBase.prepare.reflect` parameter indicates that the
:meth:`.MetaData.reflect` method will be called on this declarative base
classes' :class:`.MetaData` collection; then, each viable
:class:`.Table` within the :class:`.MetaData` will get a new mapped class
generated automatically. The :class:`.ForeignKeyConstraint` objects which
link the various tables together will be used to produce new, bidirectional
:func:`.relationship` objects between classes. The classes and relationships
follow along a default naming scheme that we can customize. At this point,
our basic mapping consisting of related ``User`` and ``Address`` classes is
ready to use in the traditional way.
Generating Mappings from an Existing MetaData
=============================================
We can pass a pre-declared :class:`.MetaData` object to :func:`.automap_base`.
This object can be constructed in any way, including programmatically, from
a serialized file, or from itself being reflected using
:meth:`.MetaData.reflect`. Below we illustrate a combination of reflection and
explicit table declaration::
from sqlalchemy import create_engine, MetaData, Table, Column, ForeignKey
engine = create_engine("sqlite:///mydatabase.db")
# produce our own MetaData object
metadata = MetaData()
# we can reflect it ourselves from a database, using options
# such as 'only' to limit what tables we look at...
metadata.reflect(engine, only=['user', 'address'])
# ... or just define our own Table objects with it (or combine both)
Table('user_order', metadata,
Column('id', Integer, primary_key=True),
Column('user_id', ForeignKey('user.id'))
)
# we can then produce a set of mappings from this MetaData.
Base = automap_base(metadata=metadata)
# calling prepare() just sets up mapped classes and relationships.
Base.prepare()
# mapped classes are ready
User, Address, Order = Base.classes.user, Base.classes.address,\
Base.classes.user_order
Specifying Classes Explicitly
=============================
The :mod:`.sqlalchemy.ext.automap` extension allows classes to be defined
explicitly, in a way similar to that of the :class:`.DeferredReflection` class.
Classes that extend from :class:`.AutomapBase` act like regular declarative
classes, but are not immediately mapped after their construction, and are
instead mapped when we call :meth:`.AutomapBase.prepare`. The
:meth:`.AutomapBase.prepare` method will make use of the classes we've
established based on the table name we use. If our schema contains tables
``user`` and ``address``, we can define one or both of the classes to be used::
from sqlalchemy.ext.automap import automap_base
from sqlalchemy import create_engine
# automap base
Base = automap_base()
# pre-declare User for the 'user' table
class User(Base):
__tablename__ = 'user'
# override schema elements like Columns
user_name = Column('name', String)
# override relationships too, if desired.
# we must use the same name that automap would use for the
# relationship, and also must refer to the class name that automap will
# generate for "address"
address_collection = relationship("address", collection_class=set)
# reflect
engine = create_engine("sqlite:///mydatabase.db")
Base.prepare(engine, reflect=True)
# we still have Address generated from the tablename "address",
# but User is the same as Base.classes.User now
Address = Base.classes.address
u1 = session.query(User).first()
print (u1.address_collection)
# the backref is still there:
a1 = session.query(Address).first()
print (a1.user)
Above, one of the more intricate details is that we illustrated overriding
one of the :func:`.relationship` objects that automap would have created.
To do this, we needed to make sure the names match up with what automap
would normally generate, in that the relationship name would be
``User.address_collection`` and the name of the class referred to, from
automap's perspective, is called ``address``, even though we are referring to
it as ``Address`` within our usage of this class.
Overriding Naming Schemes
=========================
:mod:`.sqlalchemy.ext.automap` is tasked with producing mapped classes and
relationship names based on a schema, which means it has decision points in how
these names are determined. These three decision points are provided using
functions which can be passed to the :meth:`.AutomapBase.prepare` method, and
are known as :func:`.classname_for_table`,
:func:`.name_for_scalar_relationship`,
and :func:`.name_for_collection_relationship`. Any or all of these
functions are provided as in the example below, where we use a "camel case"
scheme for class names and a "pluralizer" for collection names using the
`Inflect <https://pypi.python.org/pypi/inflect>`_ package::
import re
import inflect
def camelize_classname(base, tablename, table):
"Produce a 'camelized' class name, e.g. "
"'words_and_underscores' -> 'WordsAndUnderscores'"
return str(tablename[0].upper() + \\
re.sub(r'_(\w)', lambda m: m.group(1).upper(), tablename[1:]))
_pluralizer = inflect.engine()
def pluralize_collection(base, local_cls, referred_cls, constraint):
"Produce an 'uncamelized', 'pluralized' class name, e.g. "
"'SomeTerm' -> 'some_terms'"
referred_name = referred_cls.__name__
uncamelized = referred_name[0].lower() + \\
re.sub(r'\W',
lambda m: "_%s" % m.group(0).lower(),
referred_name[1:])
pluralized = _pluralizer.plural(uncamelized)
return pluralized
from sqlalchemy.ext.automap import automap_base
Base = automap_base()
engine = create_engine("sqlite:///mydatabase.db")
Base.prepare(engine, reflect=True,
classname_for_table=camelize_classname,
name_for_collection_relationship=pluralize_collection
)
From the above mapping, we would now have classes ``User`` and ``Address``,
where the collection from ``User`` to ``Address`` is called
``User.addresses``::
User, Address = Base.classes.User, Base.classes.Address
u1 = User(addresses=[Address(email="foo@bar.com")])
Relationship Detection
======================
The vast majority of what automap accomplishes is the generation of
:func:`.relationship` structures based on foreign keys. The mechanism
by which this works for many-to-one and one-to-many relationships is as
follows:
1. A given :class:`.Table`, known to be mapped to a particular class,
is examined for :class:`.ForeignKeyConstraint` objects.
2. From each :class:`.ForeignKeyConstraint`, the remote :class:`.Table`
object present is matched up to the class to which it is to be mapped,
if any, else it is skipped.
3. As the :class:`.ForeignKeyConstraint` we are examining corresponds to a
reference from the immediate mapped class, the relationship will be set up
as a many-to-one referring to the referred class; a corresponding
one-to-many backref will be created on the referred class referring
to this class.
4. The names of the relationships are determined using the
:paramref:`.AutomapBase.prepare.name_for_scalar_relationship` and
:paramref:`.AutomapBase.prepare.name_for_collection_relationship`
callable functions. It is important to note that the default relationship
naming derives the name from the **the actual class name**. If you've
given a particular class an explicit name by declaring it, or specified an
alternate class naming scheme, that's the name from which the relationship
name will be derived.
5. The classes are inspected for an existing mapped property matching these
names. If one is detected on one side, but none on the other side,
:class:`.AutomapBase` attempts to create a relationship on the missing side,
then uses the :paramref:`.relationship.back_populates` parameter in order to
point the new relationship to the other side.
6. In the usual case where no relationship is on either side,
:meth:`.AutomapBase.prepare` produces a :func:`.relationship` on the
"many-to-one" side and matches it to the other using the
:paramref:`.relationship.backref` parameter.
7. Production of the :func:`.relationship` and optionally the :func:`.backref`
is handed off to the :paramref:`.AutomapBase.prepare.generate_relationship`
function, which can be supplied by the end-user in order to augment
the arguments passed to :func:`.relationship` or :func:`.backref` or to
make use of custom implementations of these functions.
Custom Relationship Arguments
-----------------------------
The :paramref:`.AutomapBase.prepare.generate_relationship` hook can be used
to add parameters to relationships. For most cases, we can make use of the
existing :func:`.automap.generate_relationship` function to return
the object, after augmenting the given keyword dictionary with our own
arguments.
Below is an illustration of how to send
:paramref:`.relationship.cascade` and
:paramref:`.relationship.passive_deletes`
options along to all one-to-many relationships::
from sqlalchemy.ext.automap import generate_relationship
def _gen_relationship(base, direction, return_fn,
attrname, local_cls, referred_cls, **kw):
if direction is interfaces.ONETOMANY:
kw['cascade'] = 'all, delete-orphan'
kw['passive_deletes'] = True
# make use of the built-in function to actually return
# the result.
return generate_relationship(base, direction, return_fn,
attrname, local_cls, referred_cls, **kw)
from sqlalchemy.ext.automap import automap_base
from sqlalchemy import create_engine
# automap base
Base = automap_base()
engine = create_engine("sqlite:///mydatabase.db")
Base.prepare(engine, reflect=True,
generate_relationship=_gen_relationship)
Many-to-Many relationships
--------------------------
:mod:`.sqlalchemy.ext.automap` will generate many-to-many relationships, e.g.
those which contain a ``secondary`` argument. The process for producing these
is as follows:
1. A given :class:`.Table` is examined for :class:`.ForeignKeyConstraint`
objects, before any mapped class has been assigned to it.
2. If the table contains two and exactly two :class:`.ForeignKeyConstraint`
objects, and all columns within this table are members of these two
:class:`.ForeignKeyConstraint` objects, the table is assumed to be a
"secondary" table, and will **not be mapped directly**.
3. The two (or one, for self-referential) external tables to which the
:class:`.Table` refers to are matched to the classes to which they will be
mapped, if any.
4. If mapped classes for both sides are located, a many-to-many bi-directional
:func:`.relationship` / :func:`.backref` pair is created between the two
classes.
5. The override logic for many-to-many works the same as that of one-to-many/
many-to-one; the :func:`.generate_relationship` function is called upon
to generate the structures and existing attributes will be maintained.
Relationships with Inheritance
------------------------------
:mod:`.sqlalchemy.ext.automap` will not generate any relationships between
two classes that are in an inheritance relationship. That is, with two
classes given as follows::
class Employee(Base):
__tablename__ = 'employee'
id = Column(Integer, primary_key=True)
type = Column(String(50))
__mapper_args__ = {
'polymorphic_identity':'employee', 'polymorphic_on': type
}
class Engineer(Employee):
__tablename__ = 'engineer'
id = Column(Integer, ForeignKey('employee.id'), primary_key=True)
__mapper_args__ = {
'polymorphic_identity':'engineer',
}
The foreign key from ``Engineer`` to ``Employee`` is used not for a
relationship, but to establish joined inheritance between the two classes.
Note that this means automap will not generate *any* relationships
for foreign keys that link from a subclass to a superclass. If a mapping
has actual relationships from subclass to superclass as well, those
need to be explicit. Below, as we have two separate foreign keys
from ``Engineer`` to ``Employee``, we need to set up both the relationship
we want as well as the ``inherit_condition``, as these are not things
SQLAlchemy can guess::
class Employee(Base):
__tablename__ = 'employee'
id = Column(Integer, primary_key=True)
type = Column(String(50))
__mapper_args__ = {
'polymorphic_identity':'employee', 'polymorphic_on':type
}
class Engineer(Employee):
__tablename__ = 'engineer'
id = Column(Integer, ForeignKey('employee.id'), primary_key=True)
favorite_employee_id = Column(Integer, ForeignKey('employee.id'))
favorite_employee = relationship(Employee,
foreign_keys=favorite_employee_id)
__mapper_args__ = {
'polymorphic_identity':'engineer',
'inherit_condition': id == Employee.id
}
Handling Simple Naming Conflicts
--------------------------------
In the case of naming conflicts during mapping, override any of
:func:`.classname_for_table`, :func:`.name_for_scalar_relationship`,
and :func:`.name_for_collection_relationship` as needed. For example, if
automap is attempting to name a many-to-one relationship the same as an
existing column, an alternate convention can be conditionally selected. Given
a schema:
.. sourcecode:: sql
CREATE TABLE table_a (
id INTEGER PRIMARY KEY
);
CREATE TABLE table_b (
id INTEGER PRIMARY KEY,
table_a INTEGER,
FOREIGN KEY(table_a) REFERENCES table_a(id)
);
The above schema will first automap the ``table_a`` table as a class named
``table_a``; it will then automap a relationship onto the class for ``table_b``
with the same name as this related class, e.g. ``table_a``. This
relationship name conflicts with the mapping column ``table_b.table_a``,
and will emit an error on mapping.
We can resolve this conflict by using an underscore as follows::
def name_for_scalar_relationship(base, local_cls, referred_cls, constraint):
name = referred_cls.__name__.lower()
local_table = local_cls.__table__
if name in local_table.columns:
newname = name + "_"
warnings.warn(
"Already detected name %s present. using %s" %
(name, newname))
return newname
return name
Base.prepare(engine, reflect=True,
name_for_scalar_relationship=name_for_scalar_relationship)
Alternatively, we can change the name on the column side. The columns
that are mapped can be modified using the technique described at
:ref:`mapper_column_distinct_names`, by assigning the column explicitly
to a new name::
Base = automap_base()
class TableB(Base):
__tablename__ = 'table_b'
_table_a = Column('table_a', ForeignKey('table_a.id'))
Base.prepare(engine, reflect=True)
Using Automap with Explicit Declarations
========================================
As noted previously, automap has no dependency on reflection, and can make
use of any collection of :class:`.Table` objects within a :class:`.MetaData`
collection.  From this, it follows that automap can also be used to
generate missing relationships given an otherwise complete model that fully
defines table metadata::
from sqlalchemy.ext.automap import automap_base
from sqlalchemy import Column, Integer, String, ForeignKey
Base = automap_base()
class User(Base):
__tablename__ = 'user'
id = Column(Integer, primary_key=True)
name = Column(String)
class Address(Base):
__tablename__ = 'address'
id = Column(Integer, primary_key=True)
email = Column(String)
user_id = Column(ForeignKey('user.id'))
# produce relationships
Base.prepare()
# mapping is complete, with "address_collection" and
# "user" relationships
a1 = Address(email='u1')
a2 = Address(email='u2')
u1 = User(address_collection=[a1, a2])
assert a1.user is u1
Above, given mostly complete ``User`` and ``Address`` mappings, the
:class:`.ForeignKey` which we defined on ``Address.user_id`` allowed a
bidirectional relationship pair ``Address.user`` and
``User.address_collection`` to be generated on the mapped classes.
Note that when subclassing :class:`.AutomapBase`,
the :meth:`.AutomapBase.prepare` method is required; if not called, the classes
we've declared are in an un-mapped state.
"""
from .declarative import declarative_base as _declarative_base
from .declarative.base import _DeferredMapperConfig
from ..sql import and_
from ..schema import ForeignKeyConstraint
from ..orm import relationship, backref, interfaces
from .. import util
def classname_for_table(base, tablename, table):
    """Produce the class name to use for a given table name.

    This is the default hook; an alternate callable may be supplied via
    the :paramref:`.AutomapBase.prepare.classname_for_table` parameter.

    :param base: the :class:`.AutomapBase` class doing the prepare.
    :param tablename: string name of the :class:`.Table`.
    :param table: the :class:`.Table` object itself.
    :return: a string class name.

    .. note::  Under Python 2 the returned name must be a plain ``str()``
       (non-unicode) object; ``Table.name`` is typically a unicode
       subclass, hence the explicit conversion here.
    """
    # Plain string conversion of the table name is the entire default policy.
    class_name = str(tablename)
    return class_name
def name_for_scalar_relationship(base, local_cls, referred_cls, constraint):
    """Produce the attribute name for a scalar (many-to-one) reference.

    Default hook; override via the
    :paramref:`.AutomapBase.prepare.name_for_scalar_relationship` parameter.

    :param base: the :class:`.AutomapBase` class doing the prepare.
    :param local_cls: the class to be mapped on the local side.
    :param referred_cls: the class to be mapped on the referring side.
    :param constraint: the :class:`.ForeignKeyConstraint` that is being
     inspected to produce this relationship.
    """
    # The default naming policy: lowercased name of the referred class.
    target_name = referred_cls.__name__
    return target_name.lower()
def name_for_collection_relationship(
        base, local_cls, referred_cls, constraint):
    """Produce the attribute name for a collection (one-to-many) reference.

    Default hook; override via the
    :paramref:`.AutomapBase.prepare.name_for_collection_relationship`
    parameter.

    :param base: the :class:`.AutomapBase` class doing the prepare.
    :param local_cls: the class to be mapped on the local side.
    :param referred_cls: the class to be mapped on the referring side.
    :param constraint: the :class:`.ForeignKeyConstraint` that is being
     inspected to produce this relationship.
    """
    # Lowercased referred-class name plus a fixed "_collection" suffix.
    base_name = referred_cls.__name__.lower()
    return "%s_collection" % base_name
def generate_relationship(
        base, direction, return_fn, attrname, local_cls, referred_cls, **kw):
    r"""Generate a :func:`.relationship` or :func:`.backref` on behalf of two
    mapped classes.
    An alternate implementation of this function can be specified using the
    :paramref:`.AutomapBase.prepare.generate_relationship` parameter.
    The default implementation of this function is as follows::
        if return_fn is backref:
            return return_fn(attrname, **kw)
        elif return_fn is relationship:
            return return_fn(referred_cls, **kw)
        else:
            raise TypeError("Unknown relationship function: %s" % return_fn)
    :param base: the :class:`.AutomapBase` class doing the prepare.
    :param direction: indicate the "direction" of the relationship; this will
     be one of :data:`.ONETOMANY`, :data:`.MANYTOONE`, :data:`.MANYTOMANY`.
    :param return_fn: the function that is used by default to create the
     relationship.  This will be either :func:`.relationship` or
     :func:`.backref`.  The :func:`.backref` function's result will be used to
     produce a new :func:`.relationship` in a second step, so it is critical
     that user-defined implementations correctly differentiate between the two
     functions, if a custom relationship function is being used.
    :param attrname: the attribute name to which this relationship is being
     assigned. If the value of :paramref:`.generate_relationship.return_fn` is
     the :func:`.backref` function, then this name is the name that is being
     assigned to the backref.
    :param local_cls: the "local" class to which this relationship or backref
     will be locally present.
    :param referred_cls: the "referred" class to which the relationship or
     backref refers to.
    :param \**kw: all additional keyword arguments are passed along to the
     function.
    :return: a :func:`.relationship` or :func:`.backref` construct, as dictated
     by the :paramref:`.generate_relationship.return_fn` parameter.
    """
    # Dispatch on identity of the factory function; a backref spec takes the
    # attribute name, while a relationship takes the target class.
    if return_fn is backref:
        return return_fn(attrname, **kw)
    elif return_fn is relationship:
        return return_fn(referred_cls, **kw)
    else:
        raise TypeError("Unknown relationship function: %s" % return_fn)
class AutomapBase(object):
    """Base class for an "automap" schema.
    The :class:`.AutomapBase` class can be compared to the "declarative base"
    class that is produced by the :func:`.declarative.declarative_base`
    function.  In practice, the :class:`.AutomapBase` class is always used
    as a mixin along with an actual declarative base.
    A new subclassable :class:`.AutomapBase` is typically instantiated
    using the :func:`.automap_base` function.
    .. seealso::
        :ref:`automap_toplevel`
    """
    __abstract__ = True
    classes = None
    """An instance of :class:`.util.Properties` containing classes.
    This object behaves much like the ``.c`` collection on a table.  Classes
    are present under the name they were given, e.g.::
        Base = automap_base()
        Base.prepare(engine=some_engine, reflect=True)
        User, Address = Base.classes.User, Base.classes.Address
    """
    @classmethod
    def prepare(
            cls,
            engine=None,
            reflect=False,
            classname_for_table=classname_for_table,
            collection_class=list,
            name_for_scalar_relationship=name_for_scalar_relationship,
            name_for_collection_relationship=name_for_collection_relationship,
            generate_relationship=generate_relationship):
        """Extract mapped classes and relationships from the :class:`.MetaData` and
        perform mappings.
        :param engine: an :class:`.Engine` or :class:`.Connection` with which
         to perform schema reflection, if specified.
         If the :paramref:`.AutomapBase.prepare.reflect` argument is False,
         this object is not used.
        :param reflect: if True, the :meth:`.MetaData.reflect` method is called
         on the :class:`.MetaData` associated with this :class:`.AutomapBase`.
         The :class:`.Engine` passed via
         :paramref:`.AutomapBase.prepare.engine` will be used to perform the
         reflection if present; else, the :class:`.MetaData` should already be
         bound to some engine else the operation will fail.
        :param classname_for_table: callable function which will be used to
         produce new class names, given a table name.  Defaults to
         :func:`.classname_for_table`.
        :param name_for_scalar_relationship: callable function which will be
         used to produce relationship names for scalar relationships.  Defaults
         to :func:`.name_for_scalar_relationship`.
        :param name_for_collection_relationship: callable function which will
         be used to produce relationship names for collection-oriented
         relationships.  Defaults to :func:`.name_for_collection_relationship`.
        :param generate_relationship: callable function which will be used to
         actually generate :func:`.relationship` and :func:`.backref`
         constructs.  Defaults to :func:`.generate_relationship`.
        :param collection_class: the Python collection class that will be used
         when a new :func:`.relationship` object is created that represents a
         collection.  Defaults to ``list``.
        """
        if reflect:
            # Reflect database tables into this base's MetaData; existing
            # Table objects are extended in place rather than replaced.
            cls.metadata.reflect(
                engine,
                extend_existing=True,
                autoload_replace=False
            )
        # Index the already-declared (deferred) class configurations by
        # their Table, so reflected tables can be matched to user classes.
        table_to_map_config = dict(
            (m.local_table, m)
            for m in _DeferredMapperConfig.
            classes_for_base(cls, sort=False)
        )
        many_to_many = []
        for table in cls.metadata.tables.values():
            lcl_m2m, rem_m2m, m2m_const = _is_many_to_many(cls, table)
            if lcl_m2m is not None:
                # Pure association table: remember it for the m2m pass;
                # no mapped class is generated for it.
                many_to_many.append((lcl_m2m, rem_m2m, m2m_const, table))
            elif not table.primary_key:
                # A table without a primary key cannot be mapped.
                continue
            elif table not in table_to_map_config:
                # No user-declared class for this table: generate one
                # dynamically, subclassing this automap base.
                mapped_cls = type(
                    classname_for_table(cls, table.name, table),
                    (cls, ),
                    {"__table__": table}
                )
                map_config = _DeferredMapperConfig.config_for_cls(mapped_cls)
                cls.classes[map_config.cls.__name__] = mapped_cls
                table_to_map_config[table] = map_config
        # Generate many-to-one / one-to-many pairs from foreign keys.
        for map_config in table_to_map_config.values():
            _relationships_for_fks(cls,
                                   map_config,
                                   table_to_map_config,
                                   collection_class,
                                   name_for_scalar_relationship,
                                   name_for_collection_relationship,
                                   generate_relationship)
        # Generate many-to-many pairs across the association tables.
        for lcl_m2m, rem_m2m, m2m_const, table in many_to_many:
            _m2m_relationship(cls, lcl_m2m, rem_m2m, m2m_const, table,
                              table_to_map_config,
                              collection_class,
                              name_for_scalar_relationship,
                              name_for_collection_relationship,
                              generate_relationship)
        # Finally apply mapper() to every deferred configuration, in
        # dependency-sorted order.
        for map_config in _DeferredMapperConfig.classes_for_base(cls):
            map_config.map()
    _sa_decl_prepare = True
    """Indicate that the mapping of classes should be deferred.
    The presence of this attribute name indicates to declarative
    that the call to mapper() should not occur immediately; instead,
    information about the table and attributes to be mapped are gathered
    into an internal structure called _DeferredMapperConfig.  These
    objects can be collected later using classes_for_base(), additional
    mapping decisions can be made, and then the map() method will actually
    apply the mapping.
    The only real reason this deferral of the whole
    thing is needed is to support primary key columns that aren't reflected
    yet when the class is declared; everything else can theoretically be
    added to the mapper later.  However, the _DeferredMapperConfig is a
    nice interface in any case which exists at that not usually exposed point
    at which declarative has the class and the Table but hasn't called
    mapper() yet.
    """
def automap_base(declarative_base=None, **kw):
    r"""Produce a declarative automap base.
    This function produces a new base class that is a product of the
    :class:`.AutomapBase` class as well a declarative base produced by
    :func:`.declarative.declarative_base`.
    All parameters other than ``declarative_base`` are keyword arguments
    that are passed directly to the :func:`.declarative.declarative_base`
    function.
    :param declarative_base: an existing class produced by
     :func:`.declarative.declarative_base`.  When this is passed, the function
     no longer invokes :func:`.declarative.declarative_base` itself, and all
     other keyword arguments are ignored.
    :param \**kw: keyword arguments are passed along to
     :func:`.declarative.declarative_base`.
    """
    if declarative_base is None:
        Base = _declarative_base(**kw)
    else:
        Base = declarative_base
    # Build a new class combining AutomapBase behavior with the declarative
    # base; each automap base gets its own fresh ``classes`` collection.
    return type(
        Base.__name__,
        (AutomapBase, Base,),
        {"__abstract__": True, "classes": util.Properties({})}
    )
def _is_many_to_many(automap_base, table):
    """Detect whether ``table`` is a pure association (many-to-many) table.

    A table qualifies when it has exactly two foreign key constraints and
    every one of its columns participates in one of those constraints.

    :return: a three-tuple ``(left_table, right_table, constraints)``,
     or ``(None, None, None)`` when the table does not qualify.
    """
    constraints = [
        c for c in table.constraints
        if isinstance(c, ForeignKeyConstraint)
    ]
    # Exactly two FK constraints are required for an association table.
    if len(constraints) != 2:
        return None, None, None
    # Every column of the table must be covered by one of the two FKs.
    fk_columns = set()
    for const in constraints:
        fk_columns.update(elem.parent for elem in const.elements)
    if fk_columns != set(table.c):
        return None, None, None
    first, second = constraints
    return (
        first.elements[0].column.table,
        second.elements[0].column.table,
        constraints
    )
def _relationships_for_fks(automap_base, map_config, table_to_map_config,
                           collection_class,
                           name_for_scalar_relationship,
                           name_for_collection_relationship,
                           generate_relationship):
    """Generate many-to-one relationships (plus one-to-many backrefs) for
    every foreign key constraint on the table mapped by ``map_config``.

    Mutates ``map_config.properties`` and the referred config's
    ``properties`` in place; emits nothing for FKs whose target table has
    no mapped class.
    """
    local_table = map_config.local_table
    local_cls = map_config.cls
    if local_table is None:
        return
    for constraint in local_table.constraints:
        if isinstance(constraint, ForeignKeyConstraint):
            fks = constraint.elements
            referred_table = fks[0].column.table
            referred_cfg = table_to_map_config.get(referred_table, None)
            if referred_cfg is None:
                continue
            referred_cls = referred_cfg.cls
            # Skip inheritance-style FKs (subclass -> superclass); these are
            # handled by joined-table inheritance, not a relationship.
            if local_cls is not referred_cls and issubclass(
                    local_cls, referred_cls):
                continue
            relationship_name = name_for_scalar_relationship(
                automap_base,
                local_cls,
                referred_cls, constraint)
            backref_name = name_for_collection_relationship(
                automap_base,
                referred_cls,
                local_cls,
                constraint
            )
            # Only create a backref if the referred class doesn't already
            # define an attribute of that name.
            create_backref = backref_name not in referred_cfg.properties
            if relationship_name not in map_config.properties:
                if create_backref:
                    backref_obj = generate_relationship(
                        automap_base,
                        interfaces.ONETOMANY, backref,
                        backref_name, referred_cls, local_cls,
                        collection_class=collection_class)
                else:
                    backref_obj = None
                rel = generate_relationship(automap_base,
                                            interfaces.MANYTOONE,
                                            relationship,
                                            relationship_name,
                                            local_cls, referred_cls,
                                            foreign_keys=[
                                                fk.parent
                                                for fk in constraint.elements],
                                            backref=backref_obj,
                                            remote_side=[
                                                fk.column
                                                for fk in constraint.elements]
                                            )
                if rel is not None:
                    map_config.properties[relationship_name] = rel
                    # Pre-existing backref: link the two explicitly.
                    if not create_backref:
                        referred_cfg.properties[
                            backref_name].back_populates = relationship_name
            elif create_backref:
                # The scalar side already exists; generate only the
                # collection side on the referred class.
                rel = generate_relationship(automap_base,
                                            interfaces.ONETOMANY,
                                            relationship,
                                            backref_name,
                                            referred_cls, local_cls,
                                            foreign_keys=[
                                                fk.parent
                                                for fk in constraint.elements],
                                            back_populates=relationship_name,
                                            collection_class=collection_class)
                if rel is not None:
                    referred_cfg.properties[backref_name] = rel
                    map_config.properties[
                        relationship_name].back_populates = backref_name
def _m2m_relationship(automap_base, lcl_m2m, rem_m2m, m2m_const, table,
                      table_to_map_config,
                      collection_class,
                      name_for_collection_relationship,
                      name_for_scalar_relationship,
                      generate_relationship):
    """Generate a bidirectional many-to-many relationship across an
    association ``table`` linking tables ``lcl_m2m`` and ``rem_m2m``.

    ``m2m_const`` is the two-element list of ForeignKeyConstraint objects
    detected by _is_many_to_many(); [0] joins the local side, [1] the
    remote side.  Mutates both map configs' ``properties`` in place.
    """
    map_config = table_to_map_config.get(lcl_m2m, None)
    referred_cfg = table_to_map_config.get(rem_m2m, None)
    if map_config is None or referred_cfg is None:
        return
    local_cls = map_config.cls
    referred_cls = referred_cfg.cls
    relationship_name = name_for_collection_relationship(
        automap_base,
        local_cls,
        referred_cls, m2m_const[0])
    backref_name = name_for_collection_relationship(
        automap_base,
        referred_cls,
        local_cls,
        m2m_const[1]
    )
    # Only create a backref if the referred class doesn't already define
    # an attribute of that name.
    create_backref = backref_name not in referred_cfg.properties
    if relationship_name not in map_config.properties:
        if create_backref:
            backref_obj = generate_relationship(
                automap_base,
                interfaces.MANYTOMANY,
                backref,
                backref_name,
                referred_cls, local_cls,
                collection_class=collection_class
            )
        else:
            backref_obj = None
        rel = generate_relationship(automap_base,
                                    interfaces.MANYTOMANY,
                                    relationship,
                                    relationship_name,
                                    local_cls, referred_cls,
                                    secondary=table,
                                    primaryjoin=and_(
                                        fk.column == fk.parent
                                        for fk in m2m_const[0].elements),
                                    secondaryjoin=and_(
                                        fk.column == fk.parent
                                        for fk in m2m_const[1].elements),
                                    backref=backref_obj,
                                    collection_class=collection_class
                                    )
        if rel is not None:
            map_config.properties[relationship_name] = rel
            # Pre-existing backref: link the two explicitly.
            if not create_backref:
                referred_cfg.properties[
                    backref_name].back_populates = relationship_name
    elif create_backref:
        # The forward side already exists; generate only the reverse
        # collection, with the join constraints swapped.
        rel = generate_relationship(automap_base,
                                    interfaces.MANYTOMANY,
                                    relationship,
                                    backref_name,
                                    referred_cls, local_cls,
                                    secondary=table,
                                    primaryjoin=and_(
                                        fk.column == fk.parent
                                        for fk in m2m_const[1].elements),
                                    secondaryjoin=and_(
                                        fk.column == fk.parent
                                        for fk in m2m_const[0].elements),
                                    back_populates=relationship_name,
                                    collection_class=collection_class)
        if rel is not None:
            referred_cfg.properties[backref_name] = rel
            map_config.properties[
                relationship_name].back_populates = backref_name
| bsd-3-clause |
Haleyo/spark-tk | regression-tests/sparktkregtests/testcases/frames/frame_inspect_test.py | 13 | 4394 | # vim: set encoding=utf-8
# Copyright (c) 2016 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Tests frame.inspect() """
import unittest
import sys
import os
from sparktkregtests.lib import sparktk_test
class FrameInspectTest(sparktk_test.SparkTKTestCase):
    """Exercises frame.inspect() / frame.take(): row counts, offsets,
    defaults, overflow truncation, and argument validation errors."""
    def setUp(self):
        """Build test frame"""
        super(FrameInspectTest, self).setUp()
        dataset = self.get_file("movie_user_5ratings.csv")
        schema = [("src", int),
                  ("vertex_type", str),
                  ("dest", int),
                  ("weight", int),
                  ("edge_type", str)]
        self.frame = self.context.frame.import_csv(
            dataset, schema=schema)
    def test_frame_inspect_0_offset(self):
        """Test offset of 0 does nothing"""
        inspect = self.frame.inspect(n=5, offset=0)
        self.assertEqual(len(inspect.rows), 5)
    def test_frame_inspect_offset_large(self):
        """Test offset of a large value"""
        inspect = self.frame.inspect(n=5, offset=1000)
        self.assertEqual(len(inspect.rows), 5)
    def test_frame_inspect_offset_overflow(self):
        """Test inspecting more lines than in frame from offset truncates"""
        # Only 3 rows remain past the offset, so only 3 come back.
        inspect = self.frame.inspect(n=10, offset=self.frame.count()-3)
        self.assertEqual(len(inspect.rows), 3)
    def test_frame_inspect_0_count(self):
        """Test inspecting 0 rows returns nothing"""
        inspect = self.frame.inspect(n=0)
        self.assertEqual(len(inspect.rows), 0)
    def test_frame_inspect_n(self):
        """Test requesting n rows returns n rows"""
        inspect = self.frame.inspect(n=1)
        self.assertEqual(len(inspect.rows), 1)
    def test_frame_inspect_default(self):
        """Test the default number of rows is 10"""
        inspect = self.frame.inspect()
        self.assertEqual(len(inspect.rows), 10)
    def test_frame_inspect_all(self):
        """Test inspecting entire frame returns entire frame"""
        inspect = self.frame.inspect(n=self.frame.count())
        self.assertEqual(len(inspect.rows), self.frame.count())
    def test_frame_inspect_count_overflow(self):
        """Test inspecting more than entire frame returns the entire frame"""
        row_count = self.frame.count()
        inspect = self.frame.inspect(n=row_count*10)
        self.assertEqual(len(inspect.rows), row_count)
        #compare 'inspect' with the actual entire frame RowInspection object
        self.assertEqual(str(inspect),
                         str(self.frame.inspect(n=row_count)))
    def test_negative_offset(self):
        """Test a negative offset errors"""
        with self.assertRaisesRegexp(ValueError, "Expected non-negative integer"):
            self.frame.inspect(n=5, offset=-1)
    def test_negative_count(self):
        """Test taking a negative number of rows errors"""
        with self.assertRaises(ValueError):
            self.frame.inspect(n=-1)
    def test_float_count(self):
        """Test float for count errors"""
        with self.assertRaisesRegexp(TypeError, "Expected type <type 'int'>"):
            self.frame.inspect(n=1.5)
    def test_float_offset(self):
        """Test float for offset errors"""
        with self.assertRaises(TypeError):
            self.frame.inspect(n=1, offset=1.5)
    def test_take_no_columns(self):
        """Test taking an empty list of columns gets an empty list"""
        self.assertEqual([], self.frame.take(n=10, columns=[]))
    def test_take_invalid_column(self):
        """Test taking a column that doesn't exist errors"""
        with self.assertRaisesRegexp(
                ValueError, "Invalid column name .* provided"):
            self.frame.take(n=10, columns=["no_such_col", "weight"])
# Allow running this test module directly via the unittest runner.
if __name__ == "__main__":
    unittest.main()
| apache-2.0 |
Griffintaur/QRCodeReader | simple_gui.py | 1 | 1696 | import os, threading, webbrowser
from flask import Flask, request, render_template, Response
from Imagehandler import Imagehandler
# Serve uploaded images straight out of the Input/ directory.
app = Flask(__name__, static_folder='Input')
# Absolute directory containing this script; uploads are stored beneath it.
APP_ROOT = os.path.dirname(os.path.abspath(__file__))
# Saved upload paths, in upload order (read back by the image feed route).
dest = []
def gen(destination):
    """Yield one multipart/x-mixed-replace JPEG frame for ``destination``.

    The image at ``destination`` is run through the QR-code handler and
    the processed result is re-encoded as a single MJPEG chunk.
    """
    import cv2
    handler = Imagehandler(str(destination))
    processed = handler.QRCodeInImage()
    ok, encoded = cv2.imencode('.jpg', processed)
    payload = encoded.tobytes()
    yield (b'--frame\r\n'
           b'Content-Type: image/jpeg\r\n\r\n' + payload + b'\r\n\r\n')
@app.route('/image_feed')
def image_feed():
    """Stream the processed version of the most recent upload.

    Fix: previously this served ``dest[0]`` — the *first* file ever
    uploaded — so the feed never reflected later uploads.  Serve the
    latest entry instead (identical behavior when only one upload exists).
    """
    return Response(gen(dest[-1]),
                    mimetype='multipart/x-mixed-replace; boundary=frame')
@app.route("/")
def index():
return render_template("upload.html")
@app.route("/upload", methods=["POST"])
def upload():
target = os.path.join(APP_ROOT, 'Input/')
print(target)
if not os.path.isdir(target):
os.mkdir(target)
else:
print("Couldn't create upload directory: {}".format(target))
print(request.files.getlist("file"))
for upload in request.files.getlist("file"):
print(upload)
print("{} is the file name".format(upload.filename))
filename = upload.filename
destination = "/".join([target, filename])
print ("Accept incoming file:", filename)
print ("Save it to:", destination)
upload.save(destination)
dest.append(destination)
return render_template("complete.html", image_name=filename)
if __name__ == "__main__":
url = 'http://127.0.0.1:{0}'.format(5000)
threading.Timer(1.25, lambda : webbrowser.open(url)).start()
app.run(port=5000, debug=False)
| mit |
darkspring2015/PyDatcomLab | PyDatcomLab/GUIs/components/BrowseModels.py | 1 | 12197 | # -*- coding: utf-8 -*-
"""
Module implementing BrowseModels, the model browser dialog.
"""
from PyQt5.QtCore import pyqtSlot, pyqtSignal, QSize, QPoint, Qt
from PyQt5.QtWidgets import QDialog, QFileDialog, QMenu, QTableWidgetItem
from PyQt5.QtGui import QIcon, QPixmap
import logging
import os
from xml.etree import ElementTree as ET
from PyDatcomLab.GUIs.components.NewModel import NewModelDlg
from PyDatcomLab.GUIs.components.ModelPreview import ModelPreview as Mp
from PyDatcomLab.Core.PyDatcomConfigLoader import defaultConfig as dtConfig
#导入界面
from .Ui_BrowseModels import Ui_BrowseModel
class BrowseModels(QDialog, Ui_BrowseModel):
    """
    Model browser dialog: lists the models registered in the datcom
    configuration's ModelLibrary and lets the user add, create, remove,
    rename and preview them.
    """
    # Emitted with the selected model's file path when a row is activated.
    emit_ModelSelected = pyqtSignal(object)
    def __init__(self, parent=None, iConfig = dtConfig):
        """
        Constructor
        @param parent reference to the parent widget
        @type QWidget
        @param iConfig datcom configuration object; falls back to the
         module-level defaultConfig when None
        """
        super(BrowseModels, self).__init__(parent)
        self.setupUi(self)
        #self.splitter.setStretchFactor(1, 4)
        # logger
        self.logger = logging.getLogger(r'Datcomlogger')
        # load the datcom configuration
        if iConfig is None:
            self.dtConfig = dtConfig
        else:
            self.dtConfig = iConfig
        # model-library settings
        self.extList = ['.xml', '.dcXML', '.dcxml']
        self.libraryKeyWord = 'ModelLibrary'
        self.rootTag = self.dtConfig.getLibraryRootTag(self.libraryKeyWord) # root tag expected in a model library file
        self.MEKeys = list(dtConfig.getLibraryElementTemplate(self.libraryKeyWord ))
        # populate the table header from the library element template keys
        if self.MEKeys is None:
            self.logger.error("尝试读取ProjectLibrary并不存在定义!")
        else:
            self.ModelSet.setColumnCount(len(self.MEKeys))
            self.ModelSet.setHorizontalHeaderLabels(self.MEKeys)
            self.ModelSet.horizontalHeaderItem(1).setTextAlignment(Qt.AlignRight)
        self.ModelSet.setContextMenuPolicy(Qt.CustomContextMenu)
        self.lastEditingRow = -1
        # refresh the model table from the configuration
        self.dataSet = None
        self._resetModel()
        # UI state for the context menu
        self.curPos = QPoint(0, 0)
        self.curWidget = None
        self.popMenu = None
    # --- UI helpers -------------------------------------------------
    def getCurrentModelPath(self):
        """
        Return the path of the currently selected model.
        Returns None when no row is selected or the path column cannot
        be resolved; otherwise returns the path cell's text.
        """
        tRow = self.ModelSet.currentRow()
        tPathKey = self.dtConfig.getPathKeyByLibraryName(self.libraryKeyWord)
        if tRow <0 or tPathKey is None:
            return None
        else:
            # locate the column index of the path key
            tCIndex = self.MEKeys.index(tPathKey)
            if tCIndex >= 0:
                return self.ModelSet.item(tRow, 1).text()
            else:
                return None
    @pyqtSlot()
    def on_pushButton_ChoiseDir_clicked(self):
        """
        Handler for the "choose directory" button: pick a directory and
        load the models it contains.
        """
        tDir = QFileDialog.getExistingDirectory(self,"打开模型目录", ''
                                                , QFileDialog.DontUseNativeDialog)
        if not os.path.exists(tDir):
            self.logger.error("Try 打开模型目录%s 不存在!"%tDir)
            return
        self.logger.info("Try 打开模型目录")
        # make sure the chosen directory is present in the combo box
        tIndex = self.comboBox_Dirs.findText(tDir, Qt.MatchExactly | Qt.MatchCaseSensitive)
        if tIndex == -1:
            self.comboBox_Dirs.addItem(tDir, None)
            self.comboBox_Dirs.setCurrentIndex(self.comboBox_Dirs.count() -1)
        elif tIndex != self.comboBox_Dirs.currentIndex():
            self.comboBox_Dirs.setCurrentIndex(tIndex)
        else:
            # NOTE(review): self.modelDir is not set anywhere in this class —
            # this branch likely raises AttributeError; confirm intent.
            self.AddModels(self.modelDir)
    def AddModels(self, tDir):
        """
        Add all model files found in a directory.
        @param tDir directory containing the model files
        """
        # make sure the directory is present in the combo box
        tIndex = self.comboBox_Dirs.findText(tDir, Qt.MatchExactly | Qt.MatchCaseSensitive)
        if tIndex == -1:
            self.comboBox_Dirs.addItem(tDir, None)
            self.comboBox_Dirs.setCurrentIndex(self.comboBox_Dirs.count() -1)
        elif tIndex != self.comboBox_Dirs.currentIndex():
            self.comboBox_Dirs.setCurrentIndex(tIndex)
        # scan the directory for model files by extension
        for aFile in os.listdir(tDir):
            # os.path.splitext(): split file name and extension
            if os.path.splitext(aFile)[1] in self.extList :
                self.AddModel(tDir, aFile)
    def AddModel(self,tDir, tFile):
        """
        Add a single model file to the library.
        """
        # filesystem checks
        fpath = os.path.join(tDir, tFile)
        if not os.path.isfile(fpath):
            return #
        fN, extN = os.path.splitext(tFile)
        if extN not in self.extList:
            return
        # verify the content is valid XML with the expected root tag
        tCName = None
        try:
            tRoot = ET.parse(fpath).getroot()
            if tRoot.tag != self.rootTag:
                self.logger.info("AddModels()测试模型文件的的根节点Tag,期望%s,实际:%s!忽略该文件"%(self.rootTag, tRoot.tag))
                return
            else:
                tCName = tRoot.attrib.get('CName', None) # CASE name, by convention stored in the CName attribute
        except Exception as e:
            self.logger.error("AddModels()测试模型文件过程中发生异常,%s :%s"%(repr(e), str(e)))
        # register the model in the configuration library
        tTemplate = self.dtConfig.getLibraryElementTemplate(self.libraryKeyWord)
        tTemplate.update({'ModelName':tCName,
                          'path':fpath,
                          })
        if self.dtConfig.addItemToLibrary(self.libraryKeyWord, tTemplate) == '成功添加':
            # refresh the table and select the newly added row
            self._resetModel()
            self.ModelSet.setCurrentCell(self.ModelSet.rowCount() -1, 0)
    def _resetModel(self):
        """
        Rebuild the QTableWidget contents from the configuration library.
        """
        if self.dtConfig is None : return
        self.dataSet = self.dtConfig.getLibrary(self.libraryKeyWord)
        self.ModelSet.clearContents()
        self.ModelSet.setRowCount(0)
        for iT in self.dataSet :
            tRowCount = self.ModelSet.rowCount()
            self.ModelSet.insertRow(tRowCount)
            pix1 = QPixmap(r":/card/rc_card/亚音速常规布局.jpg")
            self.ModelSet.setItem(tRowCount, 0, QTableWidgetItem(QIcon(pix1.scaled(QSize(100,100))),iT.get('ModelName', '')))
            self.ModelSet.setItem(tRowCount, 1, QTableWidgetItem(iT.get('path', '.') ))
            # the path column is read-only
            self.ModelSet.item(tRowCount, 1).setFlags(Qt.NoItemFlags|Qt.ItemIsEnabled)
            #self.ModelSet.item(tRowCount, 1).setFlags(Qt.NoItemFlags|Qt.ItemIsSelectable|Qt.ItemIsEnabled)
            #self.ModelSet.setItem(tRowCount, 2, QTableWidgetItem(''))
    @pyqtSlot(QPoint)
    def on_ModelSet_customContextMenuRequested(self, pos):
        """
        Show the context menu for the model table.
        @param pos click position in widget coordinates
        @type QPoint
        """
        self.curPos = pos
        self.curWidget = self.ModelSet
        posG = self.curWidget.mapToGlobal(pos)
        self.popMenu = QMenu(self.curWidget)
        self.popMenu.addAction(self.actionNewModel)
        self.popMenu.addAction(self.actionAddModel)
        self.popMenu.addAction(self.actionRemoveModel)
        self.popMenu.addAction(self.actionPreviewModel)
        self.curWidget.setContextMenuPolicy(Qt.CustomContextMenu)
        self.popMenu.exec(posG)
    @pyqtSlot()
    def on_actionNewModel_triggered(self):
        """
        Create a new model via the NewModel dialog and add it.
        """
        dlg = NewModelDlg()
        dlg.exec()
        if dlg.result() == QDialog.Accepted:
            mPath = dlg.getModelPath()
            self.AddModel(os.path.dirname(mPath), os.path.basename(mPath))
    @pyqtSlot()
    def on_actionAddModel_triggered(self):
        """
        Add existing model files chosen through a file dialog.
        """
        baseDir = os.path.expanduser('~')
        if os.path.exists(self.comboBox_Dirs.currentText() ):
            baseDir = self.comboBox_Dirs.currentText()
        # open the file selection dialog
        tFiles = QFileDialog.getOpenFileNames(self,"选择模型文件",
                                              baseDir,
                                              "Datcom Project Files (*.dcxml *.xml )")
        for iF in tFiles[0]:
            if os.path.exists(iF):
                fN, extN = os.path.splitext(os.path.basename(iF))
                self.AddModel(os.path.dirname(iF), os.path.basename(iF))
        # switch selection: AddModel appends the data as the last row
        #self.on_ModelSet_itemDoubleClicked( self.ModelSet.item(self.ModelSet.rowCount() - 1, 0))
    @pyqtSlot()
    def on_actionRemoveModel_triggered(self):
        """
        Remove the model from the list (the model file itself is kept).
        """
        aItem = self.curWidget.indexAt(self.curPos)
        if aItem.row() >= 0 :
            tTemplate = self.dtConfig.getLibraryElementTemplate(self.libraryKeyWord)
            tTemplate.update({'ModelName':self.curWidget.item(aItem.row(), 0).text(),
                              'path':self.curWidget.item(aItem.row(), 1).text(), })
            self.dtConfig.delItemFromLibrary(self.libraryKeyWord, tTemplate)
            self.curWidget.removeRow(aItem.row())
        else:
            self.logger.info("没有命中任何行")
    @pyqtSlot(int, int, int, int)
    def on_ModelSet_currentCellChanged(self, currentRow, currentColumn, previousRow, previousColumn):
        """
        Slot for current-cell changes; selection notification is handled
        by the double-click slot instead, so this is intentionally empty.
        @param currentRow DESCRIPTION
        @type int
        @param currentColumn DESCRIPTION
        @type int
        @param previousRow DESCRIPTION
        @type int
        @param previousColumn DESCRIPTION
        @type int
        """
        # #emit the model-selected signal
        #
        # if previousRow != currentRow:
        #     if not self.ModelSet.itemAt(currentRow, 1) is None :
        #         self.emit_ModelSelected.emit( self.ModelSet.item(currentRow, 1).text())
    #void QTableWidget::cellChanged(int row, int column)
    @pyqtSlot(int, int)
    def on_ModelSet_cellChanged(self, iRow, iColumn):
        """
        Slot for cellChanged, used to persist in-table edits.
        Only the name column is editable (the path column is read-only),
        so only ModelName changes are written back to the configuration.
        """
        if iColumn == 0:
            # ModelName column
            self.dataSet[iRow].update({'ModelName':self.ModelSet.item(iRow, 0).text(), })
            self.dtConfig.setLibrary(self.libraryKeyWord, self.dataSet)
    @pyqtSlot(int)
    def on_comboBox_Dirs_currentIndexChanged(self, index):
        """
        Reload the models of the newly selected directory.
        @param index DESCRIPTION
        @type int
        """
        # reload models from the current directory here (not implemented)
        pass
    @pyqtSlot()
    def on_actionPreviewModel_triggered(self):
        """
        Open the model preview window.
        """
        # open the model preview window for the clicked row
        aItem = self.curWidget.indexAt(self.curPos)
        if aItem.row() < 0 :
            self.logger.info("没有命中任何行")
            return
        tPath = self.ModelSet.item(aItem.row(), 1).text()
        self.PreViewdlg = Mp()
        self.PreViewdlg.loadModel(tPath)
        self.isDcModel = True
        self.PreViewdlg.show()
    @pyqtSlot(QTableWidgetItem)
    def on_ModelSet_itemDoubleClicked(self, item):
        """
        On double-click, load the model into the central view by emitting
        its path.
        @param item DESCRIPTION
        @type QTableWidgetItem
        """
        if self.lastEditingRow != item.row():
            if not self.ModelSet.itemAt(item.row(), 1) is None :
                self.lastEditingRow = item.row()
                self.emit_ModelSelected.emit( self.ModelSet.item(item.row(), 1).text())
| mit |
RecipeML/Recipe | recipe/preprocessors/fag.py | 1 | 1645 | # -*- coding: utf-8 -*-
"""
Copyright 2016 Walter José and Alex de Sá
This file is part of the RECIPE Algorithm.
The RECIPE is free software: you can redistribute it and/or
modify it under the terms of the GNU General Public License as published by the
Free Software Foundation, either version 3 of the License, or (at your option)
any later version.
RECIPE is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. See http://www.gnu.org/licenses/.
"""
from sklearn.cluster import FeatureAgglomeration
def fag(args):
    """Build a scikit-learn FeatureAgglomeration transformer from a
    grammar-argument list.

    Parameters
    ----------
    args : list of str
        args[1] -> affinity: metric used to compute the linkage. Can be
            "euclidean", "l1", "l2", "manhattan", "cosine" or
            "precomputed"; if linkage is "ward", only "euclidean" is
            accepted.
        args[2] -> linkage: one of "ward", "complete", "average". The
            linkage criterion determines which distance to use between
            sets of features; the algorithm merges the pair of clusters
            that minimizes it.
        args[3] -> compute_full_tree flag: any string containing the
            substring "True" enables it.
        args[4] -> n_clusters: number of clusters to find (parsed as int).

    Returns
    -------
    FeatureAgglomeration
        An unfitted transformer configured with the parsed options.
    """
    affinity = args[1]
    linkage = args[2]
    # The grammar encodes booleans as strings; keep the original
    # substring semantics rather than a strict equality test.
    compute_full_tree = "True" in args[3]
    n_clusters = int(args[4])
    return FeatureAgglomeration(n_clusters=n_clusters,
                                affinity=affinity,
                                connectivity=None,
                                compute_full_tree=compute_full_tree,
                                linkage=linkage)
| gpl-3.0 |
sudheesh001/oh-mainline | vendor/packages/requests/requests/packages/chardet/big5freq.py | 3133 | 82594 | ######################## BEGIN LICENSE BLOCK ########################
# The Original Code is Mozilla Communicator client code.
#
# The Initial Developer of the Original Code is
# Netscape Communications Corporation.
# Portions created by the Initial Developer are Copyright (C) 1998
# the Initial Developer. All Rights Reserved.
#
# Contributor(s):
# Mark Pilgrim - port to Python
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
# 02110-1301 USA
######################### END LICENSE BLOCK #########################
# Big5 frequency table
# by Taiwan's Mandarin Promotion Council
# <http://www.edu.tw:81/mandr/>
#
# 128 --> 0.42261
# 256 --> 0.57851
# 512 --> 0.74851
# 1024 --> 0.89384
# 2048 --> 0.97583
#
# Ideal Distribution Ratio = 0.74851/(1-0.74851) =2.98
# Random Distribution Ration = 512/(5401-512)=0.105
#
# Typical Distribution Ratio about 25% of Ideal one, still much higher than RDR
BIG5_TYPICAL_DISTRIBUTION_RATIO = 0.75
# Number of meaningful entries in the Big5CharToFreqOrder table below.
BIG5_TABLE_SIZE = 5376
Big5CharToFreqOrder = (
1,1801,1506, 255,1431, 198, 9, 82, 6,5008, 177, 202,3681,1256,2821, 110, # 16
3814, 33,3274, 261, 76, 44,2114, 16,2946,2187,1176, 659,3971, 26,3451,2653, # 32
1198,3972,3350,4202, 410,2215, 302, 590, 361,1964, 8, 204, 58,4510,5009,1932, # 48
63,5010,5011, 317,1614, 75, 222, 159,4203,2417,1480,5012,3555,3091, 224,2822, # 64
3682, 3, 10,3973,1471, 29,2787,1135,2866,1940, 873, 130,3275,1123, 312,5013, # 80
4511,2052, 507, 252, 682,5014, 142,1915, 124, 206,2947, 34,3556,3204, 64, 604, # 96
5015,2501,1977,1978, 155,1991, 645, 641,1606,5016,3452, 337, 72, 406,5017, 80, # 112
630, 238,3205,1509, 263, 939,1092,2654, 756,1440,1094,3453, 449, 69,2987, 591, # 128
179,2096, 471, 115,2035,1844, 60, 50,2988, 134, 806,1869, 734,2036,3454, 180, # 144
995,1607, 156, 537,2907, 688,5018, 319,1305, 779,2145, 514,2379, 298,4512, 359, # 160
2502, 90,2716,1338, 663, 11, 906,1099,2553, 20,2441, 182, 532,1716,5019, 732, # 176
1376,4204,1311,1420,3206, 25,2317,1056, 113, 399, 382,1950, 242,3455,2474, 529, # 192
3276, 475,1447,3683,5020, 117, 21, 656, 810,1297,2300,2334,3557,5021, 126,4205, # 208
706, 456, 150, 613,4513, 71,1118,2037,4206, 145,3092, 85, 835, 486,2115,1246, # 224
1426, 428, 727,1285,1015, 800, 106, 623, 303,1281,5022,2128,2359, 347,3815, 221, # 240
3558,3135,5023,1956,1153,4207, 83, 296,1199,3093, 192, 624, 93,5024, 822,1898, # 256
2823,3136, 795,2065, 991,1554,1542,1592, 27, 43,2867, 859, 139,1456, 860,4514, # 272
437, 712,3974, 164,2397,3137, 695, 211,3037,2097, 195,3975,1608,3559,3560,3684, # 288
3976, 234, 811,2989,2098,3977,2233,1441,3561,1615,2380, 668,2077,1638, 305, 228, # 304
1664,4515, 467, 415,5025, 262,2099,1593, 239, 108, 300, 200,1033, 512,1247,2078, # 320
5026,5027,2176,3207,3685,2682, 593, 845,1062,3277, 88,1723,2038,3978,1951, 212, # 336
266, 152, 149, 468,1899,4208,4516, 77, 187,5028,3038, 37, 5,2990,5029,3979, # 352
5030,5031, 39,2524,4517,2908,3208,2079, 55, 148, 74,4518, 545, 483,1474,1029, # 368
1665, 217,1870,1531,3138,1104,2655,4209, 24, 172,3562, 900,3980,3563,3564,4519, # 384
32,1408,2824,1312, 329, 487,2360,2251,2717, 784,2683, 4,3039,3351,1427,1789, # 400
188, 109, 499,5032,3686,1717,1790, 888,1217,3040,4520,5033,3565,5034,3352,1520, # 416
3687,3981, 196,1034, 775,5035,5036, 929,1816, 249, 439, 38,5037,1063,5038, 794, # 432
3982,1435,2301, 46, 178,3278,2066,5039,2381,5040, 214,1709,4521, 804, 35, 707, # 448
324,3688,1601,2554, 140, 459,4210,5041,5042,1365, 839, 272, 978,2262,2580,3456, # 464
2129,1363,3689,1423, 697, 100,3094, 48, 70,1231, 495,3139,2196,5043,1294,5044, # 480
2080, 462, 586,1042,3279, 853, 256, 988, 185,2382,3457,1698, 434,1084,5045,3458, # 496
314,2625,2788,4522,2335,2336, 569,2285, 637,1817,2525, 757,1162,1879,1616,3459, # 512
287,1577,2116, 768,4523,1671,2868,3566,2526,1321,3816, 909,2418,5046,4211, 933, # 528
3817,4212,2053,2361,1222,4524, 765,2419,1322, 786,4525,5047,1920,1462,1677,2909, # 544
1699,5048,4526,1424,2442,3140,3690,2600,3353,1775,1941,3460,3983,4213, 309,1369, # 560
1130,2825, 364,2234,1653,1299,3984,3567,3985,3986,2656, 525,1085,3041, 902,2001, # 576
1475, 964,4527, 421,1845,1415,1057,2286, 940,1364,3141, 376,4528,4529,1381, 7, # 592
2527, 983,2383, 336,1710,2684,1846, 321,3461, 559,1131,3042,2752,1809,1132,1313, # 608
265,1481,1858,5049, 352,1203,2826,3280, 167,1089, 420,2827, 776, 792,1724,3568, # 624
4214,2443,3281,5050,4215,5051, 446, 229, 333,2753, 901,3818,1200,1557,4530,2657, # 640
1921, 395,2754,2685,3819,4216,1836, 125, 916,3209,2626,4531,5052,5053,3820,5054, # 656
5055,5056,4532,3142,3691,1133,2555,1757,3462,1510,2318,1409,3569,5057,2146, 438, # 672
2601,2910,2384,3354,1068, 958,3043, 461, 311,2869,2686,4217,1916,3210,4218,1979, # 688
383, 750,2755,2627,4219, 274, 539, 385,1278,1442,5058,1154,1965, 384, 561, 210, # 704
98,1295,2556,3570,5059,1711,2420,1482,3463,3987,2911,1257, 129,5060,3821, 642, # 720
523,2789,2790,2658,5061, 141,2235,1333, 68, 176, 441, 876, 907,4220, 603,2602, # 736
710, 171,3464, 404, 549, 18,3143,2398,1410,3692,1666,5062,3571,4533,2912,4534, # 752
5063,2991, 368,5064, 146, 366, 99, 871,3693,1543, 748, 807,1586,1185, 22,2263, # 768
379,3822,3211,5065,3212, 505,1942,2628,1992,1382,2319,5066, 380,2362, 218, 702, # 784
1818,1248,3465,3044,3572,3355,3282,5067,2992,3694, 930,3283,3823,5068, 59,5069, # 800
585, 601,4221, 497,3466,1112,1314,4535,1802,5070,1223,1472,2177,5071, 749,1837, # 816
690,1900,3824,1773,3988,1476, 429,1043,1791,2236,2117, 917,4222, 447,1086,1629, # 832
5072, 556,5073,5074,2021,1654, 844,1090, 105, 550, 966,1758,2828,1008,1783, 686, # 848
1095,5075,2287, 793,1602,5076,3573,2603,4536,4223,2948,2302,4537,3825, 980,2503, # 864
544, 353, 527,4538, 908,2687,2913,5077, 381,2629,1943,1348,5078,1341,1252, 560, # 880
3095,5079,3467,2870,5080,2054, 973, 886,2081, 143,4539,5081,5082, 157,3989, 496, # 896
4224, 57, 840, 540,2039,4540,4541,3468,2118,1445, 970,2264,1748,1966,2082,4225, # 912
3144,1234,1776,3284,2829,3695, 773,1206,2130,1066,2040,1326,3990,1738,1725,4226, # 928
279,3145, 51,1544,2604, 423,1578,2131,2067, 173,4542,1880,5083,5084,1583, 264, # 944
610,3696,4543,2444, 280, 154,5085,5086,5087,1739, 338,1282,3096, 693,2871,1411, # 960
1074,3826,2445,5088,4544,5089,5090,1240, 952,2399,5091,2914,1538,2688, 685,1483, # 976
4227,2475,1436, 953,4228,2055,4545, 671,2400, 79,4229,2446,3285, 608, 567,2689, # 992
3469,4230,4231,1691, 393,1261,1792,2401,5092,4546,5093,5094,5095,5096,1383,1672, # 1008
3827,3213,1464, 522,1119, 661,1150, 216, 675,4547,3991,1432,3574, 609,4548,2690, # 1024
2402,5097,5098,5099,4232,3045, 0,5100,2476, 315, 231,2447, 301,3356,4549,2385, # 1040
5101, 233,4233,3697,1819,4550,4551,5102, 96,1777,1315,2083,5103, 257,5104,1810, # 1056
3698,2718,1139,1820,4234,2022,1124,2164,2791,1778,2659,5105,3097, 363,1655,3214, # 1072
5106,2993,5107,5108,5109,3992,1567,3993, 718, 103,3215, 849,1443, 341,3357,2949, # 1088
1484,5110,1712, 127, 67, 339,4235,2403, 679,1412, 821,5111,5112, 834, 738, 351, # 1104
2994,2147, 846, 235,1497,1881, 418,1993,3828,2719, 186,1100,2148,2756,3575,1545, # 1120
1355,2950,2872,1377, 583,3994,4236,2581,2995,5113,1298,3699,1078,2557,3700,2363, # 1136
78,3829,3830, 267,1289,2100,2002,1594,4237, 348, 369,1274,2197,2178,1838,4552, # 1152
1821,2830,3701,2757,2288,2003,4553,2951,2758, 144,3358, 882,4554,3995,2759,3470, # 1168
4555,2915,5114,4238,1726, 320,5115,3996,3046, 788,2996,5116,2831,1774,1327,2873, # 1184
3997,2832,5117,1306,4556,2004,1700,3831,3576,2364,2660, 787,2023, 506, 824,3702, # 1200
534, 323,4557,1044,3359,2024,1901, 946,3471,5118,1779,1500,1678,5119,1882,4558, # 1216
165, 243,4559,3703,2528, 123, 683,4239, 764,4560, 36,3998,1793, 589,2916, 816, # 1232
626,1667,3047,2237,1639,1555,1622,3832,3999,5120,4000,2874,1370,1228,1933, 891, # 1248
2084,2917, 304,4240,5121, 292,2997,2720,3577, 691,2101,4241,1115,4561, 118, 662, # 1264
5122, 611,1156, 854,2386,1316,2875, 2, 386, 515,2918,5123,5124,3286, 868,2238, # 1280
1486, 855,2661, 785,2216,3048,5125,1040,3216,3578,5126,3146, 448,5127,1525,5128, # 1296
2165,4562,5129,3833,5130,4242,2833,3579,3147, 503, 818,4001,3148,1568, 814, 676, # 1312
1444, 306,1749,5131,3834,1416,1030, 197,1428, 805,2834,1501,4563,5132,5133,5134, # 1328
1994,5135,4564,5136,5137,2198, 13,2792,3704,2998,3149,1229,1917,5138,3835,2132, # 1344
5139,4243,4565,2404,3580,5140,2217,1511,1727,1120,5141,5142, 646,3836,2448, 307, # 1360
5143,5144,1595,3217,5145,5146,5147,3705,1113,1356,4002,1465,2529,2530,5148, 519, # 1376
5149, 128,2133, 92,2289,1980,5150,4003,1512, 342,3150,2199,5151,2793,2218,1981, # 1392
3360,4244, 290,1656,1317, 789, 827,2365,5152,3837,4566, 562, 581,4004,5153, 401, # 1408
4567,2252, 94,4568,5154,1399,2794,5155,1463,2025,4569,3218,1944,5156, 828,1105, # 1424
4245,1262,1394,5157,4246, 605,4570,5158,1784,2876,5159,2835, 819,2102, 578,2200, # 1440
2952,5160,1502, 436,3287,4247,3288,2836,4005,2919,3472,3473,5161,2721,2320,5162, # 1456
5163,2337,2068, 23,4571, 193, 826,3838,2103, 699,1630,4248,3098, 390,1794,1064, # 1472
3581,5164,1579,3099,3100,1400,5165,4249,1839,1640,2877,5166,4572,4573, 137,4250, # 1488
598,3101,1967, 780, 104, 974,2953,5167, 278, 899, 253, 402, 572, 504, 493,1339, # 1504
5168,4006,1275,4574,2582,2558,5169,3706,3049,3102,2253, 565,1334,2722, 863, 41, # 1520
5170,5171,4575,5172,1657,2338, 19, 463,2760,4251, 606,5173,2999,3289,1087,2085, # 1536
1323,2662,3000,5174,1631,1623,1750,4252,2691,5175,2878, 791,2723,2663,2339, 232, # 1552
2421,5176,3001,1498,5177,2664,2630, 755,1366,3707,3290,3151,2026,1609, 119,1918, # 1568
3474, 862,1026,4253,5178,4007,3839,4576,4008,4577,2265,1952,2477,5179,1125, 817, # 1584
4254,4255,4009,1513,1766,2041,1487,4256,3050,3291,2837,3840,3152,5180,5181,1507, # 1600
5182,2692, 733, 40,1632,1106,2879, 345,4257, 841,2531, 230,4578,3002,1847,3292, # 1616
3475,5183,1263, 986,3476,5184, 735, 879, 254,1137, 857, 622,1300,1180,1388,1562, # 1632
4010,4011,2954, 967,2761,2665,1349, 592,2134,1692,3361,3003,1995,4258,1679,4012, # 1648
1902,2188,5185, 739,3708,2724,1296,1290,5186,4259,2201,2202,1922,1563,2605,2559, # 1664
1871,2762,3004,5187, 435,5188, 343,1108, 596, 17,1751,4579,2239,3477,3709,5189, # 1680
4580, 294,3582,2955,1693, 477, 979, 281,2042,3583, 643,2043,3710,2631,2795,2266, # 1696
1031,2340,2135,2303,3584,4581, 367,1249,2560,5190,3585,5191,4582,1283,3362,2005, # 1712
240,1762,3363,4583,4584, 836,1069,3153, 474,5192,2149,2532, 268,3586,5193,3219, # 1728
1521,1284,5194,1658,1546,4260,5195,3587,3588,5196,4261,3364,2693,1685,4262, 961, # 1744
1673,2632, 190,2006,2203,3841,4585,4586,5197, 570,2504,3711,1490,5198,4587,2633, # 1760
3293,1957,4588, 584,1514, 396,1045,1945,5199,4589,1968,2449,5200,5201,4590,4013, # 1776
619,5202,3154,3294, 215,2007,2796,2561,3220,4591,3221,4592, 763,4263,3842,4593, # 1792
5203,5204,1958,1767,2956,3365,3712,1174, 452,1477,4594,3366,3155,5205,2838,1253, # 1808
2387,2189,1091,2290,4264, 492,5206, 638,1169,1825,2136,1752,4014, 648, 926,1021, # 1824
1324,4595, 520,4596, 997, 847,1007, 892,4597,3843,2267,1872,3713,2405,1785,4598, # 1840
1953,2957,3103,3222,1728,4265,2044,3714,4599,2008,1701,3156,1551, 30,2268,4266, # 1856
5207,2027,4600,3589,5208, 501,5209,4267, 594,3478,2166,1822,3590,3479,3591,3223, # 1872
829,2839,4268,5210,1680,3157,1225,4269,5211,3295,4601,4270,3158,2341,5212,4602, # 1888
4271,5213,4015,4016,5214,1848,2388,2606,3367,5215,4603, 374,4017, 652,4272,4273, # 1904
375,1140, 798,5216,5217,5218,2366,4604,2269, 546,1659, 138,3051,2450,4605,5219, # 1920
2254, 612,1849, 910, 796,3844,1740,1371, 825,3845,3846,5220,2920,2562,5221, 692, # 1936
444,3052,2634, 801,4606,4274,5222,1491, 244,1053,3053,4275,4276, 340,5223,4018, # 1952
1041,3005, 293,1168, 87,1357,5224,1539, 959,5225,2240, 721, 694,4277,3847, 219, # 1968
1478, 644,1417,3368,2666,1413,1401,1335,1389,4019,5226,5227,3006,2367,3159,1826, # 1984
730,1515, 184,2840, 66,4607,5228,1660,2958, 246,3369, 378,1457, 226,3480, 975, # 2000
4020,2959,1264,3592, 674, 696,5229, 163,5230,1141,2422,2167, 713,3593,3370,4608, # 2016
4021,5231,5232,1186, 15,5233,1079,1070,5234,1522,3224,3594, 276,1050,2725, 758, # 2032
1126, 653,2960,3296,5235,2342, 889,3595,4022,3104,3007, 903,1250,4609,4023,3481, # 2048
3596,1342,1681,1718, 766,3297, 286, 89,2961,3715,5236,1713,5237,2607,3371,3008, # 2064
5238,2962,2219,3225,2880,5239,4610,2505,2533, 181, 387,1075,4024, 731,2190,3372, # 2080
5240,3298, 310, 313,3482,2304, 770,4278, 54,3054, 189,4611,3105,3848,4025,5241, # 2096
1230,1617,1850, 355,3597,4279,4612,3373, 111,4280,3716,1350,3160,3483,3055,4281, # 2112
2150,3299,3598,5242,2797,4026,4027,3009, 722,2009,5243,1071, 247,1207,2343,2478, # 2128
1378,4613,2010, 864,1437,1214,4614, 373,3849,1142,2220, 667,4615, 442,2763,2563, # 2144
3850,4028,1969,4282,3300,1840, 837, 170,1107, 934,1336,1883,5244,5245,2119,4283, # 2160
2841, 743,1569,5246,4616,4284, 582,2389,1418,3484,5247,1803,5248, 357,1395,1729, # 2176
3717,3301,2423,1564,2241,5249,3106,3851,1633,4617,1114,2086,4285,1532,5250, 482, # 2192
2451,4618,5251,5252,1492, 833,1466,5253,2726,3599,1641,2842,5254,1526,1272,3718, # 2208
4286,1686,1795, 416,2564,1903,1954,1804,5255,3852,2798,3853,1159,2321,5256,2881, # 2224
4619,1610,1584,3056,2424,2764, 443,3302,1163,3161,5257,5258,4029,5259,4287,2506, # 2240
3057,4620,4030,3162,2104,1647,3600,2011,1873,4288,5260,4289, 431,3485,5261, 250, # 2256
97, 81,4290,5262,1648,1851,1558, 160, 848,5263, 866, 740,1694,5264,2204,2843, # 2272
3226,4291,4621,3719,1687, 950,2479, 426, 469,3227,3720,3721,4031,5265,5266,1188, # 2288
424,1996, 861,3601,4292,3854,2205,2694, 168,1235,3602,4293,5267,2087,1674,4622, # 2304
3374,3303, 220,2565,1009,5268,3855, 670,3010, 332,1208, 717,5269,5270,3603,2452, # 2320
4032,3375,5271, 513,5272,1209,2882,3376,3163,4623,1080,5273,5274,5275,5276,2534, # 2336
3722,3604, 815,1587,4033,4034,5277,3605,3486,3856,1254,4624,1328,3058,1390,4035, # 2352
1741,4036,3857,4037,5278, 236,3858,2453,3304,5279,5280,3723,3859,1273,3860,4625, # 2368
5281, 308,5282,4626, 245,4627,1852,2480,1307,2583, 430, 715,2137,2454,5283, 270, # 2384
199,2883,4038,5284,3606,2727,1753, 761,1754, 725,1661,1841,4628,3487,3724,5285, # 2400
5286, 587, 14,3305, 227,2608, 326, 480,2270, 943,2765,3607, 291, 650,1884,5287, # 2416
1702,1226, 102,1547, 62,3488, 904,4629,3489,1164,4294,5288,5289,1224,1548,2766, # 2432
391, 498,1493,5290,1386,1419,5291,2056,1177,4630, 813, 880,1081,2368, 566,1145, # 2448
4631,2291,1001,1035,2566,2609,2242, 394,1286,5292,5293,2069,5294, 86,1494,1730, # 2464
4039, 491,1588, 745, 897,2963, 843,3377,4040,2767,2884,3306,1768, 998,2221,2070, # 2480
397,1827,1195,1970,3725,3011,3378, 284,5295,3861,2507,2138,2120,1904,5296,4041, # 2496
2151,4042,4295,1036,3490,1905, 114,2567,4296, 209,1527,5297,5298,2964,2844,2635, # 2512
2390,2728,3164, 812,2568,5299,3307,5300,1559, 737,1885,3726,1210, 885, 28,2695, # 2528
3608,3862,5301,4297,1004,1780,4632,5302, 346,1982,2222,2696,4633,3863,1742, 797, # 2544
1642,4043,1934,1072,1384,2152, 896,4044,3308,3727,3228,2885,3609,5303,2569,1959, # 2560
4634,2455,1786,5304,5305,5306,4045,4298,1005,1308,3728,4299,2729,4635,4636,1528, # 2576
2610, 161,1178,4300,1983, 987,4637,1101,4301, 631,4046,1157,3229,2425,1343,1241, # 2592
1016,2243,2570, 372, 877,2344,2508,1160, 555,1935, 911,4047,5307, 466,1170, 169, # 2608
1051,2921,2697,3729,2481,3012,1182,2012,2571,1251,2636,5308, 992,2345,3491,1540, # 2624
2730,1201,2071,2406,1997,2482,5309,4638, 528,1923,2191,1503,1874,1570,2369,3379, # 2640
3309,5310, 557,1073,5311,1828,3492,2088,2271,3165,3059,3107, 767,3108,2799,4639, # 2656
1006,4302,4640,2346,1267,2179,3730,3230, 778,4048,3231,2731,1597,2667,5312,4641, # 2672
5313,3493,5314,5315,5316,3310,2698,1433,3311, 131, 95,1504,4049, 723,4303,3166, # 2688
1842,3610,2768,2192,4050,2028,2105,3731,5317,3013,4051,1218,5318,3380,3232,4052, # 2704
4304,2584, 248,1634,3864, 912,5319,2845,3732,3060,3865, 654, 53,5320,3014,5321, # 2720
1688,4642, 777,3494,1032,4053,1425,5322, 191, 820,2121,2846, 971,4643, 931,3233, # 2736
135, 664, 783,3866,1998, 772,2922,1936,4054,3867,4644,2923,3234, 282,2732, 640, # 2752
1372,3495,1127, 922, 325,3381,5323,5324, 711,2045,5325,5326,4055,2223,2800,1937, # 2768
4056,3382,2224,2255,3868,2305,5327,4645,3869,1258,3312,4057,3235,2139,2965,4058, # 2784
4059,5328,2225, 258,3236,4646, 101,1227,5329,3313,1755,5330,1391,3314,5331,2924, # 2800
2057, 893,5332,5333,5334,1402,4305,2347,5335,5336,3237,3611,5337,5338, 878,1325, # 2816
1781,2801,4647, 259,1385,2585, 744,1183,2272,4648,5339,4060,2509,5340, 684,1024, # 2832
4306,5341, 472,3612,3496,1165,3315,4061,4062, 322,2153, 881, 455,1695,1152,1340, # 2848
660, 554,2154,4649,1058,4650,4307, 830,1065,3383,4063,4651,1924,5342,1703,1919, # 2864
5343, 932,2273, 122,5344,4652, 947, 677,5345,3870,2637, 297,1906,1925,2274,4653, # 2880
2322,3316,5346,5347,4308,5348,4309, 84,4310, 112, 989,5349, 547,1059,4064, 701, # 2896
3613,1019,5350,4311,5351,3497, 942, 639, 457,2306,2456, 993,2966, 407, 851, 494, # 2912
4654,3384, 927,5352,1237,5353,2426,3385, 573,4312, 680, 921,2925,1279,1875, 285, # 2928
790,1448,1984, 719,2168,5354,5355,4655,4065,4066,1649,5356,1541, 563,5357,1077, # 2944
5358,3386,3061,3498, 511,3015,4067,4068,3733,4069,1268,2572,3387,3238,4656,4657, # 2960
5359, 535,1048,1276,1189,2926,2029,3167,1438,1373,2847,2967,1134,2013,5360,4313, # 2976
1238,2586,3109,1259,5361, 700,5362,2968,3168,3734,4314,5363,4315,1146,1876,1907, # 2992
4658,2611,4070, 781,2427, 132,1589, 203, 147, 273,2802,2407, 898,1787,2155,4071, # 3008
4072,5364,3871,2803,5365,5366,4659,4660,5367,3239,5368,1635,3872, 965,5369,1805, # 3024
2699,1516,3614,1121,1082,1329,3317,4073,1449,3873, 65,1128,2848,2927,2769,1590, # 3040
3874,5370,5371, 12,2668, 45, 976,2587,3169,4661, 517,2535,1013,1037,3240,5372, # 3056
3875,2849,5373,3876,5374,3499,5375,2612, 614,1999,2323,3877,3110,2733,2638,5376, # 3072
2588,4316, 599,1269,5377,1811,3735,5378,2700,3111, 759,1060, 489,1806,3388,3318, # 3088
1358,5379,5380,2391,1387,1215,2639,2256, 490,5381,5382,4317,1759,2392,2348,5383, # 3104
4662,3878,1908,4074,2640,1807,3241,4663,3500,3319,2770,2349, 874,5384,5385,3501, # 3120
3736,1859, 91,2928,3737,3062,3879,4664,5386,3170,4075,2669,5387,3502,1202,1403, # 3136
3880,2969,2536,1517,2510,4665,3503,2511,5388,4666,5389,2701,1886,1495,1731,4076, # 3152
2370,4667,5390,2030,5391,5392,4077,2702,1216, 237,2589,4318,2324,4078,3881,4668, # 3168
4669,2703,3615,3504, 445,4670,5393,5394,5395,5396,2771, 61,4079,3738,1823,4080, # 3184
5397, 687,2046, 935, 925, 405,2670, 703,1096,1860,2734,4671,4081,1877,1367,2704, # 3200
3389, 918,2106,1782,2483, 334,3320,1611,1093,4672, 564,3171,3505,3739,3390, 945, # 3216
2641,2058,4673,5398,1926, 872,4319,5399,3506,2705,3112, 349,4320,3740,4082,4674, # 3232
3882,4321,3741,2156,4083,4675,4676,4322,4677,2408,2047, 782,4084, 400, 251,4323, # 3248
1624,5400,5401, 277,3742, 299,1265, 476,1191,3883,2122,4324,4325,1109, 205,5402, # 3264
2590,1000,2157,3616,1861,5403,5404,5405,4678,5406,4679,2573, 107,2484,2158,4085, # 3280
3507,3172,5407,1533, 541,1301, 158, 753,4326,2886,3617,5408,1696, 370,1088,4327, # 3296
4680,3618, 579, 327, 440, 162,2244, 269,1938,1374,3508, 968,3063, 56,1396,3113, # 3312
2107,3321,3391,5409,1927,2159,4681,3016,5410,3619,5411,5412,3743,4682,2485,5413, # 3328
2804,5414,1650,4683,5415,2613,5416,5417,4086,2671,3392,1149,3393,4087,3884,4088, # 3344
5418,1076, 49,5419, 951,3242,3322,3323, 450,2850, 920,5420,1812,2805,2371,4328, # 3360
1909,1138,2372,3885,3509,5421,3243,4684,1910,1147,1518,2428,4685,3886,5422,4686, # 3376
2393,2614, 260,1796,3244,5423,5424,3887,3324, 708,5425,3620,1704,5426,3621,1351, # 3392
1618,3394,3017,1887, 944,4329,3395,4330,3064,3396,4331,5427,3744, 422, 413,1714, # 3408
3325, 500,2059,2350,4332,2486,5428,1344,1911, 954,5429,1668,5430,5431,4089,2409, # 3424
4333,3622,3888,4334,5432,2307,1318,2512,3114, 133,3115,2887,4687, 629, 31,2851, # 3440
2706,3889,4688, 850, 949,4689,4090,2970,1732,2089,4335,1496,1853,5433,4091, 620, # 3456
3245, 981,1242,3745,3397,1619,3746,1643,3326,2140,2457,1971,1719,3510,2169,5434, # 3472
3246,5435,5436,3398,1829,5437,1277,4690,1565,2048,5438,1636,3623,3116,5439, 869, # 3488
2852, 655,3890,3891,3117,4092,3018,3892,1310,3624,4691,5440,5441,5442,1733, 558, # 3504
4692,3747, 335,1549,3065,1756,4336,3748,1946,3511,1830,1291,1192, 470,2735,2108, # 3520
2806, 913,1054,4093,5443,1027,5444,3066,4094,4693, 982,2672,3399,3173,3512,3247, # 3536
3248,1947,2807,5445, 571,4694,5446,1831,5447,3625,2591,1523,2429,5448,2090, 984, # 3552
4695,3749,1960,5449,3750, 852, 923,2808,3513,3751, 969,1519, 999,2049,2325,1705, # 3568
5450,3118, 615,1662, 151, 597,4095,2410,2326,1049, 275,4696,3752,4337, 568,3753, # 3584
3626,2487,4338,3754,5451,2430,2275, 409,3249,5452,1566,2888,3514,1002, 769,2853, # 3600
194,2091,3174,3755,2226,3327,4339, 628,1505,5453,5454,1763,2180,3019,4096, 521, # 3616
1161,2592,1788,2206,2411,4697,4097,1625,4340,4341, 412, 42,3119, 464,5455,2642, # 3632
4698,3400,1760,1571,2889,3515,2537,1219,2207,3893,2643,2141,2373,4699,4700,3328, # 3648
1651,3401,3627,5456,5457,3628,2488,3516,5458,3756,5459,5460,2276,2092, 460,5461, # 3664
4701,5462,3020, 962, 588,3629, 289,3250,2644,1116, 52,5463,3067,1797,5464,5465, # 3680
5466,1467,5467,1598,1143,3757,4342,1985,1734,1067,4702,1280,3402, 465,4703,1572, # 3696
510,5468,1928,2245,1813,1644,3630,5469,4704,3758,5470,5471,2673,1573,1534,5472, # 3712
5473, 536,1808,1761,3517,3894,3175,2645,5474,5475,5476,4705,3518,2929,1912,2809, # 3728
5477,3329,1122, 377,3251,5478, 360,5479,5480,4343,1529, 551,5481,2060,3759,1769, # 3744
2431,5482,2930,4344,3330,3120,2327,2109,2031,4706,1404, 136,1468,1479, 672,1171, # 3760
3252,2308, 271,3176,5483,2772,5484,2050, 678,2736, 865,1948,4707,5485,2014,4098, # 3776
2971,5486,2737,2227,1397,3068,3760,4708,4709,1735,2931,3403,3631,5487,3895, 509, # 3792
2854,2458,2890,3896,5488,5489,3177,3178,4710,4345,2538,4711,2309,1166,1010, 552, # 3808
681,1888,5490,5491,2972,2973,4099,1287,1596,1862,3179, 358, 453, 736, 175, 478, # 3824
1117, 905,1167,1097,5492,1854,1530,5493,1706,5494,2181,3519,2292,3761,3520,3632, # 3840
4346,2093,4347,5495,3404,1193,2489,4348,1458,2193,2208,1863,1889,1421,3331,2932, # 3856
3069,2182,3521, 595,2123,5496,4100,5497,5498,4349,1707,2646, 223,3762,1359, 751, # 3872
3121, 183,3522,5499,2810,3021, 419,2374, 633, 704,3897,2394, 241,5500,5501,5502, # 3888
838,3022,3763,2277,2773,2459,3898,1939,2051,4101,1309,3122,2246,1181,5503,1136, # 3904
2209,3899,2375,1446,4350,2310,4712,5504,5505,4351,1055,2615, 484,3764,5506,4102, # 3920
625,4352,2278,3405,1499,4353,4103,5507,4104,4354,3253,2279,2280,3523,5508,5509, # 3936
2774, 808,2616,3765,3406,4105,4355,3123,2539, 526,3407,3900,4356, 955,5510,1620, # 3952
4357,2647,2432,5511,1429,3766,1669,1832, 994, 928,5512,3633,1260,5513,5514,5515, # 3968
1949,2293, 741,2933,1626,4358,2738,2460, 867,1184, 362,3408,1392,5516,5517,4106, # 3984
4359,1770,1736,3254,2934,4713,4714,1929,2707,1459,1158,5518,3070,3409,2891,1292, # 4000
1930,2513,2855,3767,1986,1187,2072,2015,2617,4360,5519,2574,2514,2170,3768,2490, # 4016
3332,5520,3769,4715,5521,5522, 666,1003,3023,1022,3634,4361,5523,4716,1814,2257, # 4032
574,3901,1603, 295,1535, 705,3902,4362, 283, 858, 417,5524,5525,3255,4717,4718, # 4048
3071,1220,1890,1046,2281,2461,4107,1393,1599, 689,2575, 388,4363,5526,2491, 802, # 4064
5527,2811,3903,2061,1405,2258,5528,4719,3904,2110,1052,1345,3256,1585,5529, 809, # 4080
5530,5531,5532, 575,2739,3524, 956,1552,1469,1144,2328,5533,2329,1560,2462,3635, # 4096
3257,4108, 616,2210,4364,3180,2183,2294,5534,1833,5535,3525,4720,5536,1319,3770, # 4112
3771,1211,3636,1023,3258,1293,2812,5537,5538,5539,3905, 607,2311,3906, 762,2892, # 4128
1439,4365,1360,4721,1485,3072,5540,4722,1038,4366,1450,2062,2648,4367,1379,4723, # 4144
2593,5541,5542,4368,1352,1414,2330,2935,1172,5543,5544,3907,3908,4724,1798,1451, # 4160
5545,5546,5547,5548,2936,4109,4110,2492,2351, 411,4111,4112,3637,3333,3124,4725, # 4176
1561,2674,1452,4113,1375,5549,5550, 47,2974, 316,5551,1406,1591,2937,3181,5552, # 4192
1025,2142,3125,3182, 354,2740, 884,2228,4369,2412, 508,3772, 726,3638, 996,2433, # 4208
3639, 729,5553, 392,2194,1453,4114,4726,3773,5554,5555,2463,3640,2618,1675,2813, # 4224
919,2352,2975,2353,1270,4727,4115, 73,5556,5557, 647,5558,3259,2856,2259,1550, # 4240
1346,3024,5559,1332, 883,3526,5560,5561,5562,5563,3334,2775,5564,1212, 831,1347, # 4256
4370,4728,2331,3909,1864,3073, 720,3910,4729,4730,3911,5565,4371,5566,5567,4731, # 4272
5568,5569,1799,4732,3774,2619,4733,3641,1645,2376,4734,5570,2938, 669,2211,2675, # 4288
2434,5571,2893,5572,5573,1028,3260,5574,4372,2413,5575,2260,1353,5576,5577,4735, # 4304
3183, 518,5578,4116,5579,4373,1961,5580,2143,4374,5581,5582,3025,2354,2355,3912, # 4320
516,1834,1454,4117,2708,4375,4736,2229,2620,1972,1129,3642,5583,2776,5584,2976, # 4336
1422, 577,1470,3026,1524,3410,5585,5586, 432,4376,3074,3527,5587,2594,1455,2515, # 4352
2230,1973,1175,5588,1020,2741,4118,3528,4737,5589,2742,5590,1743,1361,3075,3529, # 4368
2649,4119,4377,4738,2295, 895, 924,4378,2171, 331,2247,3076, 166,1627,3077,1098, # 4384
5591,1232,2894,2231,3411,4739, 657, 403,1196,2377, 542,3775,3412,1600,4379,3530, # 4400
5592,4740,2777,3261, 576, 530,1362,4741,4742,2540,2676,3776,4120,5593, 842,3913, # 4416
5594,2814,2032,1014,4121, 213,2709,3413, 665, 621,4380,5595,3777,2939,2435,5596, # 4432
2436,3335,3643,3414,4743,4381,2541,4382,4744,3644,1682,4383,3531,1380,5597, 724, # 4448
2282, 600,1670,5598,1337,1233,4745,3126,2248,5599,1621,4746,5600, 651,4384,5601, # 4464
1612,4385,2621,5602,2857,5603,2743,2312,3078,5604, 716,2464,3079, 174,1255,2710, # 4480
4122,3645, 548,1320,1398, 728,4123,1574,5605,1891,1197,3080,4124,5606,3081,3082, # 4496
3778,3646,3779, 747,5607, 635,4386,4747,5608,5609,5610,4387,5611,5612,4748,5613, # 4512
3415,4749,2437, 451,5614,3780,2542,2073,4388,2744,4389,4125,5615,1764,4750,5616, # 4528
4390, 350,4751,2283,2395,2493,5617,4391,4126,2249,1434,4127, 488,4752, 458,4392, # 4544
4128,3781, 771,1330,2396,3914,2576,3184,2160,2414,1553,2677,3185,4393,5618,2494, # 4560
2895,2622,1720,2711,4394,3416,4753,5619,2543,4395,5620,3262,4396,2778,5621,2016, # 4576
2745,5622,1155,1017,3782,3915,5623,3336,2313, 201,1865,4397,1430,5624,4129,5625, # 4592
5626,5627,5628,5629,4398,1604,5630, 414,1866, 371,2595,4754,4755,3532,2017,3127, # 4608
4756,1708, 960,4399, 887, 389,2172,1536,1663,1721,5631,2232,4130,2356,2940,1580, # 4624
5632,5633,1744,4757,2544,4758,4759,5634,4760,5635,2074,5636,4761,3647,3417,2896, # 4640
4400,5637,4401,2650,3418,2815, 673,2712,2465, 709,3533,4131,3648,4402,5638,1148, # 4656
502, 634,5639,5640,1204,4762,3649,1575,4763,2623,3783,5641,3784,3128, 948,3263, # 4672
121,1745,3916,1110,5642,4403,3083,2516,3027,4132,3785,1151,1771,3917,1488,4133, # 4688
1987,5643,2438,3534,5644,5645,2094,5646,4404,3918,1213,1407,2816, 531,2746,2545, # 4704
3264,1011,1537,4764,2779,4405,3129,1061,5647,3786,3787,1867,2897,5648,2018, 120, # 4720
4406,4407,2063,3650,3265,2314,3919,2678,3419,1955,4765,4134,5649,3535,1047,2713, # 4736
1266,5650,1368,4766,2858, 649,3420,3920,2546,2747,1102,2859,2679,5651,5652,2000, # 4752
5653,1111,3651,2977,5654,2495,3921,3652,2817,1855,3421,3788,5655,5656,3422,2415, # 4768
2898,3337,3266,3653,5657,2577,5658,3654,2818,4135,1460, 856,5659,3655,5660,2899, # 4784
2978,5661,2900,3922,5662,4408, 632,2517, 875,3923,1697,3924,2296,5663,5664,4767, # 4800
3028,1239, 580,4768,4409,5665, 914, 936,2075,1190,4136,1039,2124,5666,5667,5668, # 4816
5669,3423,1473,5670,1354,4410,3925,4769,2173,3084,4137, 915,3338,4411,4412,3339, # 4832
1605,1835,5671,2748, 398,3656,4413,3926,4138, 328,1913,2860,4139,3927,1331,4414, # 4848
3029, 937,4415,5672,3657,4140,4141,3424,2161,4770,3425, 524, 742, 538,3085,1012, # 4864
5673,5674,3928,2466,5675, 658,1103, 225,3929,5676,5677,4771,5678,4772,5679,3267, # 4880
1243,5680,4142, 963,2250,4773,5681,2714,3658,3186,5682,5683,2596,2332,5684,4774, # 4896
5685,5686,5687,3536, 957,3426,2547,2033,1931,2941,2467, 870,2019,3659,1746,2780, # 4912
2781,2439,2468,5688,3930,5689,3789,3130,3790,3537,3427,3791,5690,1179,3086,5691, # 4928
3187,2378,4416,3792,2548,3188,3131,2749,4143,5692,3428,1556,2549,2297, 977,2901, # 4944
2034,4144,1205,3429,5693,1765,3430,3189,2125,1271, 714,1689,4775,3538,5694,2333, # 4960
3931, 533,4417,3660,2184, 617,5695,2469,3340,3539,2315,5696,5697,3190,5698,5699, # 4976
3932,1988, 618, 427,2651,3540,3431,5700,5701,1244,1690,5702,2819,4418,4776,5703, # 4992
3541,4777,5704,2284,1576, 473,3661,4419,3432, 972,5705,3662,5706,3087,5707,5708, # 5008
4778,4779,5709,3793,4145,4146,5710, 153,4780, 356,5711,1892,2902,4420,2144, 408, # 5024
803,2357,5712,3933,5713,4421,1646,2578,2518,4781,4782,3934,5714,3935,4422,5715, # 5040
2416,3433, 752,5716,5717,1962,3341,2979,5718, 746,3030,2470,4783,4423,3794, 698, # 5056
4784,1893,4424,3663,2550,4785,3664,3936,5719,3191,3434,5720,1824,1302,4147,2715, # 5072
3937,1974,4425,5721,4426,3192, 823,1303,1288,1236,2861,3542,4148,3435, 774,3938, # 5088
5722,1581,4786,1304,2862,3939,4787,5723,2440,2162,1083,3268,4427,4149,4428, 344, # 5104
1173, 288,2316, 454,1683,5724,5725,1461,4788,4150,2597,5726,5727,4789, 985, 894, # 5120
5728,3436,3193,5729,1914,2942,3795,1989,5730,2111,1975,5731,4151,5732,2579,1194, # 5136
425,5733,4790,3194,1245,3796,4429,5734,5735,2863,5736, 636,4791,1856,3940, 760, # 5152
1800,5737,4430,2212,1508,4792,4152,1894,1684,2298,5738,5739,4793,4431,4432,2213, # 5168
479,5740,5741, 832,5742,4153,2496,5743,2980,2497,3797, 990,3132, 627,1815,2652, # 5184
4433,1582,4434,2126,2112,3543,4794,5744, 799,4435,3195,5745,4795,2113,1737,3031, # 5200
1018, 543, 754,4436,3342,1676,4796,4797,4154,4798,1489,5746,3544,5747,2624,2903, # 5216
4155,5748,5749,2981,5750,5751,5752,5753,3196,4799,4800,2185,1722,5754,3269,3270, # 5232
1843,3665,1715, 481, 365,1976,1857,5755,5756,1963,2498,4801,5757,2127,3666,3271, # 5248
433,1895,2064,2076,5758, 602,2750,5759,5760,5761,5762,5763,3032,1628,3437,5764, # 5264
3197,4802,4156,2904,4803,2519,5765,2551,2782,5766,5767,5768,3343,4804,2905,5769, # 5280
4805,5770,2864,4806,4807,1221,2982,4157,2520,5771,5772,5773,1868,1990,5774,5775, # 5296
5776,1896,5777,5778,4808,1897,4158, 318,5779,2095,4159,4437,5780,5781, 485,5782, # 5312
938,3941, 553,2680, 116,5783,3942,3667,5784,3545,2681,2783,3438,3344,2820,5785, # 5328
3668,2943,4160,1747,2944,2983,5786,5787, 207,5788,4809,5789,4810,2521,5790,3033, # 5344
890,3669,3943,5791,1878,3798,3439,5792,2186,2358,3440,1652,5793,5794,5795, 941, # 5360
2299, 208,3546,4161,2020, 330,4438,3944,2906,2499,3799,4439,4811,5796,5797,5798, # 5376 #last 512
# Everything below this point is of no interest for detection purposes
2522,1613,4812,5799,3345,3945,2523,5800,4162,5801,1637,4163,2471,4813,3946,5802, # 5392
2500,3034,3800,5803,5804,2195,4814,5805,2163,5806,5807,5808,5809,5810,5811,5812, # 5408
5813,5814,5815,5816,5817,5818,5819,5820,5821,5822,5823,5824,5825,5826,5827,5828, # 5424
5829,5830,5831,5832,5833,5834,5835,5836,5837,5838,5839,5840,5841,5842,5843,5844, # 5440
5845,5846,5847,5848,5849,5850,5851,5852,5853,5854,5855,5856,5857,5858,5859,5860, # 5456
5861,5862,5863,5864,5865,5866,5867,5868,5869,5870,5871,5872,5873,5874,5875,5876, # 5472
5877,5878,5879,5880,5881,5882,5883,5884,5885,5886,5887,5888,5889,5890,5891,5892, # 5488
5893,5894,5895,5896,5897,5898,5899,5900,5901,5902,5903,5904,5905,5906,5907,5908, # 5504
5909,5910,5911,5912,5913,5914,5915,5916,5917,5918,5919,5920,5921,5922,5923,5924, # 5520
5925,5926,5927,5928,5929,5930,5931,5932,5933,5934,5935,5936,5937,5938,5939,5940, # 5536
5941,5942,5943,5944,5945,5946,5947,5948,5949,5950,5951,5952,5953,5954,5955,5956, # 5552
5957,5958,5959,5960,5961,5962,5963,5964,5965,5966,5967,5968,5969,5970,5971,5972, # 5568
5973,5974,5975,5976,5977,5978,5979,5980,5981,5982,5983,5984,5985,5986,5987,5988, # 5584
5989,5990,5991,5992,5993,5994,5995,5996,5997,5998,5999,6000,6001,6002,6003,6004, # 5600
6005,6006,6007,6008,6009,6010,6011,6012,6013,6014,6015,6016,6017,6018,6019,6020, # 5616
6021,6022,6023,6024,6025,6026,6027,6028,6029,6030,6031,6032,6033,6034,6035,6036, # 5632
6037,6038,6039,6040,6041,6042,6043,6044,6045,6046,6047,6048,6049,6050,6051,6052, # 5648
6053,6054,6055,6056,6057,6058,6059,6060,6061,6062,6063,6064,6065,6066,6067,6068, # 5664
6069,6070,6071,6072,6073,6074,6075,6076,6077,6078,6079,6080,6081,6082,6083,6084, # 5680
6085,6086,6087,6088,6089,6090,6091,6092,6093,6094,6095,6096,6097,6098,6099,6100, # 5696
6101,6102,6103,6104,6105,6106,6107,6108,6109,6110,6111,6112,6113,6114,6115,6116, # 5712
6117,6118,6119,6120,6121,6122,6123,6124,6125,6126,6127,6128,6129,6130,6131,6132, # 5728
6133,6134,6135,6136,6137,6138,6139,6140,6141,6142,6143,6144,6145,6146,6147,6148, # 5744
6149,6150,6151,6152,6153,6154,6155,6156,6157,6158,6159,6160,6161,6162,6163,6164, # 5760
6165,6166,6167,6168,6169,6170,6171,6172,6173,6174,6175,6176,6177,6178,6179,6180, # 5776
6181,6182,6183,6184,6185,6186,6187,6188,6189,6190,6191,6192,6193,6194,6195,6196, # 5792
6197,6198,6199,6200,6201,6202,6203,6204,6205,6206,6207,6208,6209,6210,6211,6212, # 5808
6213,6214,6215,6216,6217,6218,6219,6220,6221,6222,6223,3670,6224,6225,6226,6227, # 5824
6228,6229,6230,6231,6232,6233,6234,6235,6236,6237,6238,6239,6240,6241,6242,6243, # 5840
6244,6245,6246,6247,6248,6249,6250,6251,6252,6253,6254,6255,6256,6257,6258,6259, # 5856
6260,6261,6262,6263,6264,6265,6266,6267,6268,6269,6270,6271,6272,6273,6274,6275, # 5872
6276,6277,6278,6279,6280,6281,6282,6283,6284,6285,4815,6286,6287,6288,6289,6290, # 5888
6291,6292,4816,6293,6294,6295,6296,6297,6298,6299,6300,6301,6302,6303,6304,6305, # 5904
6306,6307,6308,6309,6310,6311,4817,4818,6312,6313,6314,6315,6316,6317,6318,4819, # 5920
6319,6320,6321,6322,6323,6324,6325,6326,6327,6328,6329,6330,6331,6332,6333,6334, # 5936
6335,6336,6337,4820,6338,6339,6340,6341,6342,6343,6344,6345,6346,6347,6348,6349, # 5952
6350,6351,6352,6353,6354,6355,6356,6357,6358,6359,6360,6361,6362,6363,6364,6365, # 5968
6366,6367,6368,6369,6370,6371,6372,6373,6374,6375,6376,6377,6378,6379,6380,6381, # 5984
6382,6383,6384,6385,6386,6387,6388,6389,6390,6391,6392,6393,6394,6395,6396,6397, # 6000
6398,6399,6400,6401,6402,6403,6404,6405,6406,6407,6408,6409,6410,3441,6411,6412, # 6016
6413,6414,6415,6416,6417,6418,6419,6420,6421,6422,6423,6424,6425,4440,6426,6427, # 6032
6428,6429,6430,6431,6432,6433,6434,6435,6436,6437,6438,6439,6440,6441,6442,6443, # 6048
6444,6445,6446,6447,6448,6449,6450,6451,6452,6453,6454,4821,6455,6456,6457,6458, # 6064
6459,6460,6461,6462,6463,6464,6465,6466,6467,6468,6469,6470,6471,6472,6473,6474, # 6080
6475,6476,6477,3947,3948,6478,6479,6480,6481,3272,4441,6482,6483,6484,6485,4442, # 6096
6486,6487,6488,6489,6490,6491,6492,6493,6494,6495,6496,4822,6497,6498,6499,6500, # 6112
6501,6502,6503,6504,6505,6506,6507,6508,6509,6510,6511,6512,6513,6514,6515,6516, # 6128
6517,6518,6519,6520,6521,6522,6523,6524,6525,6526,6527,6528,6529,6530,6531,6532, # 6144
6533,6534,6535,6536,6537,6538,6539,6540,6541,6542,6543,6544,6545,6546,6547,6548, # 6160
6549,6550,6551,6552,6553,6554,6555,6556,2784,6557,4823,6558,6559,6560,6561,6562, # 6176
6563,6564,6565,6566,6567,6568,6569,3949,6570,6571,6572,4824,6573,6574,6575,6576, # 6192
6577,6578,6579,6580,6581,6582,6583,4825,6584,6585,6586,3950,2785,6587,6588,6589, # 6208
6590,6591,6592,6593,6594,6595,6596,6597,6598,6599,6600,6601,6602,6603,6604,6605, # 6224
6606,6607,6608,6609,6610,6611,6612,4826,6613,6614,6615,4827,6616,6617,6618,6619, # 6240
6620,6621,6622,6623,6624,6625,4164,6626,6627,6628,6629,6630,6631,6632,6633,6634, # 6256
3547,6635,4828,6636,6637,6638,6639,6640,6641,6642,3951,2984,6643,6644,6645,6646, # 6272
6647,6648,6649,4165,6650,4829,6651,6652,4830,6653,6654,6655,6656,6657,6658,6659, # 6288
6660,6661,6662,4831,6663,6664,6665,6666,6667,6668,6669,6670,6671,4166,6672,4832, # 6304
3952,6673,6674,6675,6676,4833,6677,6678,6679,4167,6680,6681,6682,3198,6683,6684, # 6320
6685,6686,6687,6688,6689,6690,6691,6692,6693,6694,6695,6696,6697,4834,6698,6699, # 6336
6700,6701,6702,6703,6704,6705,6706,6707,6708,6709,6710,6711,6712,6713,6714,6715, # 6352
6716,6717,6718,6719,6720,6721,6722,6723,6724,6725,6726,6727,6728,6729,6730,6731, # 6368
6732,6733,6734,4443,6735,6736,6737,6738,6739,6740,6741,6742,6743,6744,6745,4444, # 6384
6746,6747,6748,6749,6750,6751,6752,6753,6754,6755,6756,6757,6758,6759,6760,6761, # 6400
6762,6763,6764,6765,6766,6767,6768,6769,6770,6771,6772,6773,6774,6775,6776,6777, # 6416
6778,6779,6780,6781,4168,6782,6783,3442,6784,6785,6786,6787,6788,6789,6790,6791, # 6432
4169,6792,6793,6794,6795,6796,6797,6798,6799,6800,6801,6802,6803,6804,6805,6806, # 6448
6807,6808,6809,6810,6811,4835,6812,6813,6814,4445,6815,6816,4446,6817,6818,6819, # 6464
6820,6821,6822,6823,6824,6825,6826,6827,6828,6829,6830,6831,6832,6833,6834,6835, # 6480
3548,6836,6837,6838,6839,6840,6841,6842,6843,6844,6845,6846,4836,6847,6848,6849, # 6496
6850,6851,6852,6853,6854,3953,6855,6856,6857,6858,6859,6860,6861,6862,6863,6864, # 6512
6865,6866,6867,6868,6869,6870,6871,6872,6873,6874,6875,6876,6877,3199,6878,6879, # 6528
6880,6881,6882,4447,6883,6884,6885,6886,6887,6888,6889,6890,6891,6892,6893,6894, # 6544
6895,6896,6897,6898,6899,6900,6901,6902,6903,6904,4170,6905,6906,6907,6908,6909, # 6560
6910,6911,6912,6913,6914,6915,6916,6917,6918,6919,6920,6921,6922,6923,6924,6925, # 6576
6926,6927,4837,6928,6929,6930,6931,6932,6933,6934,6935,6936,3346,6937,6938,4838, # 6592
6939,6940,6941,4448,6942,6943,6944,6945,6946,4449,6947,6948,6949,6950,6951,6952, # 6608
6953,6954,6955,6956,6957,6958,6959,6960,6961,6962,6963,6964,6965,6966,6967,6968, # 6624
6969,6970,6971,6972,6973,6974,6975,6976,6977,6978,6979,6980,6981,6982,6983,6984, # 6640
6985,6986,6987,6988,6989,6990,6991,6992,6993,6994,3671,6995,6996,6997,6998,4839, # 6656
6999,7000,7001,7002,3549,7003,7004,7005,7006,7007,7008,7009,7010,7011,7012,7013, # 6672
7014,7015,7016,7017,7018,7019,7020,7021,7022,7023,7024,7025,7026,7027,7028,7029, # 6688
7030,4840,7031,7032,7033,7034,7035,7036,7037,7038,4841,7039,7040,7041,7042,7043, # 6704
7044,7045,7046,7047,7048,7049,7050,7051,7052,7053,7054,7055,7056,7057,7058,7059, # 6720
7060,7061,7062,7063,7064,7065,7066,7067,7068,7069,7070,2985,7071,7072,7073,7074, # 6736
7075,7076,7077,7078,7079,7080,4842,7081,7082,7083,7084,7085,7086,7087,7088,7089, # 6752
7090,7091,7092,7093,7094,7095,7096,7097,7098,7099,7100,7101,7102,7103,7104,7105, # 6768
7106,7107,7108,7109,7110,7111,7112,7113,7114,7115,7116,7117,7118,4450,7119,7120, # 6784
7121,7122,7123,7124,7125,7126,7127,7128,7129,7130,7131,7132,7133,7134,7135,7136, # 6800
7137,7138,7139,7140,7141,7142,7143,4843,7144,7145,7146,7147,7148,7149,7150,7151, # 6816
7152,7153,7154,7155,7156,7157,7158,7159,7160,7161,7162,7163,7164,7165,7166,7167, # 6832
7168,7169,7170,7171,7172,7173,7174,7175,7176,7177,7178,7179,7180,7181,7182,7183, # 6848
7184,7185,7186,7187,7188,4171,4172,7189,7190,7191,7192,7193,7194,7195,7196,7197, # 6864
7198,7199,7200,7201,7202,7203,7204,7205,7206,7207,7208,7209,7210,7211,7212,7213, # 6880
7214,7215,7216,7217,7218,7219,7220,7221,7222,7223,7224,7225,7226,7227,7228,7229, # 6896
7230,7231,7232,7233,7234,7235,7236,7237,7238,7239,7240,7241,7242,7243,7244,7245, # 6912
7246,7247,7248,7249,7250,7251,7252,7253,7254,7255,7256,7257,7258,7259,7260,7261, # 6928
7262,7263,7264,7265,7266,7267,7268,7269,7270,7271,7272,7273,7274,7275,7276,7277, # 6944
7278,7279,7280,7281,7282,7283,7284,7285,7286,7287,7288,7289,7290,7291,7292,7293, # 6960
7294,7295,7296,4844,7297,7298,7299,7300,7301,7302,7303,7304,7305,7306,7307,7308, # 6976
7309,7310,7311,7312,7313,7314,7315,7316,4451,7317,7318,7319,7320,7321,7322,7323, # 6992
7324,7325,7326,7327,7328,7329,7330,7331,7332,7333,7334,7335,7336,7337,7338,7339, # 7008
7340,7341,7342,7343,7344,7345,7346,7347,7348,7349,7350,7351,7352,7353,4173,7354, # 7024
7355,4845,7356,7357,7358,7359,7360,7361,7362,7363,7364,7365,7366,7367,7368,7369, # 7040
7370,7371,7372,7373,7374,7375,7376,7377,7378,7379,7380,7381,7382,7383,7384,7385, # 7056
7386,7387,7388,4846,7389,7390,7391,7392,7393,7394,7395,7396,7397,7398,7399,7400, # 7072
7401,7402,7403,7404,7405,3672,7406,7407,7408,7409,7410,7411,7412,7413,7414,7415, # 7088
7416,7417,7418,7419,7420,7421,7422,7423,7424,7425,7426,7427,7428,7429,7430,7431, # 7104
7432,7433,7434,7435,7436,7437,7438,7439,7440,7441,7442,7443,7444,7445,7446,7447, # 7120
7448,7449,7450,7451,7452,7453,4452,7454,3200,7455,7456,7457,7458,7459,7460,7461, # 7136
7462,7463,7464,7465,7466,7467,7468,7469,7470,7471,7472,7473,7474,4847,7475,7476, # 7152
7477,3133,7478,7479,7480,7481,7482,7483,7484,7485,7486,7487,7488,7489,7490,7491, # 7168
7492,7493,7494,7495,7496,7497,7498,7499,7500,7501,7502,3347,7503,7504,7505,7506, # 7184
7507,7508,7509,7510,7511,7512,7513,7514,7515,7516,7517,7518,7519,7520,7521,4848, # 7200
7522,7523,7524,7525,7526,7527,7528,7529,7530,7531,7532,7533,7534,7535,7536,7537, # 7216
7538,7539,7540,7541,7542,7543,7544,7545,7546,7547,7548,7549,3801,4849,7550,7551, # 7232
7552,7553,7554,7555,7556,7557,7558,7559,7560,7561,7562,7563,7564,7565,7566,7567, # 7248
7568,7569,3035,7570,7571,7572,7573,7574,7575,7576,7577,7578,7579,7580,7581,7582, # 7264
7583,7584,7585,7586,7587,7588,7589,7590,7591,7592,7593,7594,7595,7596,7597,7598, # 7280
7599,7600,7601,7602,7603,7604,7605,7606,7607,7608,7609,7610,7611,7612,7613,7614, # 7296
7615,7616,4850,7617,7618,3802,7619,7620,7621,7622,7623,7624,7625,7626,7627,7628, # 7312
7629,7630,7631,7632,4851,7633,7634,7635,7636,7637,7638,7639,7640,7641,7642,7643, # 7328
7644,7645,7646,7647,7648,7649,7650,7651,7652,7653,7654,7655,7656,7657,7658,7659, # 7344
7660,7661,7662,7663,7664,7665,7666,7667,7668,7669,7670,4453,7671,7672,7673,7674, # 7360
7675,7676,7677,7678,7679,7680,7681,7682,7683,7684,7685,7686,7687,7688,7689,7690, # 7376
7691,7692,7693,7694,7695,7696,7697,3443,7698,7699,7700,7701,7702,4454,7703,7704, # 7392
7705,7706,7707,7708,7709,7710,7711,7712,7713,2472,7714,7715,7716,7717,7718,7719, # 7408
7720,7721,7722,7723,7724,7725,7726,7727,7728,7729,7730,7731,3954,7732,7733,7734, # 7424
7735,7736,7737,7738,7739,7740,7741,7742,7743,7744,7745,7746,7747,7748,7749,7750, # 7440
3134,7751,7752,4852,7753,7754,7755,4853,7756,7757,7758,7759,7760,4174,7761,7762, # 7456
7763,7764,7765,7766,7767,7768,7769,7770,7771,7772,7773,7774,7775,7776,7777,7778, # 7472
7779,7780,7781,7782,7783,7784,7785,7786,7787,7788,7789,7790,7791,7792,7793,7794, # 7488
7795,7796,7797,7798,7799,7800,7801,7802,7803,7804,7805,4854,7806,7807,7808,7809, # 7504
7810,7811,7812,7813,7814,7815,7816,7817,7818,7819,7820,7821,7822,7823,7824,7825, # 7520
4855,7826,7827,7828,7829,7830,7831,7832,7833,7834,7835,7836,7837,7838,7839,7840, # 7536
7841,7842,7843,7844,7845,7846,7847,3955,7848,7849,7850,7851,7852,7853,7854,7855, # 7552
7856,7857,7858,7859,7860,3444,7861,7862,7863,7864,7865,7866,7867,7868,7869,7870, # 7568
7871,7872,7873,7874,7875,7876,7877,7878,7879,7880,7881,7882,7883,7884,7885,7886, # 7584
7887,7888,7889,7890,7891,4175,7892,7893,7894,7895,7896,4856,4857,7897,7898,7899, # 7600
7900,2598,7901,7902,7903,7904,7905,7906,7907,7908,4455,7909,7910,7911,7912,7913, # 7616
7914,3201,7915,7916,7917,7918,7919,7920,7921,4858,7922,7923,7924,7925,7926,7927, # 7632
7928,7929,7930,7931,7932,7933,7934,7935,7936,7937,7938,7939,7940,7941,7942,7943, # 7648
7944,7945,7946,7947,7948,7949,7950,7951,7952,7953,7954,7955,7956,7957,7958,7959, # 7664
7960,7961,7962,7963,7964,7965,7966,7967,7968,7969,7970,7971,7972,7973,7974,7975, # 7680
7976,7977,7978,7979,7980,7981,4859,7982,7983,7984,7985,7986,7987,7988,7989,7990, # 7696
7991,7992,7993,7994,7995,7996,4860,7997,7998,7999,8000,8001,8002,8003,8004,8005, # 7712
8006,8007,8008,8009,8010,8011,8012,8013,8014,8015,8016,4176,8017,8018,8019,8020, # 7728
8021,8022,8023,4861,8024,8025,8026,8027,8028,8029,8030,8031,8032,8033,8034,8035, # 7744
8036,4862,4456,8037,8038,8039,8040,4863,8041,8042,8043,8044,8045,8046,8047,8048, # 7760
8049,8050,8051,8052,8053,8054,8055,8056,8057,8058,8059,8060,8061,8062,8063,8064, # 7776
8065,8066,8067,8068,8069,8070,8071,8072,8073,8074,8075,8076,8077,8078,8079,8080, # 7792
8081,8082,8083,8084,8085,8086,8087,8088,8089,8090,8091,8092,8093,8094,8095,8096, # 7808
8097,8098,8099,4864,4177,8100,8101,8102,8103,8104,8105,8106,8107,8108,8109,8110, # 7824
8111,8112,8113,8114,8115,8116,8117,8118,8119,8120,4178,8121,8122,8123,8124,8125, # 7840
8126,8127,8128,8129,8130,8131,8132,8133,8134,8135,8136,8137,8138,8139,8140,8141, # 7856
8142,8143,8144,8145,4865,4866,8146,8147,8148,8149,8150,8151,8152,8153,8154,8155, # 7872
8156,8157,8158,8159,8160,8161,8162,8163,8164,8165,4179,8166,8167,8168,8169,8170, # 7888
8171,8172,8173,8174,8175,8176,8177,8178,8179,8180,8181,4457,8182,8183,8184,8185, # 7904
8186,8187,8188,8189,8190,8191,8192,8193,8194,8195,8196,8197,8198,8199,8200,8201, # 7920
8202,8203,8204,8205,8206,8207,8208,8209,8210,8211,8212,8213,8214,8215,8216,8217, # 7936
8218,8219,8220,8221,8222,8223,8224,8225,8226,8227,8228,8229,8230,8231,8232,8233, # 7952
8234,8235,8236,8237,8238,8239,8240,8241,8242,8243,8244,8245,8246,8247,8248,8249, # 7968
8250,8251,8252,8253,8254,8255,8256,3445,8257,8258,8259,8260,8261,8262,4458,8263, # 7984
8264,8265,8266,8267,8268,8269,8270,8271,8272,4459,8273,8274,8275,8276,3550,8277, # 8000
8278,8279,8280,8281,8282,8283,8284,8285,8286,8287,8288,8289,4460,8290,8291,8292, # 8016
8293,8294,8295,8296,8297,8298,8299,8300,8301,8302,8303,8304,8305,8306,8307,4867, # 8032
8308,8309,8310,8311,8312,3551,8313,8314,8315,8316,8317,8318,8319,8320,8321,8322, # 8048
8323,8324,8325,8326,4868,8327,8328,8329,8330,8331,8332,8333,8334,8335,8336,8337, # 8064
8338,8339,8340,8341,8342,8343,8344,8345,8346,8347,8348,8349,8350,8351,8352,8353, # 8080
8354,8355,8356,8357,8358,8359,8360,8361,8362,8363,4869,4461,8364,8365,8366,8367, # 8096
8368,8369,8370,4870,8371,8372,8373,8374,8375,8376,8377,8378,8379,8380,8381,8382, # 8112
8383,8384,8385,8386,8387,8388,8389,8390,8391,8392,8393,8394,8395,8396,8397,8398, # 8128
8399,8400,8401,8402,8403,8404,8405,8406,8407,8408,8409,8410,4871,8411,8412,8413, # 8144
8414,8415,8416,8417,8418,8419,8420,8421,8422,4462,8423,8424,8425,8426,8427,8428, # 8160
8429,8430,8431,8432,8433,2986,8434,8435,8436,8437,8438,8439,8440,8441,8442,8443, # 8176
8444,8445,8446,8447,8448,8449,8450,8451,8452,8453,8454,8455,8456,8457,8458,8459, # 8192
8460,8461,8462,8463,8464,8465,8466,8467,8468,8469,8470,8471,8472,8473,8474,8475, # 8208
8476,8477,8478,4180,8479,8480,8481,8482,8483,8484,8485,8486,8487,8488,8489,8490, # 8224
8491,8492,8493,8494,8495,8496,8497,8498,8499,8500,8501,8502,8503,8504,8505,8506, # 8240
8507,8508,8509,8510,8511,8512,8513,8514,8515,8516,8517,8518,8519,8520,8521,8522, # 8256
8523,8524,8525,8526,8527,8528,8529,8530,8531,8532,8533,8534,8535,8536,8537,8538, # 8272
8539,8540,8541,8542,8543,8544,8545,8546,8547,8548,8549,8550,8551,8552,8553,8554, # 8288
8555,8556,8557,8558,8559,8560,8561,8562,8563,8564,4872,8565,8566,8567,8568,8569, # 8304
8570,8571,8572,8573,4873,8574,8575,8576,8577,8578,8579,8580,8581,8582,8583,8584, # 8320
8585,8586,8587,8588,8589,8590,8591,8592,8593,8594,8595,8596,8597,8598,8599,8600, # 8336
8601,8602,8603,8604,8605,3803,8606,8607,8608,8609,8610,8611,8612,8613,4874,3804, # 8352
8614,8615,8616,8617,8618,8619,8620,8621,3956,8622,8623,8624,8625,8626,8627,8628, # 8368
8629,8630,8631,8632,8633,8634,8635,8636,8637,8638,2865,8639,8640,8641,8642,8643, # 8384
8644,8645,8646,8647,8648,8649,8650,8651,8652,8653,8654,8655,8656,4463,8657,8658, # 8400
8659,4875,4876,8660,8661,8662,8663,8664,8665,8666,8667,8668,8669,8670,8671,8672, # 8416
8673,8674,8675,8676,8677,8678,8679,8680,8681,4464,8682,8683,8684,8685,8686,8687, # 8432
8688,8689,8690,8691,8692,8693,8694,8695,8696,8697,8698,8699,8700,8701,8702,8703, # 8448
8704,8705,8706,8707,8708,8709,2261,8710,8711,8712,8713,8714,8715,8716,8717,8718, # 8464
8719,8720,8721,8722,8723,8724,8725,8726,8727,8728,8729,8730,8731,8732,8733,4181, # 8480
8734,8735,8736,8737,8738,8739,8740,8741,8742,8743,8744,8745,8746,8747,8748,8749, # 8496
8750,8751,8752,8753,8754,8755,8756,8757,8758,8759,8760,8761,8762,8763,4877,8764, # 8512
8765,8766,8767,8768,8769,8770,8771,8772,8773,8774,8775,8776,8777,8778,8779,8780, # 8528
8781,8782,8783,8784,8785,8786,8787,8788,4878,8789,4879,8790,8791,8792,4880,8793, # 8544
8794,8795,8796,8797,8798,8799,8800,8801,4881,8802,8803,8804,8805,8806,8807,8808, # 8560
8809,8810,8811,8812,8813,8814,8815,3957,8816,8817,8818,8819,8820,8821,8822,8823, # 8576
8824,8825,8826,8827,8828,8829,8830,8831,8832,8833,8834,8835,8836,8837,8838,8839, # 8592
8840,8841,8842,8843,8844,8845,8846,8847,4882,8848,8849,8850,8851,8852,8853,8854, # 8608
8855,8856,8857,8858,8859,8860,8861,8862,8863,8864,8865,8866,8867,8868,8869,8870, # 8624
8871,8872,8873,8874,8875,8876,8877,8878,8879,8880,8881,8882,8883,8884,3202,8885, # 8640
8886,8887,8888,8889,8890,8891,8892,8893,8894,8895,8896,8897,8898,8899,8900,8901, # 8656
8902,8903,8904,8905,8906,8907,8908,8909,8910,8911,8912,8913,8914,8915,8916,8917, # 8672
8918,8919,8920,8921,8922,8923,8924,4465,8925,8926,8927,8928,8929,8930,8931,8932, # 8688
4883,8933,8934,8935,8936,8937,8938,8939,8940,8941,8942,8943,2214,8944,8945,8946, # 8704
8947,8948,8949,8950,8951,8952,8953,8954,8955,8956,8957,8958,8959,8960,8961,8962, # 8720
8963,8964,8965,4884,8966,8967,8968,8969,8970,8971,8972,8973,8974,8975,8976,8977, # 8736
8978,8979,8980,8981,8982,8983,8984,8985,8986,8987,8988,8989,8990,8991,8992,4885, # 8752
8993,8994,8995,8996,8997,8998,8999,9000,9001,9002,9003,9004,9005,9006,9007,9008, # 8768
9009,9010,9011,9012,9013,9014,9015,9016,9017,9018,9019,9020,9021,4182,9022,9023, # 8784
9024,9025,9026,9027,9028,9029,9030,9031,9032,9033,9034,9035,9036,9037,9038,9039, # 8800
9040,9041,9042,9043,9044,9045,9046,9047,9048,9049,9050,9051,9052,9053,9054,9055, # 8816
9056,9057,9058,9059,9060,9061,9062,9063,4886,9064,9065,9066,9067,9068,9069,4887, # 8832
9070,9071,9072,9073,9074,9075,9076,9077,9078,9079,9080,9081,9082,9083,9084,9085, # 8848
9086,9087,9088,9089,9090,9091,9092,9093,9094,9095,9096,9097,9098,9099,9100,9101, # 8864
9102,9103,9104,9105,9106,9107,9108,9109,9110,9111,9112,9113,9114,9115,9116,9117, # 8880
9118,9119,9120,9121,9122,9123,9124,9125,9126,9127,9128,9129,9130,9131,9132,9133, # 8896
9134,9135,9136,9137,9138,9139,9140,9141,3958,9142,9143,9144,9145,9146,9147,9148, # 8912
9149,9150,9151,4888,9152,9153,9154,9155,9156,9157,9158,9159,9160,9161,9162,9163, # 8928
9164,9165,9166,9167,9168,9169,9170,9171,9172,9173,9174,9175,4889,9176,9177,9178, # 8944
9179,9180,9181,9182,9183,9184,9185,9186,9187,9188,9189,9190,9191,9192,9193,9194, # 8960
9195,9196,9197,9198,9199,9200,9201,9202,9203,4890,9204,9205,9206,9207,9208,9209, # 8976
9210,9211,9212,9213,9214,9215,9216,9217,9218,9219,9220,9221,9222,4466,9223,9224, # 8992
9225,9226,9227,9228,9229,9230,9231,9232,9233,9234,9235,9236,9237,9238,9239,9240, # 9008
9241,9242,9243,9244,9245,4891,9246,9247,9248,9249,9250,9251,9252,9253,9254,9255, # 9024
9256,9257,4892,9258,9259,9260,9261,4893,4894,9262,9263,9264,9265,9266,9267,9268, # 9040
9269,9270,9271,9272,9273,4467,9274,9275,9276,9277,9278,9279,9280,9281,9282,9283, # 9056
9284,9285,3673,9286,9287,9288,9289,9290,9291,9292,9293,9294,9295,9296,9297,9298, # 9072
9299,9300,9301,9302,9303,9304,9305,9306,9307,9308,9309,9310,9311,9312,9313,9314, # 9088
9315,9316,9317,9318,9319,9320,9321,9322,4895,9323,9324,9325,9326,9327,9328,9329, # 9104
9330,9331,9332,9333,9334,9335,9336,9337,9338,9339,9340,9341,9342,9343,9344,9345, # 9120
9346,9347,4468,9348,9349,9350,9351,9352,9353,9354,9355,9356,9357,9358,9359,9360, # 9136
9361,9362,9363,9364,9365,9366,9367,9368,9369,9370,9371,9372,9373,4896,9374,4469, # 9152
9375,9376,9377,9378,9379,4897,9380,9381,9382,9383,9384,9385,9386,9387,9388,9389, # 9168
9390,9391,9392,9393,9394,9395,9396,9397,9398,9399,9400,9401,9402,9403,9404,9405, # 9184
9406,4470,9407,2751,9408,9409,3674,3552,9410,9411,9412,9413,9414,9415,9416,9417, # 9200
9418,9419,9420,9421,4898,9422,9423,9424,9425,9426,9427,9428,9429,3959,9430,9431, # 9216
9432,9433,9434,9435,9436,4471,9437,9438,9439,9440,9441,9442,9443,9444,9445,9446, # 9232
9447,9448,9449,9450,3348,9451,9452,9453,9454,9455,9456,9457,9458,9459,9460,9461, # 9248
9462,9463,9464,9465,9466,9467,9468,9469,9470,9471,9472,4899,9473,9474,9475,9476, # 9264
9477,4900,9478,9479,9480,9481,9482,9483,9484,9485,9486,9487,9488,3349,9489,9490, # 9280
9491,9492,9493,9494,9495,9496,9497,9498,9499,9500,9501,9502,9503,9504,9505,9506, # 9296
9507,9508,9509,9510,9511,9512,9513,9514,9515,9516,9517,9518,9519,9520,4901,9521, # 9312
9522,9523,9524,9525,9526,4902,9527,9528,9529,9530,9531,9532,9533,9534,9535,9536, # 9328
9537,9538,9539,9540,9541,9542,9543,9544,9545,9546,9547,9548,9549,9550,9551,9552, # 9344
9553,9554,9555,9556,9557,9558,9559,9560,9561,9562,9563,9564,9565,9566,9567,9568, # 9360
9569,9570,9571,9572,9573,9574,9575,9576,9577,9578,9579,9580,9581,9582,9583,9584, # 9376
3805,9585,9586,9587,9588,9589,9590,9591,9592,9593,9594,9595,9596,9597,9598,9599, # 9392
9600,9601,9602,4903,9603,9604,9605,9606,9607,4904,9608,9609,9610,9611,9612,9613, # 9408
9614,4905,9615,9616,9617,9618,9619,9620,9621,9622,9623,9624,9625,9626,9627,9628, # 9424
9629,9630,9631,9632,4906,9633,9634,9635,9636,9637,9638,9639,9640,9641,9642,9643, # 9440
4907,9644,9645,9646,9647,9648,9649,9650,9651,9652,9653,9654,9655,9656,9657,9658, # 9456
9659,9660,9661,9662,9663,9664,9665,9666,9667,9668,9669,9670,9671,9672,4183,9673, # 9472
9674,9675,9676,9677,4908,9678,9679,9680,9681,4909,9682,9683,9684,9685,9686,9687, # 9488
9688,9689,9690,4910,9691,9692,9693,3675,9694,9695,9696,2945,9697,9698,9699,9700, # 9504
9701,9702,9703,9704,9705,4911,9706,9707,9708,9709,9710,9711,9712,9713,9714,9715, # 9520
9716,9717,9718,9719,9720,9721,9722,9723,9724,9725,9726,9727,9728,9729,9730,9731, # 9536
9732,9733,9734,9735,4912,9736,9737,9738,9739,9740,4913,9741,9742,9743,9744,9745, # 9552
9746,9747,9748,9749,9750,9751,9752,9753,9754,9755,9756,9757,9758,4914,9759,9760, # 9568
9761,9762,9763,9764,9765,9766,9767,9768,9769,9770,9771,9772,9773,9774,9775,9776, # 9584
9777,9778,9779,9780,9781,9782,4915,9783,9784,9785,9786,9787,9788,9789,9790,9791, # 9600
9792,9793,4916,9794,9795,9796,9797,9798,9799,9800,9801,9802,9803,9804,9805,9806, # 9616
9807,9808,9809,9810,9811,9812,9813,9814,9815,9816,9817,9818,9819,9820,9821,9822, # 9632
9823,9824,9825,9826,9827,9828,9829,9830,9831,9832,9833,9834,9835,9836,9837,9838, # 9648
9839,9840,9841,9842,9843,9844,9845,9846,9847,9848,9849,9850,9851,9852,9853,9854, # 9664
9855,9856,9857,9858,9859,9860,9861,9862,9863,9864,9865,9866,9867,9868,4917,9869, # 9680
9870,9871,9872,9873,9874,9875,9876,9877,9878,9879,9880,9881,9882,9883,9884,9885, # 9696
9886,9887,9888,9889,9890,9891,9892,4472,9893,9894,9895,9896,9897,3806,9898,9899, # 9712
9900,9901,9902,9903,9904,9905,9906,9907,9908,9909,9910,9911,9912,9913,9914,4918, # 9728
9915,9916,9917,4919,9918,9919,9920,9921,4184,9922,9923,9924,9925,9926,9927,9928, # 9744
9929,9930,9931,9932,9933,9934,9935,9936,9937,9938,9939,9940,9941,9942,9943,9944, # 9760
9945,9946,4920,9947,9948,9949,9950,9951,9952,9953,9954,9955,4185,9956,9957,9958, # 9776
9959,9960,9961,9962,9963,9964,9965,4921,9966,9967,9968,4473,9969,9970,9971,9972, # 9792
9973,9974,9975,9976,9977,4474,9978,9979,9980,9981,9982,9983,9984,9985,9986,9987, # 9808
9988,9989,9990,9991,9992,9993,9994,9995,9996,9997,9998,9999,10000,10001,10002,10003, # 9824
10004,10005,10006,10007,10008,10009,10010,10011,10012,10013,10014,10015,10016,10017,10018,10019, # 9840
10020,10021,4922,10022,4923,10023,10024,10025,10026,10027,10028,10029,10030,10031,10032,10033, # 9856
10034,10035,10036,10037,10038,10039,10040,10041,10042,10043,10044,10045,10046,10047,10048,4924, # 9872
10049,10050,10051,10052,10053,10054,10055,10056,10057,10058,10059,10060,10061,10062,10063,10064, # 9888
10065,10066,10067,10068,10069,10070,10071,10072,10073,10074,10075,10076,10077,10078,10079,10080, # 9904
10081,10082,10083,10084,10085,10086,10087,4475,10088,10089,10090,10091,10092,10093,10094,10095, # 9920
10096,10097,4476,10098,10099,10100,10101,10102,10103,10104,10105,10106,10107,10108,10109,10110, # 9936
10111,2174,10112,10113,10114,10115,10116,10117,10118,10119,10120,10121,10122,10123,10124,10125, # 9952
10126,10127,10128,10129,10130,10131,10132,10133,10134,10135,10136,10137,10138,10139,10140,3807, # 9968
4186,4925,10141,10142,10143,10144,10145,10146,10147,4477,4187,10148,10149,10150,10151,10152, # 9984
10153,4188,10154,10155,10156,10157,10158,10159,10160,10161,4926,10162,10163,10164,10165,10166, #10000
10167,10168,10169,10170,10171,10172,10173,10174,10175,10176,10177,10178,10179,10180,10181,10182, #10016
10183,10184,10185,10186,10187,10188,10189,10190,10191,10192,3203,10193,10194,10195,10196,10197, #10032
10198,10199,10200,4478,10201,10202,10203,10204,4479,10205,10206,10207,10208,10209,10210,10211, #10048
10212,10213,10214,10215,10216,10217,10218,10219,10220,10221,10222,10223,10224,10225,10226,10227, #10064
10228,10229,10230,10231,10232,10233,10234,4927,10235,10236,10237,10238,10239,10240,10241,10242, #10080
10243,10244,10245,10246,10247,10248,10249,10250,10251,10252,10253,10254,10255,10256,10257,10258, #10096
10259,10260,10261,10262,10263,10264,10265,10266,10267,10268,10269,10270,10271,10272,10273,4480, #10112
4928,4929,10274,10275,10276,10277,10278,10279,10280,10281,10282,10283,10284,10285,10286,10287, #10128
10288,10289,10290,10291,10292,10293,10294,10295,10296,10297,10298,10299,10300,10301,10302,10303, #10144
10304,10305,10306,10307,10308,10309,10310,10311,10312,10313,10314,10315,10316,10317,10318,10319, #10160
10320,10321,10322,10323,10324,10325,10326,10327,10328,10329,10330,10331,10332,10333,10334,4930, #10176
10335,10336,10337,10338,10339,10340,10341,10342,4931,10343,10344,10345,10346,10347,10348,10349, #10192
10350,10351,10352,10353,10354,10355,3088,10356,2786,10357,10358,10359,10360,4189,10361,10362, #10208
10363,10364,10365,10366,10367,10368,10369,10370,10371,10372,10373,10374,10375,4932,10376,10377, #10224
10378,10379,10380,10381,10382,10383,10384,10385,10386,10387,10388,10389,10390,10391,10392,4933, #10240
10393,10394,10395,4934,10396,10397,10398,10399,10400,10401,10402,10403,10404,10405,10406,10407, #10256
10408,10409,10410,10411,10412,3446,10413,10414,10415,10416,10417,10418,10419,10420,10421,10422, #10272
10423,4935,10424,10425,10426,10427,10428,10429,10430,4936,10431,10432,10433,10434,10435,10436, #10288
10437,10438,10439,10440,10441,10442,10443,4937,10444,10445,10446,10447,4481,10448,10449,10450, #10304
10451,10452,10453,10454,10455,10456,10457,10458,10459,10460,10461,10462,10463,10464,10465,10466, #10320
10467,10468,10469,10470,10471,10472,10473,10474,10475,10476,10477,10478,10479,10480,10481,10482, #10336
10483,10484,10485,10486,10487,10488,10489,10490,10491,10492,10493,10494,10495,10496,10497,10498, #10352
10499,10500,10501,10502,10503,10504,10505,4938,10506,10507,10508,10509,10510,2552,10511,10512, #10368
10513,10514,10515,10516,3447,10517,10518,10519,10520,10521,10522,10523,10524,10525,10526,10527, #10384
10528,10529,10530,10531,10532,10533,10534,10535,10536,10537,10538,10539,10540,10541,10542,10543, #10400
4482,10544,4939,10545,10546,10547,10548,10549,10550,10551,10552,10553,10554,10555,10556,10557, #10416
10558,10559,10560,10561,10562,10563,10564,10565,10566,10567,3676,4483,10568,10569,10570,10571, #10432
10572,3448,10573,10574,10575,10576,10577,10578,10579,10580,10581,10582,10583,10584,10585,10586, #10448
10587,10588,10589,10590,10591,10592,10593,10594,10595,10596,10597,10598,10599,10600,10601,10602, #10464
10603,10604,10605,10606,10607,10608,10609,10610,10611,10612,10613,10614,10615,10616,10617,10618, #10480
10619,10620,10621,10622,10623,10624,10625,10626,10627,4484,10628,10629,10630,10631,10632,4940, #10496
10633,10634,10635,10636,10637,10638,10639,10640,10641,10642,10643,10644,10645,10646,10647,10648, #10512
10649,10650,10651,10652,10653,10654,10655,10656,4941,10657,10658,10659,2599,10660,10661,10662, #10528
10663,10664,10665,10666,3089,10667,10668,10669,10670,10671,10672,10673,10674,10675,10676,10677, #10544
10678,10679,10680,4942,10681,10682,10683,10684,10685,10686,10687,10688,10689,10690,10691,10692, #10560
10693,10694,10695,10696,10697,4485,10698,10699,10700,10701,10702,10703,10704,4943,10705,3677, #10576
10706,10707,10708,10709,10710,10711,10712,4944,10713,10714,10715,10716,10717,10718,10719,10720, #10592
10721,10722,10723,10724,10725,10726,10727,10728,4945,10729,10730,10731,10732,10733,10734,10735, #10608
10736,10737,10738,10739,10740,10741,10742,10743,10744,10745,10746,10747,10748,10749,10750,10751, #10624
10752,10753,10754,10755,10756,10757,10758,10759,10760,10761,4946,10762,10763,10764,10765,10766, #10640
10767,4947,4948,10768,10769,10770,10771,10772,10773,10774,10775,10776,10777,10778,10779,10780, #10656
10781,10782,10783,10784,10785,10786,10787,10788,10789,10790,10791,10792,10793,10794,10795,10796, #10672
10797,10798,10799,10800,10801,10802,10803,10804,10805,10806,10807,10808,10809,10810,10811,10812, #10688
10813,10814,10815,10816,10817,10818,10819,10820,10821,10822,10823,10824,10825,10826,10827,10828, #10704
10829,10830,10831,10832,10833,10834,10835,10836,10837,10838,10839,10840,10841,10842,10843,10844, #10720
10845,10846,10847,10848,10849,10850,10851,10852,10853,10854,10855,10856,10857,10858,10859,10860, #10736
10861,10862,10863,10864,10865,10866,10867,10868,10869,10870,10871,10872,10873,10874,10875,10876, #10752
10877,10878,4486,10879,10880,10881,10882,10883,10884,10885,4949,10886,10887,10888,10889,10890, #10768
10891,10892,10893,10894,10895,10896,10897,10898,10899,10900,10901,10902,10903,10904,10905,10906, #10784
10907,10908,10909,10910,10911,10912,10913,10914,10915,10916,10917,10918,10919,4487,10920,10921, #10800
10922,10923,10924,10925,10926,10927,10928,10929,10930,10931,10932,4950,10933,10934,10935,10936, #10816
10937,10938,10939,10940,10941,10942,10943,10944,10945,10946,10947,10948,10949,4488,10950,10951, #10832
10952,10953,10954,10955,10956,10957,10958,10959,4190,10960,10961,10962,10963,10964,10965,10966, #10848
10967,10968,10969,10970,10971,10972,10973,10974,10975,10976,10977,10978,10979,10980,10981,10982, #10864
10983,10984,10985,10986,10987,10988,10989,10990,10991,10992,10993,10994,10995,10996,10997,10998, #10880
10999,11000,11001,11002,11003,11004,11005,11006,3960,11007,11008,11009,11010,11011,11012,11013, #10896
11014,11015,11016,11017,11018,11019,11020,11021,11022,11023,11024,11025,11026,11027,11028,11029, #10912
11030,11031,11032,4951,11033,11034,11035,11036,11037,11038,11039,11040,11041,11042,11043,11044, #10928
11045,11046,11047,4489,11048,11049,11050,11051,4952,11052,11053,11054,11055,11056,11057,11058, #10944
4953,11059,11060,11061,11062,11063,11064,11065,11066,11067,11068,11069,11070,11071,4954,11072, #10960
11073,11074,11075,11076,11077,11078,11079,11080,11081,11082,11083,11084,11085,11086,11087,11088, #10976
11089,11090,11091,11092,11093,11094,11095,11096,11097,11098,11099,11100,11101,11102,11103,11104, #10992
11105,11106,11107,11108,11109,11110,11111,11112,11113,11114,11115,3808,11116,11117,11118,11119, #11008
11120,11121,11122,11123,11124,11125,11126,11127,11128,11129,11130,11131,11132,11133,11134,4955, #11024
11135,11136,11137,11138,11139,11140,11141,11142,11143,11144,11145,11146,11147,11148,11149,11150, #11040
11151,11152,11153,11154,11155,11156,11157,11158,11159,11160,11161,4956,11162,11163,11164,11165, #11056
11166,11167,11168,11169,11170,11171,11172,11173,11174,11175,11176,11177,11178,11179,11180,4957, #11072
11181,11182,11183,11184,11185,11186,4958,11187,11188,11189,11190,11191,11192,11193,11194,11195, #11088
11196,11197,11198,11199,11200,3678,11201,11202,11203,11204,11205,11206,4191,11207,11208,11209, #11104
11210,11211,11212,11213,11214,11215,11216,11217,11218,11219,11220,11221,11222,11223,11224,11225, #11120
11226,11227,11228,11229,11230,11231,11232,11233,11234,11235,11236,11237,11238,11239,11240,11241, #11136
11242,11243,11244,11245,11246,11247,11248,11249,11250,11251,4959,11252,11253,11254,11255,11256, #11152
11257,11258,11259,11260,11261,11262,11263,11264,11265,11266,11267,11268,11269,11270,11271,11272, #11168
11273,11274,11275,11276,11277,11278,11279,11280,11281,11282,11283,11284,11285,11286,11287,11288, #11184
11289,11290,11291,11292,11293,11294,11295,11296,11297,11298,11299,11300,11301,11302,11303,11304, #11200
11305,11306,11307,11308,11309,11310,11311,11312,11313,11314,3679,11315,11316,11317,11318,4490, #11216
11319,11320,11321,11322,11323,11324,11325,11326,11327,11328,11329,11330,11331,11332,11333,11334, #11232
11335,11336,11337,11338,11339,11340,11341,11342,11343,11344,11345,11346,11347,4960,11348,11349, #11248
11350,11351,11352,11353,11354,11355,11356,11357,11358,11359,11360,11361,11362,11363,11364,11365, #11264
11366,11367,11368,11369,11370,11371,11372,11373,11374,11375,11376,11377,3961,4961,11378,11379, #11280
11380,11381,11382,11383,11384,11385,11386,11387,11388,11389,11390,11391,11392,11393,11394,11395, #11296
11396,11397,4192,11398,11399,11400,11401,11402,11403,11404,11405,11406,11407,11408,11409,11410, #11312
11411,4962,11412,11413,11414,11415,11416,11417,11418,11419,11420,11421,11422,11423,11424,11425, #11328
11426,11427,11428,11429,11430,11431,11432,11433,11434,11435,11436,11437,11438,11439,11440,11441, #11344
11442,11443,11444,11445,11446,11447,11448,11449,11450,11451,11452,11453,11454,11455,11456,11457, #11360
11458,11459,11460,11461,11462,11463,11464,11465,11466,11467,11468,11469,4963,11470,11471,4491, #11376
11472,11473,11474,11475,4964,11476,11477,11478,11479,11480,11481,11482,11483,11484,11485,11486, #11392
11487,11488,11489,11490,11491,11492,4965,11493,11494,11495,11496,11497,11498,11499,11500,11501, #11408
11502,11503,11504,11505,11506,11507,11508,11509,11510,11511,11512,11513,11514,11515,11516,11517, #11424
11518,11519,11520,11521,11522,11523,11524,11525,11526,11527,11528,11529,3962,11530,11531,11532, #11440
11533,11534,11535,11536,11537,11538,11539,11540,11541,11542,11543,11544,11545,11546,11547,11548, #11456
11549,11550,11551,11552,11553,11554,11555,11556,11557,11558,11559,11560,11561,11562,11563,11564, #11472
4193,4194,11565,11566,11567,11568,11569,11570,11571,11572,11573,11574,11575,11576,11577,11578, #11488
11579,11580,11581,11582,11583,11584,11585,11586,11587,11588,11589,11590,11591,4966,4195,11592, #11504
11593,11594,11595,11596,11597,11598,11599,11600,11601,11602,11603,11604,3090,11605,11606,11607, #11520
11608,11609,11610,4967,11611,11612,11613,11614,11615,11616,11617,11618,11619,11620,11621,11622, #11536
11623,11624,11625,11626,11627,11628,11629,11630,11631,11632,11633,11634,11635,11636,11637,11638, #11552
11639,11640,11641,11642,11643,11644,11645,11646,11647,11648,11649,11650,11651,11652,11653,11654, #11568
11655,11656,11657,11658,11659,11660,11661,11662,11663,11664,11665,11666,11667,11668,11669,11670, #11584
11671,11672,11673,11674,4968,11675,11676,11677,11678,11679,11680,11681,11682,11683,11684,11685, #11600
11686,11687,11688,11689,11690,11691,11692,11693,3809,11694,11695,11696,11697,11698,11699,11700, #11616
11701,11702,11703,11704,11705,11706,11707,11708,11709,11710,11711,11712,11713,11714,11715,11716, #11632
11717,11718,3553,11719,11720,11721,11722,11723,11724,11725,11726,11727,11728,11729,11730,4969, #11648
11731,11732,11733,11734,11735,11736,11737,11738,11739,11740,4492,11741,11742,11743,11744,11745, #11664
11746,11747,11748,11749,11750,11751,11752,4970,11753,11754,11755,11756,11757,11758,11759,11760, #11680
11761,11762,11763,11764,11765,11766,11767,11768,11769,11770,11771,11772,11773,11774,11775,11776, #11696
11777,11778,11779,11780,11781,11782,11783,11784,11785,11786,11787,11788,11789,11790,4971,11791, #11712
11792,11793,11794,11795,11796,11797,4972,11798,11799,11800,11801,11802,11803,11804,11805,11806, #11728
11807,11808,11809,11810,4973,11811,11812,11813,11814,11815,11816,11817,11818,11819,11820,11821, #11744
11822,11823,11824,11825,11826,11827,11828,11829,11830,11831,11832,11833,11834,3680,3810,11835, #11760
11836,4974,11837,11838,11839,11840,11841,11842,11843,11844,11845,11846,11847,11848,11849,11850, #11776
11851,11852,11853,11854,11855,11856,11857,11858,11859,11860,11861,11862,11863,11864,11865,11866, #11792
11867,11868,11869,11870,11871,11872,11873,11874,11875,11876,11877,11878,11879,11880,11881,11882, #11808
11883,11884,4493,11885,11886,11887,11888,11889,11890,11891,11892,11893,11894,11895,11896,11897, #11824
11898,11899,11900,11901,11902,11903,11904,11905,11906,11907,11908,11909,11910,11911,11912,11913, #11840
11914,11915,4975,11916,11917,11918,11919,11920,11921,11922,11923,11924,11925,11926,11927,11928, #11856
11929,11930,11931,11932,11933,11934,11935,11936,11937,11938,11939,11940,11941,11942,11943,11944, #11872
11945,11946,11947,11948,11949,4976,11950,11951,11952,11953,11954,11955,11956,11957,11958,11959, #11888
11960,11961,11962,11963,11964,11965,11966,11967,11968,11969,11970,11971,11972,11973,11974,11975, #11904
11976,11977,11978,11979,11980,11981,11982,11983,11984,11985,11986,11987,4196,11988,11989,11990, #11920
11991,11992,4977,11993,11994,11995,11996,11997,11998,11999,12000,12001,12002,12003,12004,12005, #11936
12006,12007,12008,12009,12010,12011,12012,12013,12014,12015,12016,12017,12018,12019,12020,12021, #11952
12022,12023,12024,12025,12026,12027,12028,12029,12030,12031,12032,12033,12034,12035,12036,12037, #11968
12038,12039,12040,12041,12042,12043,12044,12045,12046,12047,12048,12049,12050,12051,12052,12053, #11984
12054,12055,12056,12057,12058,12059,12060,12061,4978,12062,12063,12064,12065,12066,12067,12068, #12000
12069,12070,12071,12072,12073,12074,12075,12076,12077,12078,12079,12080,12081,12082,12083,12084, #12016
12085,12086,12087,12088,12089,12090,12091,12092,12093,12094,12095,12096,12097,12098,12099,12100, #12032
12101,12102,12103,12104,12105,12106,12107,12108,12109,12110,12111,12112,12113,12114,12115,12116, #12048
12117,12118,12119,12120,12121,12122,12123,4979,12124,12125,12126,12127,12128,4197,12129,12130, #12064
12131,12132,12133,12134,12135,12136,12137,12138,12139,12140,12141,12142,12143,12144,12145,12146, #12080
12147,12148,12149,12150,12151,12152,12153,12154,4980,12155,12156,12157,12158,12159,12160,4494, #12096
12161,12162,12163,12164,3811,12165,12166,12167,12168,12169,4495,12170,12171,4496,12172,12173, #12112
12174,12175,12176,3812,12177,12178,12179,12180,12181,12182,12183,12184,12185,12186,12187,12188, #12128
12189,12190,12191,12192,12193,12194,12195,12196,12197,12198,12199,12200,12201,12202,12203,12204, #12144
12205,12206,12207,12208,12209,12210,12211,12212,12213,12214,12215,12216,12217,12218,12219,12220, #12160
12221,4981,12222,12223,12224,12225,12226,12227,12228,12229,12230,12231,12232,12233,12234,12235, #12176
4982,12236,12237,12238,12239,12240,12241,12242,12243,12244,12245,4983,12246,12247,12248,12249, #12192
4984,12250,12251,12252,12253,12254,12255,12256,12257,12258,12259,12260,12261,12262,12263,12264, #12208
4985,12265,4497,12266,12267,12268,12269,12270,12271,12272,12273,12274,12275,12276,12277,12278, #12224
12279,12280,12281,12282,12283,12284,12285,12286,12287,4986,12288,12289,12290,12291,12292,12293, #12240
12294,12295,12296,2473,12297,12298,12299,12300,12301,12302,12303,12304,12305,12306,12307,12308, #12256
12309,12310,12311,12312,12313,12314,12315,12316,12317,12318,12319,3963,12320,12321,12322,12323, #12272
12324,12325,12326,12327,12328,12329,12330,12331,12332,4987,12333,12334,12335,12336,12337,12338, #12288
12339,12340,12341,12342,12343,12344,12345,12346,12347,12348,12349,12350,12351,12352,12353,12354, #12304
12355,12356,12357,12358,12359,3964,12360,12361,12362,12363,12364,12365,12366,12367,12368,12369, #12320
12370,3965,12371,12372,12373,12374,12375,12376,12377,12378,12379,12380,12381,12382,12383,12384, #12336
12385,12386,12387,12388,12389,12390,12391,12392,12393,12394,12395,12396,12397,12398,12399,12400, #12352
12401,12402,12403,12404,12405,12406,12407,12408,4988,12409,12410,12411,12412,12413,12414,12415, #12368
12416,12417,12418,12419,12420,12421,12422,12423,12424,12425,12426,12427,12428,12429,12430,12431, #12384
12432,12433,12434,12435,12436,12437,12438,3554,12439,12440,12441,12442,12443,12444,12445,12446, #12400
12447,12448,12449,12450,12451,12452,12453,12454,12455,12456,12457,12458,12459,12460,12461,12462, #12416
12463,12464,4989,12465,12466,12467,12468,12469,12470,12471,12472,12473,12474,12475,12476,12477, #12432
12478,12479,12480,4990,12481,12482,12483,12484,12485,12486,12487,12488,12489,4498,12490,12491, #12448
12492,12493,12494,12495,12496,12497,12498,12499,12500,12501,12502,12503,12504,12505,12506,12507, #12464
12508,12509,12510,12511,12512,12513,12514,12515,12516,12517,12518,12519,12520,12521,12522,12523, #12480
12524,12525,12526,12527,12528,12529,12530,12531,12532,12533,12534,12535,12536,12537,12538,12539, #12496
12540,12541,12542,12543,12544,12545,12546,12547,12548,12549,12550,12551,4991,12552,12553,12554, #12512
12555,12556,12557,12558,12559,12560,12561,12562,12563,12564,12565,12566,12567,12568,12569,12570, #12528
12571,12572,12573,12574,12575,12576,12577,12578,3036,12579,12580,12581,12582,12583,3966,12584, #12544
12585,12586,12587,12588,12589,12590,12591,12592,12593,12594,12595,12596,12597,12598,12599,12600, #12560
12601,12602,12603,12604,12605,12606,12607,12608,12609,12610,12611,12612,12613,12614,12615,12616, #12576
12617,12618,12619,12620,12621,12622,12623,12624,12625,12626,12627,12628,12629,12630,12631,12632, #12592
12633,12634,12635,12636,12637,12638,12639,12640,12641,12642,12643,12644,12645,12646,4499,12647, #12608
12648,12649,12650,12651,12652,12653,12654,12655,12656,12657,12658,12659,12660,12661,12662,12663, #12624
12664,12665,12666,12667,12668,12669,12670,12671,12672,12673,12674,12675,12676,12677,12678,12679, #12640
12680,12681,12682,12683,12684,12685,12686,12687,12688,12689,12690,12691,12692,12693,12694,12695, #12656
12696,12697,12698,4992,12699,12700,12701,12702,12703,12704,12705,12706,12707,12708,12709,12710, #12672
12711,12712,12713,12714,12715,12716,12717,12718,12719,12720,12721,12722,12723,12724,12725,12726, #12688
12727,12728,12729,12730,12731,12732,12733,12734,12735,12736,12737,12738,12739,12740,12741,12742, #12704
12743,12744,12745,12746,12747,12748,12749,12750,12751,12752,12753,12754,12755,12756,12757,12758, #12720
12759,12760,12761,12762,12763,12764,12765,12766,12767,12768,12769,12770,12771,12772,12773,12774, #12736
12775,12776,12777,12778,4993,2175,12779,12780,12781,12782,12783,12784,12785,12786,4500,12787, #12752
12788,12789,12790,12791,12792,12793,12794,12795,12796,12797,12798,12799,12800,12801,12802,12803, #12768
12804,12805,12806,12807,12808,12809,12810,12811,12812,12813,12814,12815,12816,12817,12818,12819, #12784
12820,12821,12822,12823,12824,12825,12826,4198,3967,12827,12828,12829,12830,12831,12832,12833, #12800
12834,12835,12836,12837,12838,12839,12840,12841,12842,12843,12844,12845,12846,12847,12848,12849, #12816
12850,12851,12852,12853,12854,12855,12856,12857,12858,12859,12860,12861,4199,12862,12863,12864, #12832
12865,12866,12867,12868,12869,12870,12871,12872,12873,12874,12875,12876,12877,12878,12879,12880, #12848
12881,12882,12883,12884,12885,12886,12887,4501,12888,12889,12890,12891,12892,12893,12894,12895, #12864
12896,12897,12898,12899,12900,12901,12902,12903,12904,12905,12906,12907,12908,12909,12910,12911, #12880
12912,4994,12913,12914,12915,12916,12917,12918,12919,12920,12921,12922,12923,12924,12925,12926, #12896
12927,12928,12929,12930,12931,12932,12933,12934,12935,12936,12937,12938,12939,12940,12941,12942, #12912
12943,12944,12945,12946,12947,12948,12949,12950,12951,12952,12953,12954,12955,12956,1772,12957, #12928
12958,12959,12960,12961,12962,12963,12964,12965,12966,12967,12968,12969,12970,12971,12972,12973, #12944
12974,12975,12976,12977,12978,12979,12980,12981,12982,12983,12984,12985,12986,12987,12988,12989, #12960
12990,12991,12992,12993,12994,12995,12996,12997,4502,12998,4503,12999,13000,13001,13002,13003, #12976
4504,13004,13005,13006,13007,13008,13009,13010,13011,13012,13013,13014,13015,13016,13017,13018, #12992
13019,13020,13021,13022,13023,13024,13025,13026,13027,13028,13029,3449,13030,13031,13032,13033, #13008
13034,13035,13036,13037,13038,13039,13040,13041,13042,13043,13044,13045,13046,13047,13048,13049, #13024
13050,13051,13052,13053,13054,13055,13056,13057,13058,13059,13060,13061,13062,13063,13064,13065, #13040
13066,13067,13068,13069,13070,13071,13072,13073,13074,13075,13076,13077,13078,13079,13080,13081, #13056
13082,13083,13084,13085,13086,13087,13088,13089,13090,13091,13092,13093,13094,13095,13096,13097, #13072
13098,13099,13100,13101,13102,13103,13104,13105,13106,13107,13108,13109,13110,13111,13112,13113, #13088
13114,13115,13116,13117,13118,3968,13119,4995,13120,13121,13122,13123,13124,13125,13126,13127, #13104
4505,13128,13129,13130,13131,13132,13133,13134,4996,4506,13135,13136,13137,13138,13139,4997, #13120
13140,13141,13142,13143,13144,13145,13146,13147,13148,13149,13150,13151,13152,13153,13154,13155, #13136
13156,13157,13158,13159,4998,13160,13161,13162,13163,13164,13165,13166,13167,13168,13169,13170, #13152
13171,13172,13173,13174,13175,13176,4999,13177,13178,13179,13180,13181,13182,13183,13184,13185, #13168
13186,13187,13188,13189,13190,13191,13192,13193,13194,13195,13196,13197,13198,13199,13200,13201, #13184
13202,13203,13204,13205,13206,5000,13207,13208,13209,13210,13211,13212,13213,13214,13215,13216, #13200
13217,13218,13219,13220,13221,13222,13223,13224,13225,13226,13227,4200,5001,13228,13229,13230, #13216
13231,13232,13233,13234,13235,13236,13237,13238,13239,13240,3969,13241,13242,13243,13244,3970, #13232
13245,13246,13247,13248,13249,13250,13251,13252,13253,13254,13255,13256,13257,13258,13259,13260, #13248
13261,13262,13263,13264,13265,13266,13267,13268,3450,13269,13270,13271,13272,13273,13274,13275, #13264
13276,5002,13277,13278,13279,13280,13281,13282,13283,13284,13285,13286,13287,13288,13289,13290, #13280
13291,13292,13293,13294,13295,13296,13297,13298,13299,13300,13301,13302,3813,13303,13304,13305, #13296
13306,13307,13308,13309,13310,13311,13312,13313,13314,13315,13316,13317,13318,13319,13320,13321, #13312
13322,13323,13324,13325,13326,13327,13328,4507,13329,13330,13331,13332,13333,13334,13335,13336, #13328
13337,13338,13339,13340,13341,5003,13342,13343,13344,13345,13346,13347,13348,13349,13350,13351, #13344
13352,13353,13354,13355,13356,13357,13358,13359,13360,13361,13362,13363,13364,13365,13366,13367, #13360
5004,13368,13369,13370,13371,13372,13373,13374,13375,13376,13377,13378,13379,13380,13381,13382, #13376
13383,13384,13385,13386,13387,13388,13389,13390,13391,13392,13393,13394,13395,13396,13397,13398, #13392
13399,13400,13401,13402,13403,13404,13405,13406,13407,13408,13409,13410,13411,13412,13413,13414, #13408
13415,13416,13417,13418,13419,13420,13421,13422,13423,13424,13425,13426,13427,13428,13429,13430, #13424
13431,13432,4508,13433,13434,13435,4201,13436,13437,13438,13439,13440,13441,13442,13443,13444, #13440
13445,13446,13447,13448,13449,13450,13451,13452,13453,13454,13455,13456,13457,5005,13458,13459, #13456
13460,13461,13462,13463,13464,13465,13466,13467,13468,13469,13470,4509,13471,13472,13473,13474, #13472
13475,13476,13477,13478,13479,13480,13481,13482,13483,13484,13485,13486,13487,13488,13489,13490, #13488
13491,13492,13493,13494,13495,13496,13497,13498,13499,13500,13501,13502,13503,13504,13505,13506, #13504
13507,13508,13509,13510,13511,13512,13513,13514,13515,13516,13517,13518,13519,13520,13521,13522, #13520
13523,13524,13525,13526,13527,13528,13529,13530,13531,13532,13533,13534,13535,13536,13537,13538, #13536
13539,13540,13541,13542,13543,13544,13545,13546,13547,13548,13549,13550,13551,13552,13553,13554, #13552
13555,13556,13557,13558,13559,13560,13561,13562,13563,13564,13565,13566,13567,13568,13569,13570, #13568
13571,13572,13573,13574,13575,13576,13577,13578,13579,13580,13581,13582,13583,13584,13585,13586, #13584
13587,13588,13589,13590,13591,13592,13593,13594,13595,13596,13597,13598,13599,13600,13601,13602, #13600
13603,13604,13605,13606,13607,13608,13609,13610,13611,13612,13613,13614,13615,13616,13617,13618, #13616
13619,13620,13621,13622,13623,13624,13625,13626,13627,13628,13629,13630,13631,13632,13633,13634, #13632
13635,13636,13637,13638,13639,13640,13641,13642,5006,13643,13644,13645,13646,13647,13648,13649, #13648
13650,13651,5007,13652,13653,13654,13655,13656,13657,13658,13659,13660,13661,13662,13663,13664, #13664
13665,13666,13667,13668,13669,13670,13671,13672,13673,13674,13675,13676,13677,13678,13679,13680, #13680
13681,13682,13683,13684,13685,13686,13687,13688,13689,13690,13691,13692,13693,13694,13695,13696, #13696
13697,13698,13699,13700,13701,13702,13703,13704,13705,13706,13707,13708,13709,13710,13711,13712, #13712
13713,13714,13715,13716,13717,13718,13719,13720,13721,13722,13723,13724,13725,13726,13727,13728, #13728
13729,13730,13731,13732,13733,13734,13735,13736,13737,13738,13739,13740,13741,13742,13743,13744, #13744
13745,13746,13747,13748,13749,13750,13751,13752,13753,13754,13755,13756,13757,13758,13759,13760, #13760
13761,13762,13763,13764,13765,13766,13767,13768,13769,13770,13771,13772,13773,13774,3273,13775, #13776
13776,13777,13778,13779,13780,13781,13782,13783,13784,13785,13786,13787,13788,13789,13790,13791, #13792
13792,13793,13794,13795,13796,13797,13798,13799,13800,13801,13802,13803,13804,13805,13806,13807, #13808
13808,13809,13810,13811,13812,13813,13814,13815,13816,13817,13818,13819,13820,13821,13822,13823, #13824
13824,13825,13826,13827,13828,13829,13830,13831,13832,13833,13834,13835,13836,13837,13838,13839, #13840
13840,13841,13842,13843,13844,13845,13846,13847,13848,13849,13850,13851,13852,13853,13854,13855, #13856
13856,13857,13858,13859,13860,13861,13862,13863,13864,13865,13866,13867,13868,13869,13870,13871, #13872
13872,13873,13874,13875,13876,13877,13878,13879,13880,13881,13882,13883,13884,13885,13886,13887, #13888
13888,13889,13890,13891,13892,13893,13894,13895,13896,13897,13898,13899,13900,13901,13902,13903, #13904
13904,13905,13906,13907,13908,13909,13910,13911,13912,13913,13914,13915,13916,13917,13918,13919, #13920
13920,13921,13922,13923,13924,13925,13926,13927,13928,13929,13930,13931,13932,13933,13934,13935, #13936
13936,13937,13938,13939,13940,13941,13942,13943,13944,13945,13946,13947,13948,13949,13950,13951, #13952
13952,13953,13954,13955,13956,13957,13958,13959,13960,13961,13962,13963,13964,13965,13966,13967, #13968
13968,13969,13970,13971,13972) #13973
# flake8: noqa
| agpl-3.0 |
hyunchel/webargs | webargs/core.py | 1 | 15449 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
import collections
import functools
import inspect
import logging
import warnings
import marshmallow as ma
from marshmallow.compat import iteritems
from marshmallow.utils import missing
# Module-level logger for parser diagnostics (used by Parser.handle_error).
logger = logging.getLogger(__name__)

# Public API of this module.
__all__ = [
    'WebargsError',
    'ValidationError',
    'argmap2schema',
    'is_multiple',
    'Parser',
    'get_value',
]

# Default HTTP status code attached to validation failures (422 Unprocessable
# Entity), overridable per-error via ValidationError(status_code=...).
DEFAULT_VALIDATION_STATUS = 422
class WebargsError(Exception):
    """Base class for all webargs-related errors.

    Allows callers to catch every error raised by this package with a
    single ``except WebargsError`` clause.
    """
    pass
class ValidationError(WebargsError, ma.exceptions.ValidationError):
    """Validation failure on user input.

    Behaves exactly like `marshmallow.ValidationError` but additionally
    carries an HTTP ``status_code`` (default 422) and optional response
    ``headers`` for the web framework to use when reporting the error.
    """

    def __init__(self, message, status_code=DEFAULT_VALIDATION_STATUS, headers=None, **kwargs):
        # Record the HTTP-facing extras before delegating to marshmallow,
        # which populates messages/field_names/fields/data from **kwargs.
        self.status_code = status_code
        self.headers = headers
        ma.exceptions.ValidationError.__init__(self, message, **kwargs)

    def __repr__(self):
        return 'ValidationError(%r, status_code=%s, headers=%s)' % (
            self.args[0], self.status_code, self.headers,
        )
def _callable_or_raise(obj):
"""Makes sure an object is callable if it is not ``None``. If not
callable, a ValueError is raised.
"""
if obj and not callable(obj):
raise ValueError('{0!r} is not callable.'.format(obj))
else:
return obj
def argmap2schema(argmap, instance=False, **kwargs):
    """Build a `marshmallow.Schema` class from a dict of argument names to
    `Fields <marshmallow.fields.Field>`.

    :param dict argmap: Mapping of argument name -> Field.
    :param bool instance: If ``True``, return an instantiated schema
        (constructed with ``**kwargs``) instead of the class.
    """
    # strict=True makes the generated schema raise on validation errors,
    # which is what the parser's error handling expects.
    class Meta(object):
        strict = True

    attrs = dict(argmap)
    attrs['Meta'] = Meta
    # str() keeps the class name a native str on both Python 2 and 3.
    schema_cls = type(str('ArgSchema'), (ma.Schema,), attrs)
    if instance:
        return schema_cls(**kwargs)
    return schema_cls
def is_multiple(field):
    """Return whether or not `field` handles repeated/multi-value arguments."""
    # Only List fields consume every value supplied for a key (e.g. ?a=1&a=2).
    return isinstance(field, ma.fields.List)
def get_value(d, name, multiple):
    """Get a value from a dictionary. Handles ``MultiDict`` types when
    ``multiple=True``. If the value is not found, return `missing`.

    :param dict d: Dictionary to pull the value from.
    :param str name: Name of the key.
    :param bool multiple: Whether to handle multiple values.
    """
    val = d.get(name, missing)
    # Single-value lookups (or absent keys) need no multi-value handling.
    if not multiple or val is missing:
        return val
    # MultiDict-style containers expose every value stored under a key via
    # getlist (Werkzeug/Django style) or getall (WebOb/aiohttp style).
    if hasattr(d, 'getlist'):
        return d.getlist(name)
    if hasattr(d, 'getall'):
        return d.getall(name)
    if isinstance(val, (list, tuple)):
        return val
    # Plain dict with a scalar value: normalize to a one-element list.
    return [val]
def _ensure_list_of_callables(obj):
if obj:
if isinstance(obj, (list, tuple)):
validators = obj
elif callable(obj):
validators = [obj]
else:
raise ValueError('{0!r} is not a callable or list of callables.'.format(obj))
else:
validators = []
return validators
class Parser(object):
    """Base parser class that provides high-level implementation for parsing
    a request.

    Descendant classes must provide lower-level implementations for parsing
    different locations, e.g. ``parse_json``, ``parse_querystring``, etc.

    :param tuple locations: Default locations to parse.
    :param callable error_handler: Custom error handler function.
    """

    DEFAULT_LOCATIONS = ('querystring', 'form', 'json',)
    DEFAULT_VALIDATION_STATUS = DEFAULT_VALIDATION_STATUS
    DEFAULT_VALIDATION_MESSAGE = 'Invalid value.'

    #: Maps location => method name (str) or, for handlers registered via
    #: :meth:`location_handler`, a plain function.
    __location_map__ = {
        'json': 'parse_json',
        'querystring': 'parse_querystring',
        'query': 'parse_querystring',
        'form': 'parse_form',
        'headers': 'parse_headers',
        'cookies': 'parse_cookies',
        'files': 'parse_files',
    }

    def __init__(self, locations=None, error_handler=None):
        self.locations = locations or self.DEFAULT_LOCATIONS
        # Validates up front that the handler is callable (or None).
        self.error_callback = _callable_or_raise(error_handler)
        #: A short-lived cache to store results from processing request bodies.
        self._cache = {}

    def _validated_locations(self, locations):
        """Ensure that the given locations argument is valid.

        :raises: ValueError if a given locations includes an invalid location.
        """
        # The set difference between the given locations and the available locations
        # will be the set of invalid locations
        valid_locations = set(self.__location_map__.keys())
        given = set(locations)
        invalid_locations = given - valid_locations
        if len(invalid_locations):
            msg = "Invalid locations arguments: {0}".format(list(invalid_locations))
            raise ValueError(msg)
        return locations

    def _get_value(self, name, argobj, req, location):
        """Fetch the raw value for ``name`` from ``req`` at ``location``."""
        # Parsing function to call
        # May be a method name (str) or a function
        func = self.__location_map__.get(location)
        if func:
            if inspect.isfunction(func):
                # Handler registered via the location_handler decorator.
                function = func
            else:
                # Built-in locations map to method names on this parser.
                function = getattr(self, func)
            value = function(req, name, argobj)
        else:
            raise ValueError('Invalid location: "{0}"'.format(location))
        return value

    def parse_arg(self, name, field, req, locations=None):
        """Parse a single argument from a request.

        .. note::

            This method does not perform validation on the argument.

        :param str name: The name of the value.
        :param marshmallow.fields.Field field: The marshmallow `Field` for the request
            parameter.
        :param req: The request object to parse.
        :param tuple locations: The locations ('json', 'querystring', etc.) where
            to search for the value.
        :return: The unvalidated argument value or `missing` if the value cannot be found
            on the request.
        """
        # A field-level "location" in the field metadata overrides the
        # parser-wide search order.
        location = field.metadata.get('location')
        if location:
            locations_to_check = self._validated_locations([location])
        else:
            locations_to_check = self._validated_locations(locations or self.locations)
        # load_from lets the field be read from a differently-named key.
        key = field.load_from or name
        for location in locations_to_check:
            value = self._get_value(key, field, req=req, location=location)
            # For List fields, skip empty or non-iterable results so a later
            # location may still supply the values.
            # NOTE(review): collections.Iterable is collections.abc.Iterable
            # on modern Python; kept for Python 2 compatibility here.
            if (is_multiple(field) and not
                    (isinstance(value, collections.Iterable) and len(value))):
                continue
            # Found the value; validate and return it
            if value is not missing:
                return value
        return missing

    def _parse_request(self, argmap, req, locations, force_all):
        """Collect raw (unvalidated) values for every field in ``argmap``."""
        # NOTE(review): ``force_all`` is accepted but unused here; missing
        # keys are filled in by ``parse`` instead — confirm this is intended.
        argdict = argmap.fields if isinstance(argmap, ma.Schema) else argmap
        parsed = {}
        for argname, field_obj in iteritems(argdict):
            parsed_value = self.parse_arg(argname, field_obj, req,
                locations=locations or self.locations)
            parsed[argname] = parsed_value
        return parsed

    def load(self, data, argmap):
        """Deserialize ``data`` using ``argmap``, which may be a
        `marshmallow.Schema` instance or a dict of fields.
        """
        if isinstance(argmap, ma.Schema):
            schema = argmap
        else:
            schema = argmap2schema(argmap)()
        if not schema.strict:
            # A non-strict schema collects errors instead of raising, which
            # would bypass the parser's error handling in ``parse``.
            warnings.warn("It is highly recommended that you set strict=True on your schema "
                "so that the parser's error handler will be invoked when expected.", UserWarning)
        return schema.load(data)

    def parse(self, argmap, req, locations=None, validate=None, force_all=False):
        """Main request parsing method.

        :param dict argmap: Either a `marshmallow.Schema` or a `dict`
            of argname -> `marshmallow.fields.Field` pairs.
        :param req: The request object to parse.
        :param tuple locations: Where on the request to search for values.
            Can include one or more of ``('json', 'querystring', 'form',
            'headers', 'cookies', 'files')``.
        :param callable validate: Validation function or list of validation functions
            that receives the dictionary of parsed arguments. Validator either returns a
            boolean or raises a :exc:`ValidationError`.
        :return: A dictionary of parsed arguments
        """
        ret = None
        validators = _ensure_list_of_callables(validate)
        try:
            parsed = self._parse_request(argmap, req, locations, force_all=force_all)
            result = self.load(parsed, argmap)
            for validator in validators:
                # TODO: Is this necessary? User could just defined schema validators
                if validator(result.data) is False:
                    msg = self.DEFAULT_VALIDATION_MESSAGE
                    raise ValidationError(msg, data=result.data)
        except ma.exceptions.ValidationError as error:
            # Wrap plain marshmallow errors so downstream handlers always see
            # the webargs ValidationError (with status_code/headers).
            if (isinstance(error, ma.exceptions.ValidationError) and not
                    isinstance(error, ValidationError)):
                # Raise a webargs error instead
                error = ValidationError(
                    error.messages,
                    status_code=getattr(error, 'status_code', DEFAULT_VALIDATION_STATUS),
                    headers=getattr(error, 'headers', {}),
                    field_names=error.field_names,
                    fields=error.fields,
                    data=error.data
                )
            if self.error_callback:
                self.error_callback(error)
            else:
                self.handle_error(error)
        else:
            ret = result.data
        finally:
            # The request-body cache only lives for a single parse.
            self.clear_cache()
        if force_all:
            # Fill in `missing` for any schema field absent from the request
            # (used by use_kwargs so every keyword argument exists).
            # NOTE(review): if a custom error handler returns without
            # raising, ``ret`` is still None here and this block would
            # fail — confirm handlers are expected to raise.
            if isinstance(argmap, ma.Schema):
                all_field_names = set([fname for fname, fobj in iteritems(argmap.fields)
                    if not fobj.dump_only])
            else:
                all_field_names = set(argmap.keys())
            missing_args = all_field_names - set(ret.keys())
            for key in missing_args:
                ret[key] = missing
        return ret

    def clear_cache(self):
        """Invalidate the parser's cache."""
        self._cache = {}
        return None

    def use_args(self, argmap, req=None, locations=None, as_kwargs=False, validate=None):
        """Decorator that injects parsed arguments into a view function or method.

        Example usage with Flask: ::

            @app.route('/echo', methods=['get', 'post'])
            @parser.use_args({'name': fields.Str()})
            def greet(args):
                return 'Hello ' + args['name']

        :param dict argmap: Either a `marshmallow.Schema` or a `dict`
            of argname -> `marshmallow.fields.Field` pairs.
        :param tuple locations: Where on the request to search for values.
        :param bool as_kwargs: Whether to insert arguments as keyword arguments.
        :param callable validate: Validation function that receives the dictionary
            of parsed arguments. If the function returns ``False``, the parser
            will raise a :exc:`ValidationError`.
        """
        locations = locations or self.locations
        # Build the schema once at decoration time rather than per request.
        if isinstance(argmap, ma.Schema):
            schema = argmap
        else:
            schema = argmap2schema(argmap)()
        def decorator(func):
            @functools.wraps(func)
            def wrapper(*args, **kwargs):
                # if as_kwargs is passed, must include all args
                force_all = as_kwargs
                parsed_args = self.parse(schema, req=req, locations=locations,
                    validate=validate, force_all=force_all)
                if as_kwargs:
                    kwargs.update(parsed_args)
                    return func(*args, **kwargs)
                else:
                    # Wrapped function is a method, so inject parsed_args
                    # after 'self'
                    if args and args[0]:
                        return func(args[0], parsed_args, *args[1:], **kwargs)
                    return func(parsed_args, *args, **kwargs)
            return wrapper
        return decorator

    def use_kwargs(self, *args, **kwargs):
        """Decorator that injects parsed arguments into a view function or method
        as keyword arguments.

        This is a shortcut to :meth:`use_args` with ``as_kwargs=True``.

        Example usage with Flask: ::

            @app.route('/echo', methods=['get', 'post'])
            @parser.use_kwargs({'name': fields.Str()})
            def greet(name):
                return 'Hello ' + name

        Receives the same ``args`` and ``kwargs`` as :meth:`use_args`.
        """
        kwargs['as_kwargs'] = True
        return self.use_args(*args, **kwargs)

    def location_handler(self, name):
        """Decorator that registers a function for parsing a request location.
        The wrapped function receives a request, the name of the argument, and
        the corresponding `Field <marshmallow.fields.Field>` object.

        Example: ::

            from webargs import core
            parser = core.Parser()

            @parser.location_handler('name')
            def parse_data(request, name, field):
                return request.data.get(name)

        :param str name: The name of the location to register.
        """
        def decorator(func):
            # Stored as a function (not a method name); see _get_value.
            self.__location_map__[name] = func
            return func
        return decorator

    def error_handler(self, func):
        """Decorator that registers a custom error handling function. The
        function should received the raised error. Overrides
        the parser's ``handle_error`` method.

        Example: ::

            from webargs import core
            parser = core.Parser()

            class CustomError(Exception):
                pass

            @parser.error_handler
            def handle_error(error):
                raise CustomError(error)

        :param callable func: The error callback to register.
        """
        self.error_callback = func
        return func

    # Abstract Methods
    # Subclasses override these per web framework; the defaults simply
    # report that no value could be found in that location.

    def parse_json(self, req, name, arg):
        """Pull a JSON value from a request object or return `missing` if the
        value cannot be found.
        """
        return missing

    def parse_querystring(self, req, name, arg):
        """Pull a value from the query string of a request object or return `missing` if
        the value cannot be found.
        """
        return missing

    def parse_form(self, req, name, arg):
        """Pull a value from the form data of a request object or return
        `missing` if the value cannot be found.
        """
        return missing

    def parse_headers(self, req, name, arg):
        """Pull a value from the headers or return `missing` if the value
        cannot be found.
        """
        return missing

    def parse_cookies(self, req, name, arg):
        """Pull a cookie value from the request or return `missing` if the value
        cannot be found.
        """
        return missing

    def parse_files(self, req, name, arg):
        """Pull a file from the request or return `missing` if the value file
        cannot be found.
        """
        return missing

    def handle_error(self, error):
        """Called if an error occurs while parsing args. By default, just logs and
        raises ``error``.
        """
        logger.error(error)
        raise error
| mit |
google/pigweed | pw_arduino_build/py/pw_arduino_build/unit_test_runner.py | 1 | 13528 | #!/usr/bin/env python3
# Copyright 2020 The Pigweed Authors
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not
# use this file except in compliance with the License. You may obtain a copy of
# the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations under
# the License.
"""This script flashes and runs unit tests onto Arduino boards."""
import argparse
import logging
import os
import platform
import re
import subprocess
import sys
import time
from pathlib import Path
from typing import List
import serial # type: ignore
import serial.tools.list_ports # type: ignore
import pw_arduino_build.log
from pw_arduino_build import teensy_detector
from pw_arduino_build.file_operations import decode_file_json
_LOG = logging.getLogger('unit_test_runner')
# Verification of test pass/failure depends on these strings. If the formatting
# or output of the simple_printing_event_handler changes, this may need to be
# updated. Note these are bytes literals: they are matched against raw serial
# output before any decoding.
_TESTS_STARTING_STRING = b'[==========] Running all tests.'
_TESTS_DONE_STRING = b'[==========] Done running all tests.'
_TEST_FAILURE_STRING = b'[ FAILED ]'

# How long to wait (in seconds) for the first byte of a test to be emitted.
# This is longer than the user-configurable timeout as there's a delay while
# the device is flashed.
_FLASH_TIMEOUT = 5.0
class TestingFailure(Exception):
    """Raised when a flashing, serial-communication, or verification step
    of a test run fails."""
class DeviceNotFound(Exception):
    """Raised when no attached development board can be detected."""
class ArduinoCoreNotSupported(Exception):
    """Raised when a given Arduino core does not support unit testing.
    The message carries the offending package path."""
def valid_file_name(arg):
    """Argparse type callback: resolve *arg* to an absolute existing file.

    Environment variables (e.g. ``$HOME``) and a leading ``~`` are expanded
    before checking, so shell-style paths work even if the invoking shell
    did not expand them (previously only ``$VAR`` forms were expanded).

    Args:
      arg: Path string supplied on the command line.

    Returns:
      pathlib.Path: Absolute path to the existing file.

    Raises:
      argparse.ArgumentTypeError: If the path does not name an existing file.
    """
    file_path = Path(
        os.path.expanduser(os.path.expandvars(arg))).absolute()
    if not file_path.is_file():
        raise argparse.ArgumentTypeError(f"'{arg}' does not exist.")
    return file_path
def parse_args():
    """Parses command-line arguments.

    Returns:
      argparse.Namespace with the parsed options. ``binary`` is validated by
      ``valid_file_name`` and is therefore an absolute Path to an existing
      file; ``set_variable`` is a list (or None) because of action='append'.
    """
    parser = argparse.ArgumentParser(description=__doc__)
    parser.add_argument('binary',
                        help='The target test binary to run',
                        type=valid_file_name)
    parser.add_argument('--port',
                        help='The name of the serial port to connect to when '
                        'running tests')
    parser.add_argument('--baud',
                        type=int,
                        default=115200,
                        help='Target baud rate to use for serial communication'
                        ' with target device')
    parser.add_argument('--test-timeout',
                        type=float,
                        default=5.0,
                        help='Maximum communication delay in seconds before a '
                        'test is considered unresponsive and aborted')
    parser.add_argument('--verbose',
                        '-v',
                        dest='verbose',
                        action='store_true',
                        help='Output additional logs as the script runs')
    parser.add_argument('--flash-only',
                        action='store_true',
                        help="Don't check for test output after flashing.")

    # arduino_builder arguments
    # TODO(tonymd): Get these args from __main__.py or elsewhere.
    parser.add_argument("-c",
                        "--config-file",
                        required=True,
                        help="Path to a config file.")
    parser.add_argument("--arduino-package-path",
                        help="Path to the arduino IDE install location.")
    parser.add_argument("--arduino-package-name",
                        help="Name of the Arduino board package to use.")
    parser.add_argument("--compiler-path-override",
                        help="Path to arm-none-eabi-gcc bin folder. "
                        "Default: Arduino core specified gcc")
    parser.add_argument("--board", help="Name of the Arduino board to use.")
    parser.add_argument("--upload-tool",
                        required=True,
                        help="Name of the Arduino upload tool to use.")
    parser.add_argument("--set-variable",
                        action="append",
                        metavar='some.variable=NEW_VALUE',
                        help="Override an Arduino recipe variable. May be "
                        "specified multiple times. For example: "
                        "--set-variable 'serial.port.label=/dev/ttyACM0' "
                        "--set-variable 'serial.port.protocol=Teensy'")

    return parser.parse_args()
def log_subprocess_output(level, output):
    """Log each line of captured subprocess *output* (bytes) at *level*."""
    decoded = output.decode('utf-8', errors='replace')
    for line in decoded.splitlines():
        _LOG.log(level, line)
def read_serial(port, baud_rate, test_timeout) -> bytes:
    """Reads lines from a serial port until a line read times out.

    Opens the port with the longer _FLASH_TIMEOUT first (the device may still
    be rebooting after flashing), then tightens the timeout to *test_timeout*
    once the first byte arrives.

    Args:
      port: Serial port name/path to open.
      baud_rate: Baud rate for the connection.
      test_timeout: Per-line read timeout (seconds) once output has started.

    Returns bytes object containing the read serial data.

    Raises:
      TestingFailure: If the port cannot be opened or no output appears.
    """

    serial_data = bytearray()
    device = serial.Serial(baudrate=baud_rate,
                           port=port,
                           timeout=_FLASH_TIMEOUT)
    if not device.is_open:
        raise TestingFailure('Failed to open device')

    # Flush input buffer and reset the device to begin the test.
    device.reset_input_buffer()

    # Block and wait for the first byte.
    serial_data += device.read()
    if not serial_data:
        raise TestingFailure('Device not producing output')

    device.timeout = test_timeout

    # Read with a reasonable timeout until we stop getting characters.
    while True:
        bytes_read = device.readline()
        if not bytes_read:
            break
        serial_data += bytes_read
        if serial_data.rfind(_TESTS_DONE_STRING) != -1:
            # Set to much more aggressive timeout since the last one or two
            # lines should print out immediately. (one line if all fails or all
            # passes, two lines if mixed.)
            device.timeout = 0.01

    # Remove carriage returns.
    # NOTE(review): replace() on a bytearray yields a bytearray, so despite
    # the `-> bytes` annotation the returned object is a bytearray; callers
    # here only use find/rfind on it, which both types support.
    serial_data = serial_data.replace(b'\r', b'')

    # Try to trim captured results to only contain most recent test run.
    test_start_index = serial_data.rfind(_TESTS_STARTING_STRING)
    return serial_data if test_start_index == -1 else serial_data[
        test_start_index:]
def wait_for_port(port):
    """Block (polling once per second) until *port* appears in the system's
    serial port list."""
    while True:
        present = [info.device
                   for info in serial.tools.list_ports.comports()]
        if port in present:
            return
        time.sleep(1)
def flash_device(test_runner_args, upload_tool):
    """Flash the firmware onto a connected device.

    Invokes the `arduino_builder` script with the given runner args plus the
    objcopy/postbuild/upload steps, logging its output. Raises
    TestingFailure if the subprocess exits nonzero.
    """
    # TODO(tonymd): Create a library function to call rather than launching
    # the arduino_builder script.
    cmd = ['arduino_builder', '--quiet']
    cmd.extend(test_runner_args)
    cmd.extend(['--run-objcopy', '--run-postbuilds', '--run-upload',
                upload_tool])
    _LOG.info('Flashing firmware to device')
    _LOG.debug('Running: %s', ' '.join(cmd))

    process = subprocess.run(cmd,
                             stdout=subprocess.PIPE,
                             stderr=subprocess.STDOUT,
                             env=os.environ.copy())
    if process.returncode:
        log_subprocess_output(logging.ERROR, process.stdout)
        raise TestingFailure('Failed to flash target device')

    log_subprocess_output(logging.DEBUG, process.stdout)
    _LOG.debug('Successfully flashed firmware to device')
def handle_test_results(test_output):
    """Inspect captured serial output and decide pass/fail.

    Raises TestingFailure if the start marker is missing, the done marker
    never appeared, or any failure marker is present; logs and returns
    normally on success.
    """
    if test_output.find(_TESTS_STARTING_STRING) == -1:
        raise TestingFailure('Failed to find test start')

    if test_output.rfind(_TESTS_DONE_STRING) == -1:
        log_subprocess_output(logging.INFO, test_output)
        raise TestingFailure('Tests did not complete')

    if test_output.rfind(_TEST_FAILURE_STRING) != -1:
        log_subprocess_output(logging.INFO, test_output)
        raise TestingFailure('Test suite had one or more failures')

    log_subprocess_output(logging.DEBUG, test_output)
    _LOG.info('Test passed!')
def run_device_test(binary, flash_only, port, baud, test_timeout, upload_tool,
                    arduino_package_path, test_runner_args) -> bool:
    """Flashes, runs, and checks an on-device test binary.

    Only Teensy cores are supported. If no port (or no --set-variable args)
    was given, the first detected board's settings are used instead.

    NOTE(review): this function mutates the caller's ``test_runner_args``
    list in place (+= and del below).

    Args:
      binary: Path string of the test .elf (logged only; flashing uses
          test_runner_args).
      flash_only: If true, stop after flashing and report success.
      port: Serial port to read results from, or None to auto-detect.
      baud: Serial baud rate.
      test_timeout: Per-line serial read timeout in seconds.
      upload_tool: Arduino upload tool name (may be overridden by detection).
      arduino_package_path: Path to the Arduino core; must contain 'teensy'.
      test_runner_args: Extra arduino_builder args, or None.

    Returns true on test pass.
    """
    if test_runner_args is None:
        test_runner_args = []

    if "teensy" not in arduino_package_path:
        raise ArduinoCoreNotSupported(arduino_package_path)

    if port is None or "--set-variable" not in test_runner_args:
        _LOG.debug('Attempting to automatically detect dev board')
        boards = teensy_detector.detect_boards(arduino_package_path)
        if not boards:
            error = 'Could not find an attached device'
            _LOG.error(error)
            raise DeviceNotFound(error)
        # Adopt the first detected board's runner args and upload tool.
        test_runner_args += boards[0].test_runner_args()
        upload_tool = boards[0].arduino_upload_tool_name
        if port is None:
            port = boards[0].dev_name

    # TODO(tonymd): Remove this when teensy_ports is working in teensy_detector
    if platform.system() == "Windows":
        # Delete the incorrect serial port.
        index_of_port = [
            i for i, l in enumerate(test_runner_args)
            if l.startswith('serial.port=')
        ]
        if index_of_port:
            # Delete the '--set-variable' arg
            del test_runner_args[index_of_port[0] - 1]
            # Delete the 'serial.port=*' arg
            del test_runner_args[index_of_port[0] - 1]

    _LOG.debug('Launching test binary %s', binary)
    try:
        result: List[bytes] = []
        _LOG.info('Running test')
        # Warning: A race condition is possible here. This assumes the host is
        # able to connect to the port and that there isn't a test running on
        # this serial port.
        flash_device(test_runner_args, upload_tool)
        wait_for_port(port)
        if flash_only:
            return True
        result.append(read_serial(port, baud, test_timeout))
        if result:
            handle_test_results(result[0])
    except TestingFailure as err:
        _LOG.error(err)
        return False

    return True
def get_option(key, config_file_values, args, required=False):
    """Resolve *key* from the JSON config file or the parsed arguments.

    A value present in ``config_file_values`` wins over the command line.
    When *required* is set and neither source supplies a value, print an
    argparse-style error message and exit with status 1.
    """
    from_cli = getattr(args, key, None)
    resolved = config_file_values.get(key, from_cli)
    if required and from_cli is None and resolved is None:
        # Mimic argparse's own "required argument" error wording.
        prog = os.path.basename(sys.argv[0])
        flag = "--" + key.replace("_", "-")
        print(f"{prog}: error: the following arguments are required: "
              f"{flag}")
        sys.exit(1)
    return resolved
def _core_args(args, json_file_options):
    """Assemble Arduino core/compiler/board arguments for arduino_builder.

    Returns:
      (arduino_package_path, builder_args) tuple; the path is also needed
      separately by run_device_test.
    """
    arduino_package_path = get_option("arduino_package_path",
                                      json_file_options,
                                      args,
                                      required=True)
    # Arduino core args.
    builder_args = [
        "--arduino-package-path",
        arduino_package_path,
        "--arduino-package-name",
        get_option("arduino_package_name",
                   json_file_options,
                   args,
                   required=True),
    ]

    # Use CIPD installed compilers.
    compiler_path_override = get_option("compiler_path_override",
                                        json_file_options, args)
    if compiler_path_override:
        builder_args += ["--compiler-path-override", compiler_path_override]

    # Run subcommand with board selection arg.
    builder_args += [
        "run", "--board",
        get_option("board", json_file_options, args, required=True)
    ]
    return arduino_package_path, builder_args


def _elf_args(binary):
    """Assemble --build-path/--build-project-name args for a .elf binary."""
    build_path = binary.parent.as_posix()
    build_project_name = binary.name
    # Remove '.elf' extension.
    match_result = re.match(r'(.*?)\.elf$', binary.name, re.IGNORECASE)
    if match_result:
        build_project_name = match_result[1]
    return ["--build-path", build_path,
            "--build-project-name", build_project_name]


def main():
    """Set up runner, and then flash/run device test."""
    args = parse_args()
    json_file_options, unused_config_path = decode_file_json(args.config_file)

    log_level = logging.DEBUG if args.verbose else logging.INFO
    pw_arduino_build.log.install(log_level)

    # Construct arduino_builder flash arguments for a given .elf binary.
    arduino_package_path, arduino_builder_args = _core_args(
        args, json_file_options)

    # .elf file location args.
    binary = args.binary
    arduino_builder_args += _elf_args(binary)

    # USB port is passed to arduino_builder_args via --set-variable args.
    if args.set_variable:
        for var in args.set_variable:
            arduino_builder_args += ["--set-variable", var]

    if run_device_test(binary.as_posix(),
                       args.flash_only,
                       args.port,
                       args.baud,
                       args.test_timeout,
                       args.upload_tool,
                       arduino_package_path,
                       test_runner_args=arduino_builder_args):
        sys.exit(0)
    else:
        sys.exit(1)


if __name__ == '__main__':
    main()
| apache-2.0 |
TheTimmy/spack | var/spack/repos/builtin/packages/r-rgl/package.py | 2 | 2441 | ##############################################################################
# Copyright (c) 2013-2017, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory.
#
# This file is part of Spack.
# Created by Todd Gamblin, tgamblin@llnl.gov, All rights reserved.
# LLNL-CODE-647188
#
# For details, see https://github.com/llnl/spack
# Please also see the NOTICE and LICENSE files for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License (as
# published by the Free Software Foundation) version 2.1, February 1999.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
# conditions of the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
from spack import *
class RRgl(RPackage):
    """Provides medium to high level functions for 3D interactive graphics,
    including functions modelled on base graphics (plot3d(), etc.) as well as
    functions for constructing representations of geometric objects (cube3d(),
    etc.). Output may be on screen using OpenGL, or to various standard
    3D file formats including WebGL, PLY, OBJ, STL as well as 2D image formats,
    including PNG, Postscript, SVG, PGF."""

    homepage = "https://r-forge.r-project.org/projects/rgl"
    url = "https://cloud.r-project.org/src/contrib/rgl_0.98.1.tar.gz"

    # Second argument is the checksum of the release tarball.
    version('0.98.1', 'bd69e1d33f1590feb4b6dc080b133e5b')

    # Works with R 3.2.x through 3.9.x.
    depends_on('r@3.2:3.9')
    # Native libraries linked in for the OpenGL / image-format backends.
    depends_on('zlib', type=('link'))
    depends_on('libpng', type=('link'))
    depends_on('freetype', type=('link'))
    depends_on('mesa', type=('link'))
    depends_on('mesa-glu', type=('link'))
    # R package dependencies, needed both to build and at runtime.
    depends_on('r-htmlwidgets', type=('build', 'run'))
    depends_on('r-htmltools', type=('build', 'run'))
    depends_on('r-knitr', type=('build', 'run'))
    depends_on('r-jsonlite', type=('build', 'run'))
    depends_on('r-shiny', type=('build', 'run'))
    depends_on('r-magrittr', type=('build', 'run'))
| lgpl-2.1 |
endlessm/chromium-browser | third_party/catapult/dashboard/dashboard/find_change_points.py | 1 | 9944 | # Copyright 2015 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""A simplified change-point detection algorithm.
Historically, the performance dashboard has used the GASP service for
detection. Brett Schein wrote a simplified version of this algorithm
for the dashboard in Matlab, and this was ported to Python by Dave Tu.
The general goal is to find any increase or decrease which is likely to
represent a real change in the underlying data source.
See: http://en.wikipedia.org/wiki/Step_detection
In 2019, we also integrate a successive bisection with combined Mann-Whitney
U-test and Kolmogorov-Smirnov tests to identify potential change points. This is
not exactly the E-divisive algorithm, but is close enough.
See: https://arxiv.org/abs/1306.4933
"""
from __future__ import print_function
from __future__ import division
from __future__ import absolute_import
import collections
import logging
from dashboard import find_step
from dashboard import ttest
from dashboard.common import math_utils
from dashboard.common import clustering_change_detector
# Maximum number of points to consider at one time.
_MAX_WINDOW_SIZE = 50

# Minimum number of points in a segment. This can help filter out erroneous
# results by ignoring results that were found from looking at too few points.
# NOTE(review): no leading underscore — presumably imported by other dashboard
# modules; verify before renaming.
MIN_SEGMENT_SIZE = 6

# Minimum absolute difference between medians before and after.
_MIN_ABSOLUTE_CHANGE = 0

# Minimum relative difference between medians before and after.
_MIN_RELATIVE_CHANGE = 0.01

# "Steppiness" is a number between 0 and 1 that indicates how similar the
# shape is to a perfect step function, where 1 represents a step function.
_MIN_STEPPINESS = 0.5

# The "standard deviation" is based on a subset of points in the series.
# This parameter is the minimum acceptable ratio of the relative change
# and this standard deviation.
_MULTIPLE_OF_STD_DEV = 2.5
class ChangePoint(
    collections.namedtuple(
        'ChangePoint',
        (
            # The x-value of the first point after a step.
            'x_value',
            # Median of the segments before and after the change point.
            'median_before',
            'median_after',
            # Number of points before and after the change point.
            'size_before',
            'size_after',
            # X-values of the first and last point in the series window used.
            'window_start',
            'window_end',
            # Relative change from before to after.
            'relative_change',
            # Standard deviation of points before.
            'std_dev_before',
            # Results of the Welch's t-test for values before and after.
            't_statistic',
            'degrees_of_freedom',
            'p_value'))):
  """A ChangePoint represents a change in a series -- a potential alert."""

  # Empty __slots__ keeps instances free of a per-instance __dict__,
  # preserving the namedtuple memory savings. (The previous `_slots = None`
  # was a typo for this idiom and had no effect.)
  __slots__ = ()

  def AsDict(self):
    """Returns a dictionary mapping attributes to values."""
    return self._asdict()
def FindChangePoints(series,
                     max_window_size=_MAX_WINDOW_SIZE,
                     min_segment_size=MIN_SEGMENT_SIZE,
                     min_absolute_change=_MIN_ABSOLUTE_CHANGE,
                     min_relative_change=_MIN_RELATIVE_CHANGE,
                     min_steppiness=_MIN_STEPPINESS,
                     multiple_of_std_dev=_MULTIPLE_OF_STD_DEV):
  """Finds change points in the given series.

  Only the last |max_window_size| points are examined, regardless of how many
  points are passed in. The reason why it might make sense to limit the number
  of points to look at is that if there are multiple change-points in the window
  that's looked at, then this function will be less likely to find any of them.

  This uses a clustering change detector (an approximation of E-divisive) in the
  `clustering_change_detector` module.

  Args:
    series: A list of (x, y) pairs.
    max_window_size: Number of points to analyze.
    min_segment_size: Min size of segments before or after change point.
    min_absolute_change: Absolute change threshold.
    min_relative_change: Relative change threshold.
    min_steppiness: Threshold for how similar to a step a change point must be.
    multiple_of_std_dev: Threshold for change as multiple of std. deviation.

  Returns:
    A list with one ChangePoint object, or an empty list. When several
    candidates pass the filters, only the newest (largest index) is returned.
  """
  if len(series) < 2:
    return []  # Not enough points to possibly contain a valid split point.
  series = series[-max_window_size:]
  _, y_values = zip(*series)
  candidate_indices = []
  split_index = 0

  def RelativeIndexAdjuster(base, offset):
    # Maps an index found in the trimmed sub-series back into y_values
    # coordinates; the sub-series starts min_segment_size before `base`
    # except on the very first pass (base == 0).
    if base == 0:
      return offset
    # Prevent negative indices.
    return max((base + offset) - min_segment_size, 0)

  # We iteratively find all potential change-points in the range.
  while split_index + min_segment_size < len(y_values):
    try:
      # First we get an adjusted set of indices, starting from split_index,
      # filtering out the ones we've already seen.
      potential_candidates_unadjusted = (
          clustering_change_detector.ClusterAndFindSplit(
              y_values[max(split_index - min_segment_size, 0):]))
      potential_candidates_unfiltered = [
          RelativeIndexAdjuster(split_index, x)
          for x in potential_candidates_unadjusted
      ]
      potential_candidates = [
          x for x in potential_candidates_unfiltered
          if x not in candidate_indices
      ]
      logging.debug('New indices: %s', potential_candidates)
      if potential_candidates:
        candidate_indices.extend(potential_candidates)
        split_index = max(potential_candidates)
      else:
        break
    except clustering_change_detector.Error as e:
      # A detector failure before any candidate was found means no result;
      # after at least one candidate, keep what we have so far.
      if not candidate_indices:
        logging.warning('Clustering change point detection failed: %s', e)
        return []
      break

  def RevAndIdx(idx):
    # Human-readable (revision, index) pair for log messages.
    return ('rev:%s' % (series[idx][0],), 'idx:%s' % (idx,))

  logging.info('E-Divisive candidate change-points: %s',
               [RevAndIdx(idx) for idx in candidate_indices])
  change_points = []
  # Iterate newest-first so change_points[0] is the most recent survivor.
  for potential_index in reversed(sorted(candidate_indices)):
    passed_filter, reject_reason = _PassesThresholds(
        y_values,
        potential_index,
        min_segment_size=min_segment_size,
        min_absolute_change=min_absolute_change,
        min_relative_change=min_relative_change,
        min_steppiness=min_steppiness,
        multiple_of_std_dev=multiple_of_std_dev)
    if passed_filter:
      change_points.append(potential_index)
    else:
      logging.debug('Rejected %s as potential index (%s); reason = %s',
                    potential_index, RevAndIdx(potential_index), reject_reason)
  logging.info('E-Divisive potential change-points: %s',
               [RevAndIdx(idx) for idx in change_points])
  return [MakeChangePoint(series, index) for index in change_points[0:1]]
def MakeChangePoint(series, split_index):
  """Builds a ChangePoint describing the split of |series| at |split_index|.

  Args:
    series: A list of (x, y) pairs.
    split_index: Index of the first point after the split.

  Returns:
    A ChangePoint object.
  """
  assert 0 <= split_index < len(series)
  x_values, y_values = zip(*series)
  before = y_values[:split_index]
  after = y_values[split_index:]
  median_before = math_utils.Median(before)
  median_after = math_utils.Median(after)
  welch = ttest.WelchsTTest(before, after)
  return ChangePoint(
      x_value=x_values[split_index],
      median_before=median_before,
      median_after=median_after,
      size_before=len(before),
      size_after=len(after),
      window_start=x_values[0],
      window_end=x_values[-1],  # inclusive bound
      relative_change=math_utils.RelativeChange(median_before, median_after),
      std_dev_before=math_utils.StandardDeviation(before),
      t_statistic=welch.t,
      degrees_of_freedom=welch.df,
      p_value=welch.p)
def _PassesThresholds(values, split_index, min_segment_size,
                      min_absolute_change, min_relative_change, min_steppiness,
                      multiple_of_std_dev):
  """Checks whether a point in a series appears to be an change point.

  Args:
    values: A list of numbers.
    split_index: An index in the list of numbers.
    min_segment_size: Threshold for size of segments before or after a point.
    min_absolute_change: Minimum absolute median change threshold.
    min_relative_change: Minimum relative median change threshold.
    min_steppiness: Threshold for how similar to a step a change point must be.
    multiple_of_std_dev: Threshold for change as multiple of std. deviation.

  Returns:
    A tuple of (bool, string) where the bool indicates whether the split index
    passes the thresholds and the string being the reason it did not.
  """
  before = values[:split_index]
  after = values[split_index:]
  median_before = math_utils.Median(before)
  median_after = math_utils.Median(after)

  # 1. Both segments must contain enough points.
  if min(len(before), len(after)) < min_segment_size:
    return (False, 'min_segment_size')

  # 2. The medians must differ by a large enough absolute amount.
  absolute_change = abs(median_before - median_after)
  if absolute_change < min_absolute_change:
    return (False, 'min_absolute_change')

  # 3. ... and by a large enough relative amount.
  relative_change = math_utils.RelativeChange(median_before, median_after)
  if relative_change < min_relative_change:
    return (False, 'min_relative_change')

  # 4. The change must stand out against the quieter segment's spread.
  smaller_std_dev = min(
      math_utils.StandardDeviation(before),
      math_utils.StandardDeviation(after))
  if absolute_change < multiple_of_std_dev * smaller_std_dev:
    return (False, 'min_std_dev')

  # 5. The series must look sufficiently like a step function at the split.
  if find_step.Steppiness(values, split_index) < min_steppiness:
    return (False, 'min_steppiness')

  # Passed all filters!
  return (True, 'passed')
| bsd-3-clause |
Flowerfan524/TriClustering | cotrain.py | 1 | 2741 | from __future__ import print_function, absolute_import
from reid.models import model_utils as mu
from reid.utils.data import data_process as dp
from reid.utils.serialization import save_checkpoint
from reid import datasets
from reid import models
from reid.config import Config
import torch
import numpy as np
import os
import argparse
# Parse the RNG seed from the command line at import time; `args` is used
# below and inside cotrain() for the dataset split.
parser = argparse.ArgumentParser(description='Cotrain args')
parser.add_argument('-s', '--seed', type=int, default=0)
args = parser.parse_args()

# Seed the CPU RNG and, when CUDA is available, the GPU RNG, so runs with
# the same --seed are reproducible.
torch.manual_seed(args.seed)
if torch.cuda.is_available():
    torch.cuda.manual_seed(args.seed)
def cotrain(configs, data, iter_step=1, train_ratio=0.2):
    """Iteratively co-train two models, growing the labeled set each round.

    Each iteration trains one model per view on the current labeled data,
    predicts on the unlabeled pool, and moves confidently-predicted samples
    into the labeled set for the next round.

    params:
        configs: sequence of two model configurations (one per view)
        data: dataset with ``trainval`` samples and an ``images_dir``
        iter_step: maximum iteration steps (must be >= 1)
        train_ratio: fraction of ``data.trainval`` used as the initial
            labeled set

    raises:
        ValueError: if ``iter_step`` is less than 1
    """
    # Validate explicitly instead of `assert`, which is stripped under -O.
    if iter_step < 1:
        raise ValueError('iter_step must be >= 1, got %r' % (iter_step,))
    # NOTE(review): the split uses the module-level `args.seed`, so it is
    # controlled by the command line rather than by a parameter.
    train_data, untrain_data = dp.split_dataset(data.trainval, train_ratio,
                                                args.seed)
    data_dir = data.images_dir
    new_train_data = train_data
    for step in range(iter_step):
        pred_probs = []
        add_ids = []
        for view in range(2):
            configs[view].set_training(True)
            model = mu.train(new_train_data, data_dir, configs[view])
            save_checkpoint(
                {
                    'state_dict': model.state_dict(),
                    'epoch': step + 1,
                    'train_data': new_train_data
                }, False,
                fpath=os.path.join(configs[view].logs_dir,
                                   configs[view].model_name,
                                   'cotrain.epoch%d' % step))
            if len(untrain_data) == 0:
                # Nothing left to label; still finish training both views.
                continue
            pred_probs.append(
                mu.predict_prob(model, untrain_data, data_dir,
                                configs[view]))
            # NOTE(review): selection is scored against the *initial* labeled
            # set (train_data), not the grown set — confirm this is intended.
            add_ids.append(dp.sel_idx(pred_probs[view], train_data))

            # Report this view's accuracy over all train+val data.
            p_b = mu.predict_prob(model, data.trainval, data_dir,
                                  configs[view])
            p_y = np.argmax(p_b, axis=1)
            t_y = [c for (_, c, _, _) in data.trainval]
            print(np.mean(t_y == p_y))
        if len(untrain_data) == 0:
            break

        # Merge both views' votes and move the selected samples from the
        # unlabeled pool into the training set for the next iteration.
        pred_y = np.argmax(sum(pred_probs), axis=1)
        add_id = sum(add_ids)
        new_train_data, untrain_data = dp.update_train_untrain(
            add_id, new_train_data, untrain_data, pred_y)
# Model configurations for the two co-training views.
# NOTE(review): config1 is created but never used below.
config1 = Config()
config2 = Config(model_name='densenet121', height=224, width=224)
config3 = Config(model_name='resnet101', img_translation=2)

# Load the dataset from ./data/<dataset> and run 5 co-training iterations
# with the densenet121/resnet101 pair.
dataset = 'market1501std'
cur_path = os.getcwd()
logs_dir = os.path.join(cur_path, 'logs')
data_dir = os.path.join(cur_path,'data',dataset)
data = datasets.create(dataset, data_dir)
cotrain([config2,config3], data, 5)
| mit |
treycausey/scikit-learn | sklearn/cluster/tests/test_mean_shift.py | 9 | 2843 | """
Testing for mean shift clustering methods
"""
import numpy as np
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_false
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_array_equal
from sklearn.cluster import MeanShift
from sklearn.cluster import mean_shift
from sklearn.cluster import estimate_bandwidth
from sklearn.cluster import get_bin_seeds
from sklearn.datasets.samples_generator import make_blobs
# Shared fixture: 500 2-D points drawn around three well-separated centers
# (offset by +10 so all coordinates are positive), used by the tests below.
n_clusters = 3
centers = np.array([[1, 1], [-1, -1], [1, -1]]) + 10
X, _ = make_blobs(n_samples=500, n_features=2, centers=centers,
                  cluster_std=0.4, shuffle=True, random_state=0)
def test_estimate_bandwidth():
    """Sanity-check estimate_bandwidth on the blob fixture data."""
    bw = estimate_bandwidth(X, n_samples=300)
    assert_true(0.9 <= bw <= 1.5)
def test_mean_shift():
    """Both MeanShift APIs must recover the expected cluster count."""
    bandwidth = 1.2

    # Estimator API.
    estimator = MeanShift(bandwidth=bandwidth)
    labels = estimator.fit(X).labels_
    assert_equal(len(np.unique(labels)), n_clusters)

    # Functional API.
    _, labels = mean_shift(X, bandwidth=bandwidth)
    assert_equal(len(np.unique(labels)), n_clusters)
def test_meanshift_predict():
    """fit_predict and a subsequent predict call must agree."""
    estimator = MeanShift(bandwidth=1.2)
    first = estimator.fit_predict(X)
    second = estimator.predict(X)
    assert_array_equal(first, second)
def test_unfitted():
    """Non-regression: an unfitted estimator has no fitted attributes."""
    estimator = MeanShift()
    for attr in ("cluster_centers_", "labels_"):
        assert_false(hasattr(estimator, attr))
def test_bin_seeds():
    """Check the bin seeding technique used to initialize mean shift."""
    # Six points in the plane.
    data = np.array([[1., 1.], [1.5, 1.5], [1.8, 1.2],
                     [2., 1.], [2.1, 1.1], [0., 0.]])

    def found_bins(bin_size, min_bin_freq):
        seeds = get_bin_seeds(data, bin_size, min_bin_freq)
        return set(tuple(point) for point in seeds)

    # Bin coarseness 1.0 with min_bin_freq 1 yields three bins.
    assert_true(found_bins(1, 1) == set([(1., 1.), (2., 1.), (0., 0.)]))

    # Bin coarseness 1.0 with min_bin_freq 2 yields two bins.
    assert_true(found_bins(1, 2) == set([(1., 1.), (2., 1.)]))

    # A 0.01 bin size with min_bin_freq 1 makes every point its own seed.
    assert_true(len(found_bins(0.01, 1)) == 6)
| bsd-3-clause |
jounex/hue | desktop/core/ext-py/MySQL-python-1.2.5/tests/test_MySQLdb_nonstandard.py | 41 | 2737 | import unittest
import _mysql
import MySQLdb
from MySQLdb.constants import FIELD_TYPE
from configdb import connection_factory
import warnings
warnings.simplefilter("ignore")
class TestDBAPISet(unittest.TestCase):
    """Sanity checks for the DBAPISet equality/inequality semantics.

    Uses assertEqual/assertNotEqual (which invoke the same ==/!= operators
    as the previous assertTrue forms) for clearer failure messages.
    """

    def test_set_equality(self):
        self.assertEqual(MySQLdb.STRING, MySQLdb.STRING)

    def test_set_inequality(self):
        self.assertNotEqual(MySQLdb.STRING, MySQLdb.NUMBER)

    def test_set_equality_membership(self):
        self.assertEqual(FIELD_TYPE.VAR_STRING, MySQLdb.STRING)

    def test_set_inequality_membership(self):
        self.assertNotEqual(FIELD_TYPE.DATE, MySQLdb.STRING)
class CoreModule(unittest.TestCase):
    """Core _mysql module features."""

    def test_NULL(self):
        """Should have a NULL constant."""
        self.assertEqual(_mysql.NULL, 'NULL')

    def test_version(self):
        """Version information sanity."""
        # assertIsInstance reports the offending type on failure, unlike
        # the previous assertTrue(isinstance(...)) form.
        self.assertIsInstance(_mysql.__version__, str)
        self.assertIsInstance(_mysql.version_info, tuple)
        self.assertEqual(len(_mysql.version_info), 5)

    def test_client_info(self):
        self.assertIsInstance(_mysql.get_client_info(), str)

    def test_thread_safe(self):
        self.assertIsInstance(_mysql.thread_safe(), int)
class CoreAPI(unittest.TestCase):
    """Test _mysql interaction internals."""

    def setUp(self):
        self.conn = connection_factory(use_unicode=True)

    def tearDown(self):
        self.conn.close()

    def test_thread_id(self):
        tid = self.conn.thread_id()
        self.assertIsInstance(tid, int,
                              "thread_id didn't return an int.")
        # The old assertRaises(..., ('evil',), "msg") form passed BOTH extra
        # positionals to thread_id; the context-manager form makes the
        # "any argument raises TypeError" intent explicit.
        with self.assertRaises(TypeError):
            self.conn.thread_id('evil')

    def test_affected_rows(self):
        # assertEquals is a deprecated alias (removed in Python 3.12).
        self.assertEqual(self.conn.affected_rows(), 0,
                         "Should return 0 before we do anything.")

    #def test_debug(self):
        ## FIXME Only actually tests if you lack SUPER
        #self.assertRaises(MySQLdb.OperationalError,
                          #self.conn.dump_debug_info)

    def test_charset_name(self):
        self.assertIsInstance(self.conn.character_set_name(), str,
                              "Should return a string.")

    def test_host_info(self):
        self.assertIsInstance(self.conn.get_host_info(), str,
                              "Should return a string.")

    def test_proto_info(self):
        self.assertIsInstance(self.conn.get_proto_info(), int,
                              "Should return an int.")

    def test_server_info(self):
        self.assertIsInstance(self.conn.get_server_info(), str,
                              "Should return an str.")
| apache-2.0 |
sidrakesh93/grpc | src/python/interop/interop/test_pb2.py | 22 | 8377 | # Generated by the protocol buffer compiler. DO NOT EDIT!
# source: test/cpp/interop/test.proto
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
from google.protobuf import descriptor_pb2
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
from test.cpp.interop import empty_pb2 as test_dot_cpp_dot_interop_dot_empty__pb2
from test.cpp.interop import messages_pb2 as test_dot_cpp_dot_interop_dot_messages__pb2
DESCRIPTOR = _descriptor.FileDescriptor(
name='test/cpp/interop/test.proto',
package='grpc.testing',
serialized_pb=_b('\n\x1btest/cpp/interop/test.proto\x12\x0cgrpc.testing\x1a\x1ctest/cpp/interop/empty.proto\x1a\x1ftest/cpp/interop/messages.proto2\xbb\x04\n\x0bTestService\x12\x35\n\tEmptyCall\x12\x13.grpc.testing.Empty\x1a\x13.grpc.testing.Empty\x12\x46\n\tUnaryCall\x12\x1b.grpc.testing.SimpleRequest\x1a\x1c.grpc.testing.SimpleResponse\x12l\n\x13StreamingOutputCall\x12(.grpc.testing.StreamingOutputCallRequest\x1a).grpc.testing.StreamingOutputCallResponse0\x01\x12i\n\x12StreamingInputCall\x12\'.grpc.testing.StreamingInputCallRequest\x1a(.grpc.testing.StreamingInputCallResponse(\x01\x12i\n\x0e\x46ullDuplexCall\x12(.grpc.testing.StreamingOutputCallRequest\x1a).grpc.testing.StreamingOutputCallResponse(\x01\x30\x01\x12i\n\x0eHalfDuplexCall\x12(.grpc.testing.StreamingOutputCallRequest\x1a).grpc.testing.StreamingOutputCallResponse(\x01\x30\x01')
,
dependencies=[test_dot_cpp_dot_interop_dot_empty__pb2.DESCRIPTOR,test_dot_cpp_dot_interop_dot_messages__pb2.DESCRIPTOR,])
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
import abc
from grpc.early_adopter import implementations
from grpc.framework.alpha import utilities
# NOTE(review): this class is protoc-generated (see the DO NOT EDIT header
# at the top of the file); fix the generator/template, do not hand-edit.
class EarlyAdopterTestServiceServicer(object):
  """<fill me in later!>"""
  # Python 2 style ABC declaration; under Python 3 this assignment is inert.
  __metaclass__ = abc.ABCMeta
  @abc.abstractmethod
  def EmptyCall(self, request, context):
    raise NotImplementedError()
  @abc.abstractmethod
  def UnaryCall(self, request, context):
    raise NotImplementedError()
  @abc.abstractmethod
  def StreamingOutputCall(self, request, context):
    raise NotImplementedError()
  @abc.abstractmethod
  def StreamingInputCall(self, request_iterator, context):
    raise NotImplementedError()
  @abc.abstractmethod
  def FullDuplexCall(self, request_iterator, context):
    raise NotImplementedError()
  @abc.abstractmethod
  def HalfDuplexCall(self, request_iterator, context):
    raise NotImplementedError()
# NOTE(review): protoc-generated; do not hand-edit.
class EarlyAdopterTestServiceServer(object):
  """<fill me in later!>"""
  __metaclass__ = abc.ABCMeta
  @abc.abstractmethod
  def start(self):
    raise NotImplementedError()
  @abc.abstractmethod
  def stop(self):
    raise NotImplementedError()
class EarlyAdopterTestServiceStub(object):
  """Abstract client-side stub for grpc.testing.TestService.

  Each RPC method carries an ``async`` attribute (placeholder for the
  asynchronous variant of the call).  ``async`` became a reserved keyword
  in Python 3.7, so plain attribute assignment (``EmptyCall.async = None``)
  is now a SyntaxError; the attribute is attached with setattr() instead,
  which is behaviorally identical.
  """
  __metaclass__ = abc.ABCMeta  # Python 2 ABC declaration; no effect on Python 3.
  @abc.abstractmethod
  def EmptyCall(self, request):
    raise NotImplementedError()
  setattr(EmptyCall, 'async', None)
  @abc.abstractmethod
  def UnaryCall(self, request):
    raise NotImplementedError()
  setattr(UnaryCall, 'async', None)
  @abc.abstractmethod
  def StreamingOutputCall(self, request):
    raise NotImplementedError()
  setattr(StreamingOutputCall, 'async', None)
  @abc.abstractmethod
  def StreamingInputCall(self, request_iterator):
    raise NotImplementedError()
  setattr(StreamingInputCall, 'async', None)
  @abc.abstractmethod
  def FullDuplexCall(self, request_iterator):
    raise NotImplementedError()
  setattr(FullDuplexCall, 'async', None)
  @abc.abstractmethod
  def HalfDuplexCall(self, request_iterator):
    raise NotImplementedError()
  setattr(HalfDuplexCall, 'async', None)
def early_adopter_create_TestService_server(servicer, port, private_key=None, certificate_chain=None):
  """Create an early-adopter gRPC server for grpc.testing.TestService.

  Args:
    servicer: an EarlyAdopterTestServiceServicer implementation.
    port: the port to listen on.
    private_key: optional PEM-encoded private key for a secure server.
    certificate_chain: optional PEM-encoded certificate chain.

  Returns:
    A server object from grpc.early_adopter.implementations.server().
  """
  # The code generator emitted one identical import per method reference;
  # a single import of each protobuf module is sufficient.
  import test.cpp.interop.empty_pb2
  import test.cpp.interop.messages_pb2
  method_service_descriptions = {
    "EmptyCall": utilities.unary_unary_service_description(
      servicer.EmptyCall,
      test.cpp.interop.empty_pb2.Empty.FromString,
      test.cpp.interop.empty_pb2.Empty.SerializeToString,
    ),
    "FullDuplexCall": utilities.stream_stream_service_description(
      servicer.FullDuplexCall,
      test.cpp.interop.messages_pb2.StreamingOutputCallRequest.FromString,
      test.cpp.interop.messages_pb2.StreamingOutputCallResponse.SerializeToString,
    ),
    "HalfDuplexCall": utilities.stream_stream_service_description(
      servicer.HalfDuplexCall,
      test.cpp.interop.messages_pb2.StreamingOutputCallRequest.FromString,
      test.cpp.interop.messages_pb2.StreamingOutputCallResponse.SerializeToString,
    ),
    "StreamingInputCall": utilities.stream_unary_service_description(
      servicer.StreamingInputCall,
      test.cpp.interop.messages_pb2.StreamingInputCallRequest.FromString,
      test.cpp.interop.messages_pb2.StreamingInputCallResponse.SerializeToString,
    ),
    "StreamingOutputCall": utilities.unary_stream_service_description(
      servicer.StreamingOutputCall,
      test.cpp.interop.messages_pb2.StreamingOutputCallRequest.FromString,
      test.cpp.interop.messages_pb2.StreamingOutputCallResponse.SerializeToString,
    ),
    "UnaryCall": utilities.unary_unary_service_description(
      servicer.UnaryCall,
      test.cpp.interop.messages_pb2.SimpleRequest.FromString,
      test.cpp.interop.messages_pb2.SimpleResponse.SerializeToString,
    ),
  }
  return implementations.server("grpc.testing.TestService", method_service_descriptions, port, private_key=private_key, certificate_chain=certificate_chain)
def early_adopter_create_TestService_stub(host, port, metadata_transformer=None, secure=False, root_certificates=None, private_key=None, certificate_chain=None, server_host_override=None):
  """Create an early-adopter gRPC client stub for grpc.testing.TestService.

  Args:
    host/port: server address to connect to.
    metadata_transformer: optional callable applied to invocation metadata.
    secure: whether to use a secure (TLS) channel.
    root_certificates/private_key/certificate_chain: optional PEM credentials.
    server_host_override: optional TLS host-name override for testing.

  Returns:
    A stub object from grpc.early_adopter.implementations.stub().
  """
  # The code generator emitted one identical import per method reference;
  # a single import of each protobuf module is sufficient.
  import test.cpp.interop.empty_pb2
  import test.cpp.interop.messages_pb2
  method_invocation_descriptions = {
    "EmptyCall": utilities.unary_unary_invocation_description(
      test.cpp.interop.empty_pb2.Empty.SerializeToString,
      test.cpp.interop.empty_pb2.Empty.FromString,
    ),
    "FullDuplexCall": utilities.stream_stream_invocation_description(
      test.cpp.interop.messages_pb2.StreamingOutputCallRequest.SerializeToString,
      test.cpp.interop.messages_pb2.StreamingOutputCallResponse.FromString,
    ),
    "HalfDuplexCall": utilities.stream_stream_invocation_description(
      test.cpp.interop.messages_pb2.StreamingOutputCallRequest.SerializeToString,
      test.cpp.interop.messages_pb2.StreamingOutputCallResponse.FromString,
    ),
    "StreamingInputCall": utilities.stream_unary_invocation_description(
      test.cpp.interop.messages_pb2.StreamingInputCallRequest.SerializeToString,
      test.cpp.interop.messages_pb2.StreamingInputCallResponse.FromString,
    ),
    "StreamingOutputCall": utilities.unary_stream_invocation_description(
      test.cpp.interop.messages_pb2.StreamingOutputCallRequest.SerializeToString,
      test.cpp.interop.messages_pb2.StreamingOutputCallResponse.FromString,
    ),
    "UnaryCall": utilities.unary_unary_invocation_description(
      test.cpp.interop.messages_pb2.SimpleRequest.SerializeToString,
      test.cpp.interop.messages_pb2.SimpleResponse.FromString,
    ),
  }
  return implementations.stub("grpc.testing.TestService", method_invocation_descriptions, host, port, metadata_transformer=metadata_transformer, secure=secure, root_certificates=root_certificates, private_key=private_key, certificate_chain=certificate_chain, server_host_override=server_host_override)
# @@protoc_insertion_point(module_scope)
| bsd-3-clause |
alikins/ansible | lib/ansible/modules/storage/netapp/netapp_e_lun_mapping.py | 32 | 10739 | #!/usr/bin/python
# (c) 2016, NetApp, Inc
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
# Standard Ansible metadata: module maturity and who supports it.
ANSIBLE_METADATA = {
    'metadata_version': '1.1',
    'status': ['preview'],
    'supported_by': 'community',
}
DOCUMENTATION = '''
---
module: netapp_e_lun_mapping
author: Kevin Hulquest (@hulquest)
short_description: Create or Remove LUN Mappings
description:
- Allows for the creation and removal of volume to host mappings for NetApp E-series storage arrays.
version_added: "2.2"
extends_documentation_fragment:
- netapp.eseries
options:
lun:
description:
- The LUN number you wish to give the mapping
- If the supplied I(volume_name) is associated with a different LUN, it will be updated to what is supplied here.
required: False
default: 0
target:
description:
- The name of host or hostgroup you wish to assign to the mapping
- If omitted, the default hostgroup is used.
- If the supplied I(volume_name) is associated with a different target, it will be updated to what is supplied here.
required: False
volume_name:
description:
- The name of the volume you wish to include in the mapping.
required: True
target_type:
description:
- Whether the target is a host or group.
- Required if supplying an explicit target.
required: False
choices: ["host", "group"]
state:
description:
- Present will ensure the mapping exists, absent will remove the mapping.
- All parameters I(lun), I(target), I(target_type) and I(volume_name) must still be supplied.
required: True
choices: ["present", "absent"]
'''
EXAMPLES = '''
---
- name: Lun Mapping Example
netapp_e_lun_mapping:
state: present
ssid: 1
lun: 12
target: Wilson
volume_name: Colby1
target_type: group
api_url: "{{ netapp_api_url }}"
api_username: "{{ netapp_api_username }}"
api_password: "{{ netapp_api_password }}"
'''
RETURN = '''
msg:
description: Status of mapping
returned: always
type: string
sample: 'Mapping existing'
'''
import json
from ansible.module_utils.api import basic_auth_argument_spec
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.netapp import request, eseries_host_argument_spec
from ansible.module_utils.pycompat24 import get_exception
# Default HTTP headers for every SANtricity REST request (JSON in, JSON out).
HEADERS = {
    "Content-Type": "application/json",
    "Accept": "application/json"
}
def get_host_and_group_map(module, ssid, api_url, user, pwd, validate_certs):
    """Return ``{'host': {name: id}, 'group': {name: id}}`` for array `ssid`.

    Fails the module (via module.fail_json) if either REST request errors.
    """
    mapping = dict(host=dict(), group=dict())
    hostgroups = 'storage-systems/%s/host-groups' % ssid
    groups_url = api_url + hostgroups
    try:
        hg_rc, hg_data = request(groups_url, headers=HEADERS, url_username=user, url_password=pwd,
                                 validate_certs=validate_certs)
    except Exception as err:
        # Was a bare `except:` + deprecated get_exception(); a bare except
        # would also swallow SystemExit/KeyboardInterrupt.
        module.fail_json(msg="Failed to get host groups. Id [%s]. Error [%s]" % (ssid, str(err)))
    for group in hg_data:
        mapping['group'][group['name']] = group['id']
    hosts = 'storage-systems/%s/hosts' % ssid
    hosts_url = api_url + hosts
    try:
        h_rc, h_data = request(hosts_url, headers=HEADERS, url_username=user, url_password=pwd,
                               validate_certs=validate_certs)
    except Exception as err:
        module.fail_json(msg="Failed to get hosts. Id [%s]. Error [%s]" % (ssid, str(err)))
    for host in h_data:
        mapping['host'][host['name']] = host['id']
    return mapping
def get_volume_id(module, data, ssid, name, api_url, user, pwd):
    """Return the WWN of the volume named `name` from the listing `data`.

    Fails the module when the name matches more than one volume (ambiguous)
    or no volume at all.  The original implementation detected "not found"
    by catching the NameError raised by an unassigned local -- replaced with
    an explicit check.
    """
    matches = [volume['wwn'] for volume in data if volume['name'] == name]
    if len(matches) > 1:
        module.fail_json(msg="More than one volume with the name: %s was found, "
                             "please use the volume WWN instead" % name)
    if not matches:
        module.fail_json(msg="No volume with the name: %s, was found" % (name))
    return matches[0]
def get_hostgroups(module, ssid, api_url, user, pwd, validate_certs):
    """Return the raw host-group listing for array `ssid`."""
    groups = "storage-systems/%s/host-groups" % ssid
    url = api_url + groups
    try:
        rc, data = request(url, headers=HEADERS, url_username=user, url_password=pwd, validate_certs=validate_certs)
        return data
    except Exception:
        # Bug fix: the implicit string-literal join was missing a space and
        # produced "...check that yourendpoint is properly defined...".
        module.fail_json(msg="There was an issue with connecting, please check that your "
                             "endpoint is properly defined and your credentials are correct")
def get_volumes(module, ssid, api_url, user, pwd, mappable, validate_certs):
    """Return the list of mappable objects (`mappable` is 'volumes' or
    'thin-volumes') for array `ssid`."""
    volumes = 'storage-systems/%s/%s' % (ssid, mappable)
    url = api_url + volumes
    try:
        rc, data = request(url, url_username=user, url_password=pwd, validate_certs=validate_certs)
    except Exception as err:
        # Bug fix: the original message read "Failed to mappable objects.
        # Type[%s. ..." -- missing verb and unbalanced bracket.  Also replaced
        # the bare except + deprecated get_exception() pattern.
        module.fail_json(
            msg="Failed to get mappable objects. Type [%s]. Id [%s]. Error [%s]." % (mappable, ssid, str(err)))
    return data
def get_lun_mappings(ssid, api_url, user, pwd, validate_certs, get_all=None):
    """Fetch the volume-to-LUN mappings for array `ssid`.

    Unless `get_all` is truthy, internal bookkeeping keys are stripped from
    each mapping so the result can be compared directly against a desired
    mapping dict.
    """
    endpoint = api_url + ('storage-systems/%s/volume-mappings' % ssid)
    rc, data = request(endpoint, url_username=user, url_password=pwd, validate_certs=validate_certs)
    if get_all:
        return data
    for mapping in data:
        for key in ('ssid', 'perms', 'lunMappingRef', 'type', 'id'):
            del mapping[key]
    return data
def create_mapping(module, ssid, lun_map, vol_name, api_url, user, pwd, validate_certs):
    """POST a new volume mapping; if the volume is already mapped elsewhere
    (HTTP 422) and an explicit LUN was requested, move the mapping instead.

    Args:
        lun_map: dict with 'volumeRef', 'mapRef' and 'lun' keys describing
            the desired mapping ('lun' is None when not specified).
    """
    mappings = 'storage-systems/%s/volume-mappings' % ssid
    url = api_url + mappings
    # Bug fix: the original tested `lun_map is not None`, which is always
    # true (callers pass a dict), so the no-LUN branch was dead code that
    # would have raised TypeError anyway.  The intended test -- matching the
    # 422 handling below -- is whether a LUN number was supplied.
    if lun_map['lun'] is not None:
        post_body = json.dumps(dict(
            mappableObjectId=lun_map['volumeRef'],
            targetId=lun_map['mapRef'],
            lun=lun_map['lun']
        ))
    else:
        post_body = json.dumps(dict(
            mappableObjectId=lun_map['volumeRef'],
            targetId=lun_map['mapRef'],
        ))
    rc, data = request(url, data=post_body, method='POST', url_username=user, url_password=pwd, headers=HEADERS,
                       ignore_errors=True, validate_certs=validate_certs)
    if rc == 422 and lun_map['lun'] is not None:
        # Volume already mapped: move the existing mapping to the requested
        # target/LUN rather than failing.
        data = move_lun(module, ssid, lun_map, vol_name, api_url, user, pwd, validate_certs)
    return data
def move_lun(module, ssid, lun_map, vol_name, api_url, user, pwd, validate_certs):
    """Move an existing volume mapping to the target/LUN described by `lun_map`."""
    mapping_id = get_lun_id(module, ssid, lun_map, api_url, user, pwd, validate_certs)
    # Note: the original bound the endpoint to a local named `move_lun`,
    # shadowing this function's own name.
    url = api_url + ("storage-systems/%s/volume-mappings/%s/move" % (ssid, mapping_id))
    body = json.dumps(dict(targetId=lun_map['mapRef'], lun=lun_map['lun']))
    rc, data = request(url, data=body, method='POST', url_username=user, url_password=pwd, headers=HEADERS,
                       validate_certs=validate_certs)
    return data
def get_lun_id(module, ssid, lun_mapping, api_url, user, pwd, validate_certs):
    """Return the id of the existing mapping whose volumeRef matches `lun_mapping`."""
    all_mappings = get_lun_mappings(ssid, api_url, user, pwd, validate_certs, get_all=True)
    wanted_ref = lun_mapping['volumeRef']
    for candidate in all_mappings:
        if candidate['volumeRef'] == wanted_ref:
            return candidate['id']
    # Callers only look up mappings that exist, so this is unreachable in practice.
    module.fail_json(msg="No LUN map found.")
def remove_mapping(module, ssid, lun_mapping, api_url, user, pwd, validate_certs):
    """DELETE the existing mapping matching `lun_mapping`; return the response body."""
    # Bug fix: validate_certs was not forwarded to get_lun_id(), whose
    # signature requires it, so every removal raised TypeError.
    lun_id = get_lun_id(module, ssid, lun_mapping, api_url, user, pwd, validate_certs)
    lun_del = "storage-systems/%s/volume-mappings/%s" % (ssid, lun_id)
    url = api_url + lun_del
    rc, data = request(url, method='DELETE', url_username=user, url_password=pwd, headers=HEADERS,
                       validate_certs=validate_certs)
    return data
def main():
    """Module entry point: ensure the requested volume/LUN mapping is present or absent."""
    argument_spec = eseries_host_argument_spec()
    argument_spec.update(dict(
        state=dict(required=True, choices=['present', 'absent']),
        target=dict(required=False, default=None),
        target_type=dict(required=False, choices=['host', 'group']),
        lun=dict(required=False, type='int'),
        volume_name=dict(required=True),
    ))
    module = AnsibleModule(argument_spec=argument_spec)
    state = module.params['state']
    target = module.params['target']
    target_type = module.params['target_type']
    lun = module.params['lun']
    ssid = module.params['ssid']
    validate_certs = module.params['validate_certs']
    vol_name = module.params['volume_name']
    user = module.params['api_username']
    pwd = module.params['api_password']
    api_url = module.params['api_url']
    if not api_url.endswith('/'):
        api_url += '/'
    # Resolve the volume name to a volumeRef: thick volumes first, then thin.
    volume_map = get_volumes(module, ssid, api_url, user, pwd, "volumes", validate_certs)
    thin_volume_map = get_volumes(module, ssid, api_url, user, pwd, "thin-volumes", validate_certs)
    volref = None
    for vol in volume_map:
        if vol['label'] == vol_name:
            volref = vol['volumeRef']
    if not volref:
        for vol in thin_volume_map:
            if vol['label'] == vol_name:
                volref = vol['volumeRef']
    if not volref:
        module.fail_json(changed=False, msg="No volume with the name %s was found" % vol_name)
    host_and_group_mapping = get_host_and_group_map(module, ssid, api_url, user, pwd, validate_certs)
    # NOTE(review): DOCUMENTATION says the default hostgroup is used when
    # `target` is omitted, but target/target_type of None would raise
    # KeyError on the lookup below -- confirm both are required in practice.
    desired_lun_mapping = dict(
        mapRef=host_and_group_mapping[target_type][target],
        lun=lun,
        volumeRef=volref
    )
    # Compare against existing mappings (bookkeeping keys already stripped).
    lun_mappings = get_lun_mappings(ssid, api_url, user, pwd, validate_certs)
    if state == 'present':
        if desired_lun_mapping in lun_mappings:
            module.exit_json(changed=False, msg="Mapping exists")
        else:
            result = create_mapping(module, ssid, desired_lun_mapping, vol_name, api_url, user, pwd, validate_certs)
            module.exit_json(changed=True, **result)
    elif state == 'absent':
        if desired_lun_mapping in lun_mappings:
            result = remove_mapping(module, ssid, desired_lun_mapping, api_url, user, pwd, validate_certs)
            module.exit_json(changed=True, msg="Mapping removed")
        else:
            module.exit_json(changed=False, msg="Mapping absent")
if __name__ == '__main__':
    main()
| gpl-3.0 |
RackHD-Mirror/RackHD | test/tests/api-cit/v2_0/swagger_tests.py | 10 | 1438 | '''
Copyright (c) 2016-2017 Dell Inc. or its subsidiaries. All Rights Reserved.
'''
import fit_path # NOQA: unused import
import fit_common
import flogging
import requests
from config.api2_0_config import config
from nose.plugins.attrib import attr
logs = flogging.get_loggers()
@attr(regression=False, smoke=True, swagger_api2_tests=True)
class SwaggerTests(fit_common.unittest.TestCase):
    """Sanity checks on the swagger definition served by the RackHD 2.0 API."""
    def setUp(self):
        logs.info(config.api_root)
        logs.info(config.host)
        self.swagger_path = '{0}{1}/swagger'.format(config.host, config.api_root)
    def test_swagger_tags(self):
        """Validate the swagger object's tag list and per-endpoint tagging."""
        response = requests.get(self.swagger_path)
        self.assertEqual(200, response.status_code)
        swagger_def = response.json()
        # Exactly one tag, named '/api/2.0', must be declared.
        tags = swagger_def['tags']
        self.assertEqual(1, len(tags))
        self.assertEqual('/api/2.0', tags[0]['name'])
        self.assertEqual('RackHD 2.0 API', tags[0]['description'])
        # Every operation on every path must carry the '/api/2.0' tag.
        for path, operations in swagger_def['paths'].items():
            for method, operation in operations.items():
                logs.debug("Checking method %s, %s", method, operation.get('summary'))
                self.assertTrue('/api/2.0' in operation['tags'])
| apache-2.0 |
amjad-twalo/icsisumm | icsisumm-primary-sys34_v1/nltk/nltk-0.9.2/nltk_contrib/classifier/attribute.py | 9 | 3778 | # Natural Language Toolkit - Attribute
# can extract the name and values from a line and operate on them
#
# Author: Sumukh Ghodke <sumukh dot ghodke at gmail dot com>
#
# URL: <http://nltk.sf.net>
# This software is distributed under GPL, for license information see LICENSE.TXT
from nltk_contrib.classifier.exceptions import systemerror as se
from nltk_contrib.classifier import autoclass as ac, cfile, decisionstump as ds
from nltk import probability as prob
import UserList
# Marker values for the two kinds of attributes.
CONTINUOUS = 'continuous'
DISCRETE = 'discrete'
class Attribute:
    """Immutable object which represents an attribute/feature."""
    def __init__(self, name, values, index):
        self.name = name
        self.values = values
        self.type = self.__get_type()
        self.index = index
    def __get_type(self):
        # An attribute whose single declared value is the 'continuous'
        # marker is continuous; anything else is discrete with an explicit
        # value list.
        if len(self.values) == 1 and self.values[0] == CONTINUOUS:
            return CONTINUOUS
        return DISCRETE
    def has_value(self, to_test):
        # Idiom fix: use the `in` operator rather than calling the
        # __contains__ dunder directly.
        return to_test in self.values
    def is_continuous(self):
        return self.type == CONTINUOUS
    def __eq__(self, other):
        if other is None: return False
        if self.__class__ != other.__class__: return False
        return (self.name == other.name and
                self.values == other.values and
                self.index == other.index)
    def __str__(self):
        return self.name + ':[' + self.values_as_str() + '] index:' + str(self.index)
    def values_as_str(self):
        """Comma-joined values; used to write contents back to file store."""
        return ','.join([str(value) for value in self.values])
    def empty_freq_dists(self):
        """Map each possible value to a fresh (empty) frequency distribution."""
        return dict([(value, prob.FreqDist()) for value in self.values])
    def __hash__(self):
        # Kept consistent with __eq__ (name and index participate in both).
        return hash(self.name) + hash(self.index)
class Attributes(UserList.UserList):
    """List of Attribute objects with dataset-level convenience operations."""
    def __init__(self, attributes=None):
        # Bug fix: the original default `attributes=[]` is a shared mutable
        # default -- every Attributes() created without arguments aliased
        # the same list.
        self.data = [] if attributes is None else attributes
    def has_values(self, test_values):
        """True when test_values has one entry per attribute and each discrete
        attribute contains its corresponding value."""
        if len(test_values) != len(self):
            return False
        for attribute, value in zip(self.data, test_values):
            if attribute.is_continuous():
                continue  # do not test continuous attributes
            if not attribute.has_value(value):
                return False
        return True
    def has_continuous(self):
        return any(attribute.is_continuous() for attribute in self.data)
    def subset(self, indices):
        return [self.data[index] for index in indices]
    def discretise(self, discretised_attributes):
        """Replace attributes in place with their discretised counterparts."""
        for disc_attr in discretised_attributes:
            self.data[disc_attr.index] = disc_attr
    def empty_decision_stumps(self, ignore_attributes, klass):
        return [ds.DecisionStump(attribute, klass)
                for attribute in self.data if attribute not in ignore_attributes]
    def remove_attributes(self, attributes):
        for attribute in attributes:
            self.remove(attribute)
        self.reset_indices()
    def reset_indices(self):
        # Renumber so each attribute's index matches its list position.
        for position, attribute in enumerate(self.data):
            attribute.index = position
    def continuous_attribute_indices(self):
        return [atr.index for atr in self.data if atr.is_continuous()]
    def empty_freq_dists(self):
        return dict([(attribute, attribute.empty_freq_dists()) for attribute in self.data])
    def __str__(self):
        return '[' + ', '.join([each.__str__() for each in self]) + ']'
def fact(n):
    """Return n! for non-negative integer n (iterative, so no recursion limit)."""
    result = 1
    for i in range(2, n + 1):
        result *= i
    return result
def ncr(n, r):
    """Return the binomial coefficient C(n, r).

    Floor division keeps the result an int under Python 3 (the original `/`
    yielded a float); the quotient is exact because fact(r) * fact(n - r)
    always divides fact(n).
    """
    return fact(n) // (fact(r) * fact(n - r))
| gpl-3.0 |
thesharp/botogram | tests/test_utils_calls.py | 1 | 2089 | """
Tests for botogram/utils/calls.py
Copyright (c) 2015 Pietro Albini <pietro@pietroalbini.io>
Released under the MIT license
"""
import pytest
import botogram.utils
def test_call():
    calls = 0
    def target(a, b, c):
        # Record the invocation and verify the arguments were routed intact.
        nonlocal calls
        calls += 1
        assert (a, b, c) == (1, 2, 3)
    # Exact argument set: forwarded as-is.
    botogram.utils.call(target, a=1, b=2, c=3)
    assert calls == 1
    # Extra arguments beyond the signature are dropped before the call.
    botogram.utils.call(target, a=1, b=2, c=3, d=4)
    assert calls == 2
    # Missing arguments: TypeError propagates and the target never runs.
    with pytest.raises(TypeError):
        botogram.utils.call(target, a=1, b=2)
    assert calls == 2
def test_call_with_wraps():
    wrapper_ran = False
    original_ran = False
    def original(a, b):
        nonlocal original_ran
        original_ran = True
        assert (a, b) == (1, 2)
    @botogram.utils.wraps(original)
    def wrapper(c):
        nonlocal wrapper_ran
        wrapper_ran = True
        assert c == 3
    # Both the plain function and its wrapper receive only the arguments
    # their own signatures declare, despite identical call kwargs.
    botogram.utils.call(original, a=1, b=2, c=3)
    botogram.utils.call(wrapper, a=1, b=2, c=3)
    assert wrapper_ran
    assert original_ran
def test_call_lazy_arguments():
    ran = {"one": False, "two": False, "lazy": False}
    def takes_a(a):
        ran["one"] = True
        assert a == 1
    def takes_a_b(a, b):
        ran["two"] = True
        assert (a, b) == (1, 2)
    def provider():
        ran["lazy"] = True
        return 2
    lazy = botogram.utils.CallLazyArgument(provider)
    # The lazy argument is not consumed here, so the provider must not run.
    botogram.utils.call(takes_a, a=1, b=lazy)
    assert ran["one"]
    assert not ran["lazy"]
    # Once a callee actually needs `b`, the provider is evaluated.
    botogram.utils.call(takes_a_b, a=1, b=lazy)
    assert ran["two"]
    assert ran["lazy"]
| mit |
zulip/django | django/template/base.py | 91 | 39166 | """
This is the Django template system.
How it works:
The Lexer.tokenize() function converts a template string (i.e., a string containing
markup with custom template tags) to tokens, which can be either plain text
(TOKEN_TEXT), variables (TOKEN_VAR) or block statements (TOKEN_BLOCK).
The Parser() class takes a list of tokens in its constructor, and its parse()
method returns a compiled template -- which is, under the hood, a list of
Node objects.
Each Node is responsible for creating some sort of output -- e.g. simple text
(TextNode), variable values in a given context (VariableNode), results of basic
logic (IfNode), results of looping (ForNode), or anything else. The core Node
types are TextNode, VariableNode, IfNode and ForNode, but plugin modules can
define their own custom node types.
Each Node has a render() method, which takes a Context and returns a string of
the rendered node. For example, the render() method of a Variable Node returns
the variable's value as a string. The render() method of a ForNode returns the
rendered output of whatever was inside the loop, recursively.
The Template class is a convenient wrapper that takes care of template
compilation and rendering.
Usage:
The only thing you should ever use directly in this file is the Template class.
Create a compiled template object with a template_string, then call render()
with a context. In the compilation stage, the TemplateSyntaxError exception
will be raised if the template doesn't have proper syntax.
Sample code:
>>> from django import template
>>> s = '<html>{% if test %}<h1>{{ varvalue }}</h1>{% endif %}</html>'
>>> t = template.Template(s)
(t is now a compiled template, and its render() method can be called multiple
times with multiple contexts)
>>> c = template.Context({'test':True, 'varvalue': 'Hello'})
>>> t.render(c)
'<html><h1>Hello</h1></html>'
>>> c = template.Context({'test':False, 'varvalue': 'Hello'})
>>> t.render(c)
'<html></html>'
"""
from __future__ import unicode_literals
import inspect
import logging
import re
import warnings
from django.template.context import ( # NOQA: imported for backwards compatibility
BaseContext, Context, ContextPopException, RequestContext,
)
from django.utils import six
from django.utils.deprecation import RemovedInDjango110Warning
from django.utils.encoding import (
force_str, force_text, python_2_unicode_compatible,
)
from django.utils.formats import localize
from django.utils.html import conditional_escape, escape
from django.utils.inspect import getargspec
from django.utils.safestring import (
EscapeData, SafeData, mark_for_escaping, mark_safe,
)
from django.utils.text import (
get_text_list, smart_split, unescape_string_literal,
)
from django.utils.timezone import template_localtime
from django.utils.translation import pgettext_lazy, ugettext_lazy
from .exceptions import TemplateSyntaxError
# Token types produced by the lexer (see Token.token_type).
TOKEN_TEXT = 0
TOKEN_VAR = 1
TOKEN_BLOCK = 2
TOKEN_COMMENT = 3
# Human-readable names for the token types, used by Token.__str__.
TOKEN_MAPPING = {
    TOKEN_TEXT: 'Text',
    TOKEN_VAR: 'Var',
    TOKEN_BLOCK: 'Block',
    TOKEN_COMMENT: 'Comment',
}
# template syntax constants
FILTER_SEPARATOR = '|'
FILTER_ARGUMENT_SEPARATOR = ':'
VARIABLE_ATTRIBUTE_SEPARATOR = '.'
BLOCK_TAG_START = '{%'
BLOCK_TAG_END = '%}'
VARIABLE_TAG_START = '{{'
VARIABLE_TAG_END = '}}'
COMMENT_TAG_START = '{#'
COMMENT_TAG_END = '#}'
# Comments containing this marker are intended for translators.
TRANSLATOR_COMMENT_MARK = 'Translators'
SINGLE_BRACE_START = '{'
SINGLE_BRACE_END = '}'
# what to report as the origin for templates that come from non-loader sources
# (e.g. strings)
UNKNOWN_SOURCE = '<unknown source>'
# match a variable or block tag and capture the entire tag, including start/end
# delimiters
tag_re = (re.compile('(%s.*?%s|%s.*?%s|%s.*?%s)' %
          (re.escape(BLOCK_TAG_START), re.escape(BLOCK_TAG_END),
           re.escape(VARIABLE_TAG_START), re.escape(VARIABLE_TAG_END),
           re.escape(COMMENT_TAG_START), re.escape(COMMENT_TAG_END))))
logger = logging.getLogger('django.template')
class TemplateEncodingError(Exception):
    """Raised by Template.__init__ when the template string cannot be
    decoded (only unicode or UTF-8 input is accepted)."""
    pass
@python_2_unicode_compatible
class VariableDoesNotExist(Exception):
    """Raised when a template variable cannot be resolved; `msg` is a
    %-style format string and `params` its arguments."""
    def __init__(self, msg, params=()):
        self.msg = msg
        self.params = params
    def __str__(self):
        # Decode each parameter defensively so a bad byte sequence cannot
        # break error reporting itself.
        safe_params = tuple(force_text(param, errors='replace') for param in self.params)
        return self.msg % safe_params
class Origin(object):
    """Identifies where a template came from: a display name, plus
    optionally the template name as requested and the loader that found it."""
    def __init__(self, name, template_name=None, loader=None):
        self.name = name
        self.template_name = template_name
        self.loader = loader
    def __str__(self):
        return self.name
    def __eq__(self, other):
        # Equality is based on the resolved name and the loader only;
        # template_name does not participate.
        if not isinstance(other, Origin):
            return False
        return self.name == other.name and self.loader == other.loader
    @property
    def loader_name(self):
        """Dotted path of the loader instance's class, or None if no loader."""
        if not self.loader:
            return None
        return '%s.%s' % (
            self.loader.__module__, self.loader.__class__.__name__,
        )
class Template(object):
    """A compiled template: wraps source, origin and the parsed nodelist,
    and renders against a Context via render()."""
    def __init__(self, template_string, origin=None, name=None, engine=None):
        try:
            template_string = force_text(template_string)
        except UnicodeDecodeError:
            raise TemplateEncodingError("Templates can only be constructed "
                                        "from unicode or UTF-8 strings.")
        # If Template is instantiated directly rather than from an Engine and
        # exactly one Django template engine is configured, use that engine.
        # This is required to preserve backwards-compatibility for direct use
        # e.g. Template('...').render(Context({...}))
        if engine is None:
            from .engine import Engine
            engine = Engine.get_default()
        if origin is None:
            origin = Origin(UNKNOWN_SOURCE)
        self.name = name
        self.origin = origin
        self.engine = engine
        self.source = template_string
        # Compilation happens eagerly, at construction time.
        self.nodelist = self.compile_nodelist()
    def __iter__(self):
        # Flatten: iterating a template yields every subnode of every node.
        for node in self.nodelist:
            for subnode in node:
                yield subnode
    def _render(self, context):
        return self.nodelist.render(context)
    def render(self, context):
        "Display stage -- can be called many times"
        # A fresh render_context layer isolates per-render state (e.g.
        # {% cycle %}) across repeated renders of the same template.
        context.render_context.push()
        try:
            if context.template is None:
                # Top-level render: bind this template to the context.
                with context.bind_template(self):
                    context.template_name = self.name
                    return self._render(context)
            else:
                return self._render(context)
        finally:
            context.render_context.pop()
    def compile_nodelist(self):
        """
        Parse and compile the template source into a nodelist. If debug
        is True and an exception occurs during parsing, the exception is
        annotated with contextual line information where it occurred in the
        template source.
        """
        if self.engine.debug:
            lexer = DebugLexer(self.source)
        else:
            lexer = Lexer(self.source)
        tokens = lexer.tokenize()
        parser = Parser(
            tokens, self.engine.template_libraries, self.engine.template_builtins,
        )
        try:
            return parser.parse()
        except Exception as e:
            if self.engine.debug:
                e.template_debug = self.get_exception_info(e, e.token)
            raise
    def get_exception_info(self, exception, token):
        """
        Return a dictionary containing contextual line information of where
        the exception occurred in the template. The following information is
        provided:
        message
            The message of the exception raised.
        source_lines
            The lines before, after, and including the line the exception
            occurred on.
        line
            The line number the exception occurred on.
        before, during, after
            The line the exception occurred on split into three parts:
            1. The content before the token that raised the error.
            2. The token that raised the error.
            3. The content after the token that raised the error.
        total
            The number of lines in source_lines.
        top
            The line number where source_lines starts.
        bottom
            The line number where source_lines ends.
        start
            The start position of the token in the template source.
        end
            The end position of the token in the template source.
        """
        start, end = token.position
        context_lines = 10
        line = 0
        upto = 0
        source_lines = []
        before = during = after = ""
        # Walk line-start offsets; `next` (shadows the builtin) is the
        # offset one past the end of the current line.
        for num, next in enumerate(linebreak_iter(self.source)):
            if start >= upto and end <= next:
                # The offending token lies entirely within this line.
                line = num
                before = escape(self.source[upto:start])
                during = escape(self.source[start:end])
                after = escape(self.source[end:next])
            source_lines.append((num, escape(self.source[upto:next])))
            upto = next
        total = len(source_lines)
        top = max(1, line - context_lines)
        bottom = min(total, line + 1 + context_lines)
        # In some rare cases exc_value.args can be empty or an invalid
        # unicode string.
        try:
            message = force_text(exception.args[0])
        except (IndexError, UnicodeDecodeError):
            message = '(Could not get exception message)'
        return {
            'message': message,
            'source_lines': source_lines[top:bottom],
            'before': before,
            'during': during,
            'after': after,
            'top': top,
            'bottom': bottom,
            'total': total,
            'line': line,
            'name': self.origin.name,
            'start': start,
            'end': end,
        }
def linebreak_iter(template_source):
    """Yield the start offset of every line in `template_source`, followed
    by a final sentinel offset one past the end of the string."""
    yield 0
    for index, char in enumerate(template_source):
        if char == '\n':
            yield index + 1
    yield len(template_source) + 1
class Token(object):
    def __init__(self, token_type, contents, position=None, lineno=None):
        """A lexed chunk of template source.

        token_type -- one of TOKEN_TEXT, TOKEN_VAR, TOKEN_BLOCK,
            TOKEN_COMMENT.
        contents -- the token's source string.
        position -- optional (start, end) index pair into the template
            source; recorded when debug is on and used for tracebacks.
        lineno -- line number the token appears on; used for traceback
            information and gettext files.
        """
        self.token_type = token_type
        self.contents = contents
        self.lineno = lineno
        self.position = position
    def __str__(self):
        token_name = TOKEN_MAPPING[self.token_type]
        preview = self.contents[:20].replace('\n', '')
        return '<%s token: "%s...">' % (token_name, preview)
    def split_contents(self):
        """Split contents on whitespace, keeping _("...")/_('...')
        translation markers together even when they contain spaces."""
        pieces = []
        bits = iter(smart_split(self.contents))
        for bit in bits:
            if bit.startswith(('_("', "_('")):
                # Consume bits until the marker's closing quote+paren, then
                # rejoin them into a single piece.
                sentinel = bit[2] + ')'
                translation_parts = [bit]
                while not bit.endswith(sentinel):
                    bit = next(bits)
                    translation_parts.append(bit)
                bit = ' '.join(translation_parts)
            pieces.append(bit)
        return pieces
class Lexer(object):
    """Splits a template string into Tokens.

    Tracks {% verbatim %} state: while self.verbatim holds the expected
    end-tag content, tag-like text is emitted as literal TOKEN_TEXT.
    """
    def __init__(self, template_string):
        self.template_string = template_string
        self.verbatim = False
    def tokenize(self):
        """
        Return a list of tokens from a given template_string.
        """
        in_tag = False
        lineno = 1
        result = []
        # tag_re.split alternates literal text and tag text, so in_tag
        # flips on every bit.
        for bit in tag_re.split(self.template_string):
            if bit:
                result.append(self.create_token(bit, None, lineno, in_tag))
            in_tag = not in_tag
            lineno += bit.count('\n')
        return result
    def create_token(self, token_string, position, lineno, in_tag):
        """
        Convert the given token string into a new Token object and return it.
        If in_tag is True, we are processing something that matched a tag,
        otherwise it should be treated as a literal string.
        """
        if in_tag and token_string.startswith(BLOCK_TAG_START):
            # The [2:-2] ranges below strip off *_TAG_START and *_TAG_END.
            # We could do len(BLOCK_TAG_START) to be more "correct", but we've
            # hard-coded the 2s here for performance. And it's not like
            # the TAG_START values are going to change anytime, anyway.
            block_content = token_string[2:-2].strip()
            if self.verbatim and block_content == self.verbatim:
                # Matching end tag of a {% verbatim %} block: leave verbatim mode.
                self.verbatim = False
        if in_tag and not self.verbatim:
            if token_string.startswith(VARIABLE_TAG_START):
                token = Token(TOKEN_VAR, token_string[2:-2].strip(), position, lineno)
            elif token_string.startswith(BLOCK_TAG_START):
                if block_content[:9] in ('verbatim', 'verbatim '):
                    # Enter verbatim mode until the matching {% endverbatim... %}.
                    self.verbatim = 'end%s' % block_content
                token = Token(TOKEN_BLOCK, block_content, position, lineno)
            elif token_string.startswith(COMMENT_TAG_START):
                content = ''
                # NOTE(review): str.find() returns -1 (truthy) when the
                # marker is absent, so this condition is effectively always
                # true for '{#...#}' tokens -- confirm whether `!= -1` was
                # intended (keep content only for translator comments).
                if token_string.find(TRANSLATOR_COMMENT_MARK):
                    content = token_string[2:-2].strip()
                token = Token(TOKEN_COMMENT, content, position, lineno)
        else:
            # Literal text (or tag-like text inside a verbatim block).
            token = Token(TOKEN_TEXT, token_string, position, lineno)
        return token
class DebugLexer(Lexer):
    def tokenize(self):
        """
        Split a template string into tokens and annotate each token with its
        start and end position in the source. This is slower than the default
        lexer so we only use it when debug is True.
        """
        tokens = []
        lineno = 1
        pos = 0
        template = self.template_string
        for match in tag_re.finditer(template):
            start, end = match.span()
            # Literal text between the previous tag (or start) and this tag.
            if pos < start:
                text = template[pos:start]
                tokens.append(self.create_token(text, (pos, start), lineno, in_tag=False))
                lineno += text.count('\n')
            # The tag markup itself.
            tag = template[start:end]
            tokens.append(self.create_token(tag, (start, end), lineno, in_tag=True))
            lineno += tag.count('\n')
            pos = end
        # Trailing literal text after the final tag, if any.
        tail = template[pos:]
        if tail:
            tokens.append(self.create_token(tail, (pos, pos + len(tail)), lineno, in_tag=False))
        return tokens
class Parser(object):
    """
    Compile a list of lexer Tokens into a NodeList of renderable Nodes.

    Tag compilation functions and filters are looked up in ``self.tags`` and
    ``self.filters``, which are populated from the supplied libraries.
    """
    def __init__(self, tokens, libraries=None, builtins=None):
        # The token stream; consumed from the front via next_token().
        self.tokens = tokens
        self.tags = {}
        self.filters = {}
        # Stack of (command, token) pairs for block tags currently being
        # compiled; used to report unclosed block tags.
        self.command_stack = []
        if libraries is None:
            libraries = {}
        if builtins is None:
            builtins = []
        self.libraries = libraries
        for builtin in builtins:
            self.add_library(builtin)

    def parse(self, parse_until=None):
        """
        Iterate through the parser tokens and compile each one into a node.
        If parse_until is provided, parsing will stop once one of the
        specified tokens has been reached. This is formatted as a list of
        tokens, e.g. ['elif', 'else', 'endif']. If no matching token is
        reached, raise an exception with the unclosed block tag details.
        """
        if parse_until is None:
            parse_until = []
        nodelist = NodeList()
        while self.tokens:
            token = self.next_token()
            # Use the raw values here for TOKEN_* for a tiny performance boost.
            if token.token_type == 0:  # TOKEN_TEXT
                self.extend_nodelist(nodelist, TextNode(token.contents), token)
            elif token.token_type == 1:  # TOKEN_VAR
                if not token.contents:
                    raise self.error(token, 'Empty variable tag')
                try:
                    filter_expression = self.compile_filter(token.contents)
                except TemplateSyntaxError as e:
                    raise self.error(token, e)
                var_node = VariableNode(filter_expression)
                self.extend_nodelist(nodelist, var_node, token)
            elif token.token_type == 2:  # TOKEN_BLOCK
                try:
                    command = token.contents.split()[0]
                except IndexError:
                    raise self.error(token, 'Empty block tag')
                if command in parse_until:
                    # A matching token has been reached. Return control to
                    # the caller. Put the token back on the token list so the
                    # caller knows where it terminated.
                    self.prepend_token(token)
                    return nodelist
                # Add the token to the command stack. This is used for error
                # messages if further parsing fails due to an unclosed block
                # tag.
                self.command_stack.append((command, token))
                # Get the tag callback function from the ones registered with
                # the parser.
                try:
                    compile_func = self.tags[command]
                except KeyError:
                    self.invalid_block_tag(token, command, parse_until)
                # Compile the callback into a node object and add it to
                # the node list.
                try:
                    compiled_result = compile_func(self, token)
                except Exception as e:
                    raise self.error(token, e)
                self.extend_nodelist(nodelist, compiled_result, token)
                # Compile success. Remove the token from the command stack.
                self.command_stack.pop()
        if parse_until:
            # Ran out of tokens before any of the expected closing tags.
            self.unclosed_block_tag(parse_until)
        return nodelist

    def skip_past(self, endtag):
        """Silently consume tokens until the ``endtag`` block tag is seen."""
        while self.tokens:
            token = self.next_token()
            if token.token_type == TOKEN_BLOCK and token.contents == endtag:
                return
        self.unclosed_block_tag([endtag])

    def extend_nodelist(self, nodelist, node, token):
        """Append *node* to *nodelist*, enforcing must-be-first constraints."""
        # Check that non-text nodes don't appear before an extends tag.
        if node.must_be_first and nodelist.contains_nontext:
            raise self.error(
                token, '%r must be the first tag in the template.' % node,
            )
        if isinstance(nodelist, NodeList) and not isinstance(node, TextNode):
            nodelist.contains_nontext = True
        # Set token here since we can't modify the node __init__ method
        node.token = token
        nodelist.append(node)

    def error(self, token, e):
        """
        Return an exception annotated with the originating token. Since the
        parser can be called recursively, check if a token is already set. This
        ensures the innermost token is highlighted if an exception occurs,
        e.g. a compile error within the body of an if statement.
        """
        if not isinstance(e, Exception):
            e = TemplateSyntaxError(e)
        if not hasattr(e, 'token'):
            e.token = token
        return e

    def invalid_block_tag(self, token, command, parse_until=None):
        """Raise a TemplateSyntaxError for an unregistered block tag name."""
        if parse_until:
            raise self.error(token, "Invalid block tag: '%s', expected %s" %
                             (command, get_text_list(["'%s'" % p for p in parse_until])))
        raise self.error(token, "Invalid block tag: '%s'" % command)

    def unclosed_block_tag(self, parse_until):
        """Raise for the innermost still-open block tag on the command stack."""
        command, token = self.command_stack.pop()
        msg = "Unclosed tag '%s'. Looking for one of: %s." % (command, ', '.join(parse_until))
        raise self.error(token, msg)

    def next_token(self):
        """Pop and return the next token from the front of the stream."""
        return self.tokens.pop(0)

    def prepend_token(self, token):
        """Push *token* back onto the front of the stream."""
        self.tokens.insert(0, token)

    def delete_first_token(self):
        """Discard the next token (used by tags that re-inspect the stream)."""
        del self.tokens[0]

    def add_library(self, lib):
        """Register a template library's tags and filters with this parser."""
        self.tags.update(lib.tags)
        self.filters.update(lib.filters)

    def compile_filter(self, token):
        """
        Convenient wrapper for FilterExpression
        """
        return FilterExpression(token, self)

    def find_filter(self, filter_name):
        """Return the registered filter callable or raise TemplateSyntaxError."""
        if filter_name in self.filters:
            return self.filters[filter_name]
        else:
            raise TemplateSyntaxError("Invalid filter: '%s'" % filter_name)
# This only matches constant *strings* (things in quotes or marked for
# translation). Numbers are treated as variables for implementation reasons
# (so that they retain their type when passed to filters).
constant_string = r"""
(?:%(i18n_open)s%(strdq)s%(i18n_close)s|
%(i18n_open)s%(strsq)s%(i18n_close)s|
%(strdq)s|
%(strsq)s)
""" % {
    'strdq': r'"[^"\\]*(?:\\.[^"\\]*)*"',  # double-quoted string
    'strsq': r"'[^'\\]*(?:\\.[^'\\]*)*'",  # single-quoted string
    'i18n_open': re.escape("_("),
    'i18n_close': re.escape(")"),
}
constant_string = constant_string.replace("\n", "")

# Matches either the leading variable/constant of a filter expression, or one
# "|filter[:arg]" segment; FilterExpression applies it repeatedly via
# finditer() to consume a whole token.
filter_raw_string = r"""
^(?P<constant>%(constant)s)|
^(?P<var>[%(var_chars)s]+|%(num)s)|
(?:\s*%(filter_sep)s\s*
    (?P<filter_name>\w+)
    (?:%(arg_sep)s
        (?:
            (?P<constant_arg>%(constant)s)|
            (?P<var_arg>[%(var_chars)s]+|%(num)s)
        )
    )?
)""" % {
    'constant': constant_string,
    'num': r'[-+\.]?\d[\d\.e]*',
    # BUGFIX: raw string -- "\w" and "\." are regex escapes, not Python string
    # escapes; a plain string here triggers invalid-escape-sequence warnings on
    # Python 3.6+ (and will eventually be a SyntaxError). Runtime value is
    # unchanged since \w and \. are not recognized string escapes.
    'var_chars': r"\w\.",
    'filter_sep': re.escape(FILTER_SEPARATOR),
    'arg_sep': re.escape(FILTER_ARGUMENT_SEPARATOR),
}

filter_re = re.compile(filter_raw_string, re.UNICODE | re.VERBOSE)
class FilterExpression(object):
    """
    Parses a variable token and its optional filters (all as a single string),
    and return a list of tuples of the filter name and arguments.
    Sample::
        >>> token = 'variable|default:"Default value"|date:"Y-m-d"'
        >>> p = Parser('')
        >>> fe = FilterExpression(token, p)
        >>> len(fe.filters)
        2
        >>> fe.var
        <Variable: 'variable'>
    """
    def __init__(self, token, parser):
        self.token = token
        # filter_re yields the leading variable/constant first, then one match
        # per "|filter[:arg]" segment. Matches must be contiguous: any gap
        # means unparseable characters.
        matches = filter_re.finditer(token)
        var_obj = None
        filters = []
        upto = 0
        for match in matches:
            start = match.start()
            if upto != start:
                raise TemplateSyntaxError("Could not parse some characters: "
                                          "%s|%s|%s" %
                                          (token[:upto], token[upto:start],
                                           token[start:]))
            if var_obj is None:
                # First match: the expression's subject (variable or constant).
                var, constant = match.group("var", "constant")
                if constant:
                    try:
                        # Constants resolve immediately (empty context).
                        var_obj = Variable(constant).resolve({})
                    except VariableDoesNotExist:
                        var_obj = None
                elif var is None:
                    raise TemplateSyntaxError("Could not find variable at "
                                              "start of %s." % token)
                else:
                    var_obj = Variable(var)
            else:
                # Subsequent matches: one filter each, with an optional
                # constant or variable argument.
                filter_name = match.group("filter_name")
                args = []
                constant_arg, var_arg = match.group("constant_arg", "var_arg")
                if constant_arg:
                    # (False, value): already resolved, no context lookup.
                    args.append((False, Variable(constant_arg).resolve({})))
                elif var_arg:
                    # (True, Variable): resolved against context at render time.
                    args.append((True, Variable(var_arg)))
                filter_func = parser.find_filter(filter_name)
                self.args_check(filter_name, filter_func, args)
                filters.append((filter_func, args))
            upto = match.end()
        if upto != len(token):
            raise TemplateSyntaxError("Could not parse the remainder: '%s' "
                                      "from '%s'" % (token[upto:], token))
        self.filters = filters
        self.var = var_obj

    def resolve(self, context, ignore_failures=False):
        """
        Resolve the subject against *context* and run it through each filter.

        Failed lookups yield None (if ignore_failures) or the engine's
        string_if_invalid setting.
        """
        if isinstance(self.var, Variable):
            try:
                obj = self.var.resolve(context)
            except VariableDoesNotExist:
                if ignore_failures:
                    obj = None
                else:
                    string_if_invalid = context.template.engine.string_if_invalid
                    if string_if_invalid:
                        # A '%s' placeholder in string_if_invalid is filled
                        # with the unresolved variable name; filters are
                        # skipped entirely in that case.
                        if '%s' in string_if_invalid:
                            return string_if_invalid % self.var
                        else:
                            return string_if_invalid
                    else:
                        obj = string_if_invalid
        else:
            # Subject was a constant, already resolved in __init__.
            obj = self.var
        for func, args in self.filters:
            arg_vals = []
            for lookup, arg in args:
                if not lookup:
                    # Constant argument: treated as safe as-is.
                    arg_vals.append(mark_safe(arg))
                else:
                    arg_vals.append(arg.resolve(context))
            if getattr(func, 'expects_localtime', False):
                obj = template_localtime(obj, context.use_tz)
            if getattr(func, 'needs_autoescape', False):
                new_obj = func(obj, autoescape=context.autoescape, *arg_vals)
            else:
                new_obj = func(obj, *arg_vals)
            # Propagate safe/escape markers from the input through the filter.
            if getattr(func, 'is_safe', False) and isinstance(obj, SafeData):
                obj = mark_safe(new_obj)
            elif isinstance(obj, EscapeData):
                obj = mark_for_escaping(new_obj)
            else:
                obj = new_obj
        return obj

    def args_check(name, func, provided):
        """Validate that *func* accepts the provided filter arguments."""
        provided = list(provided)
        # First argument, filter input, is implied.
        plen = len(provided) + 1
        # Check to see if a decorator is providing the real function.
        func = getattr(func, '_decorated_function', func)
        args, _, _, defaults = getargspec(func)
        alen = len(args)
        dlen = len(defaults or [])
        # Not enough OR Too many
        if plen < (alen - dlen) or plen > alen:
            raise TemplateSyntaxError("%s requires %d arguments, %d provided" %
                                      (name, alen - dlen, plen))
        return True
    # Old-style staticmethod registration: args_check is defined as a plain
    # function above (so it can also be called during class body execution)
    # and wrapped here so instances/classes can call it without `self`.
    args_check = staticmethod(args_check)

    def __str__(self):
        return self.token
def resolve_variable(path, context):
    """
    Resolve ``path`` (which may contain attribute syntax) against ``context``
    and return the result.

    Deprecated; use the Variable class instead.
    """
    warnings.warn(
        "resolve_variable() is deprecated. Use django.template."
        "Variable(path).resolve(context) instead",
        RemovedInDjango110Warning, stacklevel=2)
    return Variable(path).resolve(context)
class Variable(object):
    """
    A template variable, resolvable against a given context. The variable may
    be a hard-coded string (if it begins and ends with single or double quote
    marks)::
        >>> c = {'article': {'section':'News'}}
        >>> Variable('article.section').resolve(c)
        'News'
        >>> Variable('article').resolve(c)
        {'section': 'News'}
        >>> class AClass: pass
        >>> c = AClass()
        >>> c.article = AClass()
        >>> c.article.section = 'News'
    (The example assumes VARIABLE_ATTRIBUTE_SEPARATOR is '.')
    """
    def __init__(self, var):
        self.var = var
        # Set for numeric/string literals; resolve() returns it directly.
        self.literal = None
        # Set for real variables: tuple of dotted-path components.
        self.lookups = None
        # True when the value should be passed through gettext at render time.
        self.translate = False
        # Optional pgettext context string for translation.
        self.message_context = None
        if not isinstance(var, six.string_types):
            raise TypeError(
                "Variable must be a string or number, got %s" % type(var))
        try:
            # First try to treat this variable as a number.
            #
            # Note that this could cause an OverflowError here that we're not
            # catching. Since this should only happen at compile time, that's
            # probably OK.
            self.literal = float(var)
            # So it's a float... is it an int? If the original value contained a
            # dot or an "e" then it was a float, not an int.
            if '.' not in var and 'e' not in var.lower():
                self.literal = int(self.literal)
            # "2." is invalid
            if var.endswith('.'):
                raise ValueError
        except ValueError:
            # A ValueError means that the variable isn't a number.
            if var.startswith('_(') and var.endswith(')'):
                # The result of the lookup should be translated at rendering
                # time.
                self.translate = True
                var = var[2:-1]
            # If it's wrapped with quotes (single or double), then
            # we're also dealing with a literal.
            try:
                self.literal = mark_safe(unescape_string_literal(var))
            except ValueError:
                # Otherwise we'll set self.lookups so that resolve() knows we're
                # dealing with a bonafide variable
                if var.find(VARIABLE_ATTRIBUTE_SEPARATOR + '_') > -1 or var[0] == '_':
                    raise TemplateSyntaxError("Variables and attributes may "
                                              "not begin with underscores: '%s'" %
                                              var)
                self.lookups = tuple(var.split(VARIABLE_ATTRIBUTE_SEPARATOR))

    def resolve(self, context):
        """Resolve this variable against a given context."""
        if self.lookups is not None:
            # We're dealing with a variable that needs to be resolved
            value = self._resolve_lookup(context)
        else:
            # We're dealing with a literal, so it's already been "resolved"
            value = self.literal
        if self.translate:
            is_safe = isinstance(value, SafeData)
            # Escape %-signs so the value is safe to use as a gettext msgid.
            msgid = value.replace('%', '%%')
            msgid = mark_safe(msgid) if is_safe else msgid
            if self.message_context:
                return pgettext_lazy(self.message_context, msgid)
            else:
                return ugettext_lazy(msgid)
        return value

    def __repr__(self):
        return "<%s: %r>" % (self.__class__.__name__, self.var)

    def __str__(self):
        return self.var

    def _resolve_lookup(self, context):
        """
        Performs resolution of a real variable (i.e. not a literal) against the
        given context.
        As indicated by the method's name, this method is an implementation
        detail and shouldn't be called by external code. Use Variable.resolve()
        instead.
        """
        current = context
        try:  # catch-all for silent variable failures
            # For each dotted component, try dictionary lookup, then attribute
            # lookup, then list-index lookup -- in that order.
            for bit in self.lookups:
                try:  # dictionary lookup
                    current = current[bit]
                # ValueError/IndexError are for numpy.array lookup on
                # numpy < 1.9 and 1.9+ respectively
                except (TypeError, AttributeError, KeyError, ValueError, IndexError):
                    try:  # attribute lookup
                        # Don't return class attributes if the class is the context:
                        if isinstance(current, BaseContext) and getattr(type(current), bit):
                            raise AttributeError
                        current = getattr(current, bit)
                    except (TypeError, AttributeError) as e:
                        # Reraise an AttributeError raised by a @property
                        if (isinstance(e, AttributeError) and
                                not isinstance(current, BaseContext) and bit in dir(current)):
                            raise
                        try:  # list-index lookup
                            current = current[int(bit)]
                        except (IndexError,  # list index out of range
                                ValueError,  # invalid literal for int()
                                KeyError,  # current is a dict without `int(bit)` key
                                TypeError):  # unsubscriptable object
                            raise VariableDoesNotExist("Failed lookup for key "
                                                       "[%s] in %r",
                                                       (bit, current))  # missing attribute
                if callable(current):
                    if getattr(current, 'do_not_call_in_templates', False):
                        pass
                    elif getattr(current, 'alters_data', False):
                        # Never call data-altering methods from templates.
                        current = context.template.engine.string_if_invalid
                    else:
                        try:  # method call (assuming no args required)
                            current = current()
                        except TypeError:
                            try:
                                inspect.getcallargs(current)
                            except TypeError:  # arguments *were* required
                                current = context.template.engine.string_if_invalid  # invalid method call
                            else:
                                # TypeError came from inside the call itself;
                                # don't mask it.
                                raise
        except Exception as e:
            template_name = getattr(context, 'template_name', 'unknown')
            logger.debug('{} - {}'.format(template_name, e))
            if getattr(e, 'silent_variable_failure', False):
                current = context.template.engine.string_if_invalid
            else:
                raise
        return current
class Node(object):
    """Base class for all nodes in a compiled template."""

    # Nodes that must come before any other non-text node (e.g. an extends
    # tag) set this to True; see Parser.extend_nodelist().
    must_be_first = False
    child_nodelists = ('nodelist',)
    token = None

    def render(self, context):
        """
        Return the node rendered as a string.
        """
        pass

    def render_annotated(self, context):
        """
        Render the node. If debug is True and an exception occurs during
        rendering, the exception is annotated with contextual line information
        where it occurred in the template. For internal usage this method is
        preferred over using the render method directly.
        """
        try:
            return self.render(context)
        except Exception as exc:
            if context.template.engine.debug and not hasattr(exc, 'template_debug'):
                exc.template_debug = context.template.get_exception_info(exc, self.token)
            raise

    def __iter__(self):
        yield self

    def get_nodes_by_type(self, nodetype):
        """
        Return a list of all nodes (within this node and its nodelist)
        of the given type
        """
        matches = [self] if isinstance(self, nodetype) else []
        for attr in self.child_nodelists:
            child = getattr(self, attr, None)
            if child:
                matches.extend(child.get_nodes_by_type(nodetype))
        return matches
class NodeList(list):
    """A list of template nodes that renders to a single safe string."""

    # Set to True the first time a non-TextNode is inserted by
    # extend_nodelist().
    contains_nontext = False

    def render(self, context):
        rendered = []
        for node in self:
            if isinstance(node, Node):
                rendered.append(force_text(node.render_annotated(context)))
            else:
                # Plain (non-Node) entries are stringified as-is.
                rendered.append(force_text(node))
        return mark_safe(''.join(rendered))

    def get_nodes_by_type(self, nodetype):
        "Return a list of all nodes of the given type"
        found = []
        for node in self:
            found.extend(node.get_nodes_by_type(nodetype))
        return found
class TextNode(Node):
    """Literal template text between tags; rendered verbatim."""

    def __init__(self, s):
        self.s = s

    def __repr__(self):
        # Truncate long text so reprs stay readable; force to ASCII so the
        # repr itself can never raise on odd input.
        preview = "<%s: %r>" % (self.__class__.__name__, self.s[:25])
        return force_str(preview, 'ascii', errors='replace')

    def render(self, context):
        return self.s
def render_value_in_context(value, context):
    """
    Convert any value to a string to become part of a rendered template. This
    means escaping, if required, and conversion to a unicode object. If value
    is a string, it is expected to have already been translated.
    """
    # Localize time zone and number/date formatting, then coerce to text.
    value = template_localtime(value, use_tz=context.use_tz)
    value = force_text(localize(value, use_l10n=context.use_l10n))
    needs_escaping = (
        (context.autoescape and not isinstance(value, SafeData)) or
        isinstance(value, EscapeData)
    )
    if needs_escaping:
        return conditional_escape(value)
    return value
class VariableNode(Node):
    """Node that renders the resolved value of a single {{ variable }}."""

    def __init__(self, filter_expression):
        self.filter_expression = filter_expression

    def __repr__(self):
        return "<Variable Node: %s>" % self.filter_expression

    def render(self, context):
        try:
            resolved = self.filter_expression.resolve(context)
        except UnicodeDecodeError:
            # Unicode conversion can fail sometimes for reasons out of our
            # control (e.g. exception rendering). In that case, we fail
            # quietly.
            return ''
        return render_value_in_context(resolved, context)
# Regex for token keyword arguments
kwarg_re = re.compile(r"(?:(\w+)=)?(.+)")


def token_kwargs(bits, parser, support_legacy=False):
    """
    A utility function for parsing token keyword arguments.

    :param bits: A list containing the remainder of the token (split by
        spaces) that is to be checked for arguments. Valid arguments are
        removed from this list.
    :param support_legacy: If ``True``, the legacy format ``1 as foo`` is
        accepted. Otherwise, only the standard ``foo=1`` format is allowed.
    :returns: A dictionary of the arguments retrieved from the ``bits`` token
        list. Not every remaining bit has to be a keyword argument: the
        dictionary is returned as soon as an invalid argument format is
        reached.
    """
    if not bits:
        return {}
    # Peek at the first bit to decide which of the two formats applies.
    first_match = kwarg_re.match(bits[0])
    kwarg_format = first_match and first_match.group(1)
    if not kwarg_format:
        if not support_legacy:
            return {}
        if len(bits) < 3 or bits[1] != 'as':
            return {}
    kwargs = {}
    while bits:
        if kwarg_format:
            # Standard "key=value" form, one bit at a time.
            match = kwarg_re.match(bits[0])
            if not (match and match.group(1)):
                break
            key, value = match.groups()
            del bits[:1]
        else:
            # Legacy "value as key" form, three bits at a time.
            if len(bits) < 3 or bits[1] != 'as':
                break
            key, value = bits[2], bits[0]
            del bits[:3]
        kwargs[key] = parser.compile_filter(value)
        if bits and not kwarg_format:
            # Legacy pairs are joined by "and"; anything else ends parsing.
            if bits[0] != 'and':
                break
            del bits[:1]
    return kwargs
| bsd-3-clause |
Jusedawg/SickRage | lib/github/GitTree.py | 74 | 3323 | # -*- coding: utf-8 -*-
# ########################## Copyrights and license ############################
# #
# Copyright 2012 Vincent Jacques <vincent@vincent-jacques.net> #
# Copyright 2012 Zearin <zearin@gonk.net> #
# Copyright 2013 AKFish <akfish@gmail.com> #
# Copyright 2013 Vincent Jacques <vincent@vincent-jacques.net> #
# #
# This file is part of PyGithub. http://jacquev6.github.com/PyGithub/ #
# #
# PyGithub is free software: you can redistribute it and/or modify it under #
# the terms of the GNU Lesser General Public License as published by the Free #
# Software Foundation, either version 3 of the License, or (at your option) #
# any later version. #
# #
# PyGithub is distributed in the hope that it will be useful, but WITHOUT ANY #
# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS #
# FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more #
# details. #
# #
# You should have received a copy of the GNU Lesser General Public License #
# along with PyGithub. If not, see <http://www.gnu.org/licenses/>. #
# #
# ##############################################################################
import github.GithubObject
import github.GitTreeElement
class GitTree(github.GithubObject.CompletableGithubObject):
    """
    This class represents GitTrees as returned for example by http://developer.github.com/v3/todo
    """

    @property
    def sha(self):
        """
        SHA-1 identifier of this tree.

        :type: string
        """
        # NOTE(review): _completeIfNotSet presumably lazy-loads the full
        # object when the attribute was absent from a partial API payload --
        # see CompletableGithubObject.
        self._completeIfNotSet(self._sha)
        return self._sha.value

    @property
    def tree(self):
        """
        Entries (blobs/subtrees) contained in this tree.

        :type: list of :class:`github.GitTreeElement.GitTreeElement`
        """
        self._completeIfNotSet(self._tree)
        return self._tree.value

    @property
    def url(self):
        """
        API URL of this tree.

        :type: string
        """
        self._completeIfNotSet(self._url)
        return self._url.value

    @property
    def _identity(self):
        # The SHA uniquely identifies a git tree.
        return self.sha

    def _initAttributes(self):
        # All attributes start out NotSet; _useAttributes fills in whatever
        # the API payload provided.
        self._sha = github.GithubObject.NotSet
        self._tree = github.GithubObject.NotSet
        self._url = github.GithubObject.NotSet

    def _useAttributes(self, attributes):
        # Each attribute is copied only when present in the payload.
        if "sha" in attributes:  # pragma no branch
            self._sha = self._makeStringAttribute(attributes["sha"])
        if "tree" in attributes:  # pragma no branch
            self._tree = self._makeListOfClassesAttribute(github.GitTreeElement.GitTreeElement, attributes["tree"])
        if "url" in attributes:  # pragma no branch
            self._url = self._makeStringAttribute(attributes["url"])
| gpl-3.0 |
nouiz/pygithooks | hooks/util.py | 2 | 2663 | #!/usr/bin/env python
"""
Shared pygithooks utils.
"""
import shlex
import subprocess
def run_command(command, shell=False):
    """
    Run *command* (a shell-style string) and capture its output.

    Returns a ``(stdout, stderr, returncode)`` triple.
    """
    proc = subprocess.Popen(shlex.split(command),
                            stdout=subprocess.PIPE,
                            stderr=subprocess.PIPE,
                            shell=shell)
    out, err = proc.communicate()
    return out, err, proc.returncode
def run_piped_commands(commands, shell=False):
    """
    Run multiple commands, chaining stdout to stdin a la shell pipelining.

    Returns the ``(stdout, stderr, returncode)`` triple of the *last*
    command in the pipeline.
    """
    if not commands:
        raise ValueError("run_piped_commands requires at least one command")
    previous_stdout = None
    for command in commands:
        proc = subprocess.Popen(shlex.split(command),
                                stdin=previous_stdout,
                                stdout=subprocess.PIPE,
                                stderr=subprocess.PIPE,
                                shell=shell)
        previous_stdout = proc.stdout
        # The sample code at http://docs.python.org/library/subprocess.html
        # closes the prior process's stdout here (so it can receive a SIGPIPE
        # if this process exits), but doing so has proven problematic.
    # Only the final process's output and return code are reported.
    out, err = proc.communicate()
    return out, err, proc.returncode
def get_config(config_key, as_bool=False, default=None):
    """
    Retrieve the value of pygithooks.<config_key> from git config, optionally
    coercing it to a boolean via ``git config --bool``.
    """
    command = "git config --null %(bool_flag)s --get pygithooks.%(config_key)s" % dict(
        config_key=config_key,
        bool_flag="--bool" if as_bool else "")
    git_out, git_err, git_rc = run_command(command)
    if git_err:
        raise RuntimeError("git config command returned an error", command, git_err)
    if not git_out or git_rc:
        return default
    # With --null, git terminates the value with a NUL byte; everything up to
    # the first NUL is the value. No NUL at all means unexpected output.
    config_val, nul, _ = git_out.partition(chr(0))
    if not nul:
        return default
    if as_bool:
        return config_val == "true"
    return config_val
| bsd-3-clause |
lache/RacingKingLee | monitor/engine.win64/2.74/python/lib/sre_constants.py | 106 | 7267 | #
# Secret Labs' Regular Expression Engine
#
# various symbols used by the regular expression engine.
# run this script to update the _sre include files!
#
# Copyright (c) 1998-2001 by Secret Labs AB. All rights reserved.
#
# See the sre.py file for information on usage and redistribution.
#
"""Internal support module for sre"""
# update when constants are added or removed
MAGIC = 20031017
from _sre import MAXREPEAT
# SRE standard exception (access as sre.error)
# should this really be here?
# SRE standard exception (access as sre.error)
# should this really be here?
class error(Exception):
    """Exception raised for invalid regular expressions."""
    pass

# operators
# Symbolic opcode names; mapped to their numeric codes by makedict() below.
FAILURE = "failure"
SUCCESS = "success"

ANY = "any"
ANY_ALL = "any_all"
ASSERT = "assert"
ASSERT_NOT = "assert_not"
AT = "at"
BIGCHARSET = "bigcharset"
BRANCH = "branch"
CALL = "call"
CATEGORY = "category"
CHARSET = "charset"
GROUPREF = "groupref"
GROUPREF_IGNORE = "groupref_ignore"
GROUPREF_EXISTS = "groupref_exists"
IN = "in"
IN_IGNORE = "in_ignore"
INFO = "info"
JUMP = "jump"
LITERAL = "literal"
LITERAL_IGNORE = "literal_ignore"
MARK = "mark"
MAX_REPEAT = "max_repeat"
MAX_UNTIL = "max_until"
MIN_REPEAT = "min_repeat"
MIN_UNTIL = "min_until"
NEGATE = "negate"
NOT_LITERAL = "not_literal"
NOT_LITERAL_IGNORE = "not_literal_ignore"
RANGE = "range"
REPEAT = "repeat"
REPEAT_ONE = "repeat_one"
SUBPATTERN = "subpattern"
MIN_REPEAT_ONE = "min_repeat_one"

# positions
# Anchors/assertions tied to a position in the target string.
AT_BEGINNING = "at_beginning"
AT_BEGINNING_LINE = "at_beginning_line"
AT_BEGINNING_STRING = "at_beginning_string"
AT_BOUNDARY = "at_boundary"
AT_NON_BOUNDARY = "at_non_boundary"
AT_END = "at_end"
AT_END_LINE = "at_end_line"
AT_END_STRING = "at_end_string"
AT_LOC_BOUNDARY = "at_loc_boundary"
AT_LOC_NON_BOUNDARY = "at_loc_non_boundary"
AT_UNI_BOUNDARY = "at_uni_boundary"
AT_UNI_NON_BOUNDARY = "at_uni_non_boundary"

# categories
# Character classes (\d, \s, \w and friends), including locale/unicode forms.
CATEGORY_DIGIT = "category_digit"
CATEGORY_NOT_DIGIT = "category_not_digit"
CATEGORY_SPACE = "category_space"
CATEGORY_NOT_SPACE = "category_not_space"
CATEGORY_WORD = "category_word"
CATEGORY_NOT_WORD = "category_not_word"
CATEGORY_LINEBREAK = "category_linebreak"
CATEGORY_NOT_LINEBREAK = "category_not_linebreak"
CATEGORY_LOC_WORD = "category_loc_word"
CATEGORY_LOC_NOT_WORD = "category_loc_not_word"
CATEGORY_UNI_DIGIT = "category_uni_digit"
CATEGORY_UNI_NOT_DIGIT = "category_uni_not_digit"
CATEGORY_UNI_SPACE = "category_uni_space"
CATEGORY_UNI_NOT_SPACE = "category_uni_not_space"
CATEGORY_UNI_WORD = "category_uni_word"
CATEGORY_UNI_NOT_WORD = "category_uni_not_word"
CATEGORY_UNI_LINEBREAK = "category_uni_linebreak"
CATEGORY_UNI_NOT_LINEBREAK = "category_uni_not_linebreak"

# The position of each name in these lists defines its numeric code; the
# lists are replaced by name->code dicts via makedict() below, and the same
# numbering is emitted into sre_constants.h for the C engine.
OPCODES = [
    # failure=0 success=1 (just because it looks better that way :-)
    FAILURE, SUCCESS,

    ANY, ANY_ALL,
    ASSERT, ASSERT_NOT,
    AT,
    BRANCH,
    CALL,
    CATEGORY,
    CHARSET, BIGCHARSET,
    GROUPREF, GROUPREF_EXISTS, GROUPREF_IGNORE,
    IN, IN_IGNORE,
    INFO,
    JUMP,
    LITERAL, LITERAL_IGNORE,
    MARK,
    MAX_UNTIL,
    MIN_UNTIL,
    NOT_LITERAL, NOT_LITERAL_IGNORE,
    NEGATE,
    RANGE,
    REPEAT,
    REPEAT_ONE,
    SUBPATTERN,
    MIN_REPEAT_ONE
]

ATCODES = [
    AT_BEGINNING, AT_BEGINNING_LINE, AT_BEGINNING_STRING, AT_BOUNDARY,
    AT_NON_BOUNDARY, AT_END, AT_END_LINE, AT_END_STRING,
    AT_LOC_BOUNDARY, AT_LOC_NON_BOUNDARY, AT_UNI_BOUNDARY,
    AT_UNI_NON_BOUNDARY
]

CHCODES = [
    CATEGORY_DIGIT, CATEGORY_NOT_DIGIT, CATEGORY_SPACE,
    CATEGORY_NOT_SPACE, CATEGORY_WORD, CATEGORY_NOT_WORD,
    CATEGORY_LINEBREAK, CATEGORY_NOT_LINEBREAK, CATEGORY_LOC_WORD,
    CATEGORY_LOC_NOT_WORD, CATEGORY_UNI_DIGIT, CATEGORY_UNI_NOT_DIGIT,
    CATEGORY_UNI_SPACE, CATEGORY_UNI_NOT_SPACE, CATEGORY_UNI_WORD,
    CATEGORY_UNI_NOT_WORD, CATEGORY_UNI_LINEBREAK,
    CATEGORY_UNI_NOT_LINEBREAK
]
def makedict(list):
    """Return a dict mapping each element of *list* to its position index."""
    return {item: index for index, item in enumerate(list)}
# Replace the ordered name lists with name -> numeric code mappings.
OPCODES = makedict(OPCODES)
ATCODES = makedict(ATCODES)
CHCODES = makedict(CHCODES)

# replacement operations for "ignore case" mode
OP_IGNORE = {
    GROUPREF: GROUPREF_IGNORE,
    IN: IN_IGNORE,
    LITERAL: LITERAL_IGNORE,
    NOT_LITERAL: NOT_LITERAL_IGNORE
}

# anchor replacements under the MULTILINE flag
AT_MULTILINE = {
    AT_BEGINNING: AT_BEGINNING_LINE,
    AT_END: AT_END_LINE
}

# word-boundary replacements under the LOCALE flag
AT_LOCALE = {
    AT_BOUNDARY: AT_LOC_BOUNDARY,
    AT_NON_BOUNDARY: AT_LOC_NON_BOUNDARY
}

# word-boundary replacements under the UNICODE flag
AT_UNICODE = {
    AT_BOUNDARY: AT_UNI_BOUNDARY,
    AT_NON_BOUNDARY: AT_UNI_NON_BOUNDARY
}

# character-class replacements under the LOCALE flag
CH_LOCALE = {
    CATEGORY_DIGIT: CATEGORY_DIGIT,
    CATEGORY_NOT_DIGIT: CATEGORY_NOT_DIGIT,
    CATEGORY_SPACE: CATEGORY_SPACE,
    CATEGORY_NOT_SPACE: CATEGORY_NOT_SPACE,
    CATEGORY_WORD: CATEGORY_LOC_WORD,
    CATEGORY_NOT_WORD: CATEGORY_LOC_NOT_WORD,
    CATEGORY_LINEBREAK: CATEGORY_LINEBREAK,
    CATEGORY_NOT_LINEBREAK: CATEGORY_NOT_LINEBREAK
}

# character-class replacements under the UNICODE flag
CH_UNICODE = {
    CATEGORY_DIGIT: CATEGORY_UNI_DIGIT,
    CATEGORY_NOT_DIGIT: CATEGORY_UNI_NOT_DIGIT,
    CATEGORY_SPACE: CATEGORY_UNI_SPACE,
    CATEGORY_NOT_SPACE: CATEGORY_UNI_NOT_SPACE,
    CATEGORY_WORD: CATEGORY_UNI_WORD,
    CATEGORY_NOT_WORD: CATEGORY_UNI_NOT_WORD,
    CATEGORY_LINEBREAK: CATEGORY_UNI_LINEBREAK,
    CATEGORY_NOT_LINEBREAK: CATEGORY_UNI_NOT_LINEBREAK
}

# flags
SRE_FLAG_TEMPLATE = 1  # template mode (disable backtracking)
SRE_FLAG_IGNORECASE = 2  # case insensitive
SRE_FLAG_LOCALE = 4  # honour system locale
SRE_FLAG_MULTILINE = 8  # treat target as multiline string
SRE_FLAG_DOTALL = 16  # treat target as a single string
SRE_FLAG_UNICODE = 32  # use unicode "locale"
SRE_FLAG_VERBOSE = 64  # ignore whitespace and comments
SRE_FLAG_DEBUG = 128  # debugging
SRE_FLAG_ASCII = 256  # use ascii "locale"

# flags for INFO primitive
SRE_INFO_PREFIX = 1  # has prefix
SRE_INFO_LITERAL = 2  # entire pattern is literal (given by prefix)
SRE_INFO_CHARSET = 4  # pattern starts with character from given set

if __name__ == "__main__":
    # Regenerate sre_constants.h so the C engine shares these numeric codes.
    def dump(f, d, prefix):
        # Emit #defines in ascending code order.
        items = sorted(d.items(), key=lambda a: a[1])
        for k, v in items:
            f.write("#define %s_%s %s\n" % (prefix, k.upper(), v))
    f = open("sre_constants.h", "w")
    f.write("""\
/*
 * Secret Labs' Regular Expression Engine
 *
 * regular expression matching engine
 *
 * NOTE: This file is generated by sre_constants.py.  If you need
 * to change anything in here, edit sre_constants.py and run it.
 *
 * Copyright (c) 1997-2001 by Secret Labs AB.  All rights reserved.
 *
 * See the _sre.c file for information on usage and redistribution.
 */

""")
    f.write("#define SRE_MAGIC %d\n" % MAGIC)
    dump(f, OPCODES, "SRE_OP")
    dump(f, ATCODES, "SRE")
    dump(f, CHCODES, "SRE")
    f.write("#define SRE_FLAG_TEMPLATE %d\n" % SRE_FLAG_TEMPLATE)
    f.write("#define SRE_FLAG_IGNORECASE %d\n" % SRE_FLAG_IGNORECASE)
    f.write("#define SRE_FLAG_LOCALE %d\n" % SRE_FLAG_LOCALE)
    f.write("#define SRE_FLAG_MULTILINE %d\n" % SRE_FLAG_MULTILINE)
    f.write("#define SRE_FLAG_DOTALL %d\n" % SRE_FLAG_DOTALL)
    f.write("#define SRE_FLAG_UNICODE %d\n" % SRE_FLAG_UNICODE)
    f.write("#define SRE_FLAG_VERBOSE %d\n" % SRE_FLAG_VERBOSE)
    f.write("#define SRE_FLAG_DEBUG %d\n" % SRE_FLAG_DEBUG)
    f.write("#define SRE_FLAG_ASCII %d\n" % SRE_FLAG_ASCII)
    f.write("#define SRE_INFO_PREFIX %d\n" % SRE_INFO_PREFIX)
    f.write("#define SRE_INFO_LITERAL %d\n" % SRE_INFO_LITERAL)
    f.write("#define SRE_INFO_CHARSET %d\n" % SRE_INFO_CHARSET)
    f.close()
    print("done")
| mit |
xiandiancloud/edx-platform-Y | common/djangoapps/django_comment_common/migrations/0001_initial.py | 188 | 6980 | # -*- coding: utf-8 -*-
from south.v2 import SchemaMigration
class Migration(SchemaMigration):
    """Initial (empty) South migration for the django_comment_common app.

    forwards()/backwards() intentionally do nothing; only the frozen ORM
    model definitions below are recorded so later migrations can diff
    against them.
    """
    #
    # cdodge: This is basically an empty migration since everything has - up to now - managed in the django_comment_client app
    # But going forward we should be using this migration
    #
    def forwards(self, orm):
        # Intentionally a no-op: no schema changes in the initial migration.
        pass
    def backwards(self, orm):
        # Intentionally a no-op: nothing to reverse.
        pass
    # Frozen ORM snapshot used by South; must not be edited by hand except
    # when regenerated by ./manage.py schemamigration.
    models = {
        'auth.group': {
            'Meta': {'object_name': 'Group'},
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
            'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
        },
        'auth.permission': {
            'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
            'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
        },
        'auth.user': {
            'Meta': {'object_name': 'User'},
            'about': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
            'avatar_type': ('django.db.models.fields.CharField', [], {'default': "'n'", 'max_length': '1'}),
            'bronze': ('django.db.models.fields.SmallIntegerField', [], {'default': '0'}),
            'consecutive_days_visit_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
            'country': ('django_countries.fields.CountryField', [], {'max_length': '2', 'blank': 'True'}),
            'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'date_of_birth': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
            'display_tag_filter_strategy': ('django.db.models.fields.SmallIntegerField', [], {'default': '0'}),
            'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
            'email_isvalid': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'email_key': ('django.db.models.fields.CharField', [], {'max_length': '32', 'null': 'True'}),
            'email_tag_filter_strategy': ('django.db.models.fields.SmallIntegerField', [], {'default': '1'}),
            'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
            'gold': ('django.db.models.fields.SmallIntegerField', [], {'default': '0'}),
            'gravatar': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
            'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'ignored_tags': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
            'interesting_tags': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
            'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
            'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
            'last_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'location': ('django.db.models.fields.CharField', [], {'max_length': '100', 'blank': 'True'}),
            'new_response_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
            'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
            'questions_per_page': ('django.db.models.fields.SmallIntegerField', [], {'default': '10'}),
            'real_name': ('django.db.models.fields.CharField', [], {'max_length': '100', 'blank': 'True'}),
            'reputation': ('django.db.models.fields.PositiveIntegerField', [], {'default': '1'}),
            'seen_response_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
            'show_country': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'silver': ('django.db.models.fields.SmallIntegerField', [], {'default': '0'}),
            'status': ('django.db.models.fields.CharField', [], {'default': "'w'", 'max_length': '2'}),
            'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
            'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'}),
            'website': ('django.db.models.fields.URLField', [], {'max_length': '200', 'blank': 'True'})
        },
        'contenttypes.contenttype': {
            'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
            'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
        },
        'django_comment_common.permission': {
            'Meta': {'object_name': 'Permission'},
            'name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'primary_key': 'True'}),
            'roles': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'permissions'", 'symmetrical': 'False', 'to': "orm['django_comment_common.Role']"})
        },
        'django_comment_common.role': {
            'Meta': {'object_name': 'Role'},
            'course_id': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '255', 'blank': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '30'}),
            'users': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'roles'", 'symmetrical': 'False', 'to': "orm['auth.User']"})
        }
    }
    complete_apps = ['django_comment_common']
| agpl-3.0 |
fpy171/django | tests/template_tests/filter_tests/test_iriencode.py | 388 | 1603 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.template.defaultfilters import iriencode, urlencode
from django.test import SimpleTestCase
from django.utils.safestring import mark_safe
from ..utils import setup
class IriencodeTests(SimpleTestCase):
    """Template-level tests for the ``iriencode`` filter.

    Safe (already-encoded or mark_safe'd) strings must pass through the
    filter unchanged, both with and without autoescaping.
    """
    @setup({'iriencode01': '{{ url|iriencode }}'})
    def test_iriencode01(self):
        context = {'url': '?test=1&me=2'}
        rendered = self.engine.render_to_string('iriencode01', context)
        self.assertEqual(rendered, '?test=1&amp;me=2')
    @setup({'iriencode02': '{% autoescape off %}{{ url|iriencode }}{% endautoescape %}'})
    def test_iriencode02(self):
        context = {'url': '?test=1&me=2'}
        rendered = self.engine.render_to_string('iriencode02', context)
        self.assertEqual(rendered, '?test=1&me=2')
    @setup({'iriencode03': '{{ url|iriencode }}'})
    def test_iriencode03(self):
        context = {'url': mark_safe('?test=1&me=2')}
        rendered = self.engine.render_to_string('iriencode03', context)
        self.assertEqual(rendered, '?test=1&me=2')
    @setup({'iriencode04': '{% autoescape off %}{{ url|iriencode }}{% endautoescape %}'})
    def test_iriencode04(self):
        context = {'url': mark_safe('?test=1&me=2')}
        rendered = self.engine.render_to_string('iriencode04', context)
        self.assertEqual(rendered, '?test=1&me=2')
class FunctionTests(SimpleTestCase):
    """Direct (non-template) tests for the iriencode function."""
    def test_unicode(self):
        encoded = iriencode('S\xf8r-Tr\xf8ndelag')
        self.assertEqual(encoded, 'S%C3%B8r-Tr%C3%B8ndelag')
    def test_urlencoded(self):
        # iriencode must leave already-urlencoded input intact.
        encoded = iriencode(urlencode('fran\xe7ois & jill'))
        self.assertEqual(encoded, 'fran%C3%A7ois%20%26%20jill')
| bsd-3-clause |
mhild/Sick-Beard | sickbeard/metadata/tivo.py | 8 | 13235 | # Author: Nic Wolfe <nic@wolfeden.ca>
# Author: Gordon Turner <gordonturner@gordonturner.ca>
# URL: http://code.google.com/p/sickbeard/
#
# This file is part of Sick Beard.
#
# Sick Beard is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Sick Beard is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Sick Beard. If not, see <http://www.gnu.org/licenses/>.
from __future__ import with_statement
import datetime
import os
import sickbeard
from sickbeard import logger, exceptions, helpers
from sickbeard.metadata import generic
from sickbeard import encodingKludge as ek
from sickbeard.exceptions import ex
from lib.tvdb_api import tvdb_api, tvdb_exceptions
class TIVOMetadata(generic.GenericMetadata):
    """
    Metadata generation class for TIVO (pyTivo-style .txt metadata).

    The following file structure is used:

      show_root/Season ##/filename.ext            (*)
      show_root/Season ##/.meta/filename.ext.txt  (episode metadata)

    This class only generates episode specific metadata files, it does NOT
    generate a default.txt file. All image/show-level features of the
    generic metadata interface are stubbed out below.
    """
    def __init__(self,
                 show_metadata=False,
                 episode_metadata=False,
                 fanart=False,
                 poster=False,
                 banner=False,
                 episode_thumbnails=False,
                 season_posters=False,
                 season_banners=False,
                 season_all_poster=False,
                 season_all_banner=False):
        generic.GenericMetadata.__init__(self,
                                         show_metadata,
                                         episode_metadata,
                                         fanart,
                                         poster,
                                         banner,
                                         episode_thumbnails,
                                         season_posters,
                                         season_banners,
                                         season_all_poster,
                                         season_all_banner)
        self.name = 'TIVO'
        # pyTivo requires plain-text metadata with a .txt extension.
        self._ep_nfo_extension = "txt"
        # web-ui metadata template
        self.eg_show_metadata = "<i>not supported</i>"
        self.eg_episode_metadata = "Season##\\.meta\\<i>filename</i>.ext.txt"
        self.eg_fanart = "<i>not supported</i>"
        self.eg_poster = "<i>not supported</i>"
        self.eg_banner = "<i>not supported</i>"
        self.eg_episode_thumbnails = "<i>not supported</i>"
        self.eg_season_posters = "<i>not supported</i>"
        self.eg_season_banners = "<i>not supported</i>"
        self.eg_season_all_poster = "<i>not supported</i>"
        self.eg_season_all_banner = "<i>not supported</i>"
    # Override with empty methods for unsupported features
    def retrieveShowMetadata(self, folder):
        # no show metadata generated, we abort this lookup function
        return (None, None)
    def create_show_metadata(self, show_obj):
        pass
    def get_show_file_path(self, show_obj):
        pass
    def create_fanart(self, show_obj):
        pass
    def create_poster(self, show_obj):
        pass
    def create_banner(self, show_obj):
        pass
    def create_episode_thumb(self, ep_obj):
        pass
    def get_episode_thumb_path(self, ep_obj):
        pass
    def create_season_posters(self, ep_obj):
        pass
    def create_season_banners(self, ep_obj):
        pass
    def create_season_all_poster(self, show_obj):
        pass
    def create_season_all_banner(self, show_obj):
        pass
    # Override generic class
    def get_episode_file_path(self, ep_obj):
        """
        Returns a full show dir/.meta/episode.txt path for Tivo
        episode metadata files, or '' when the episode file is missing.

        Note, that pyTivo requires the metadata filename to include the
        original extension: if the episode name is foo.avi, the metadata
        name is foo.avi.txt.

        ep_obj: a TVEpisode object to get the path for
        """
        if ek.ek(os.path.isfile, ep_obj.location):
            metadata_file_name = ek.ek(os.path.basename, ep_obj.location) + "." + self._ep_nfo_extension
            metadata_dir_name = ek.ek(os.path.join, ek.ek(os.path.dirname, ep_obj.location), '.meta')
            metadata_file_path = ek.ek(os.path.join, metadata_dir_name, metadata_file_name)
        else:
            logger.log(u"Episode location doesn't exist: " + str(ep_obj.location), logger.DEBUG)
            return ''
        return metadata_file_path
    def _ep_data(self, ep_obj):
        """
        Creates a key:value text blob for a Tivo episode metadata file and
        returns it as a string (or False/None when show/episode data
        cannot be fetched from TVDB).

        ep_obj: a TVEpisode instance to create the metadata file for.

        Lookup the show in http://thetvdb.com/ using the python library:
        https://github.com/dbr/tvdb_api/
        The results are saved in the object myShow.
        The key values for the tivo metadata file are from:
        http://pytivo.sourceforge.net/wiki/index.php/Metadata
        """
        data = ""
        # Multi-episode files get one metadata section per episode.
        eps_to_write = [ep_obj] + ep_obj.relatedEps
        tvdb_lang = ep_obj.show.lang
        try:
            # There's gotta be a better way of doing this but we don't wanna
            # change the language value elsewhere
            ltvdb_api_parms = sickbeard.TVDB_API_PARMS.copy()
            if tvdb_lang and not tvdb_lang == 'en':
                ltvdb_api_parms['language'] = tvdb_lang
            t = tvdb_api.Tvdb(actors=True, **ltvdb_api_parms)
            myShow = t[ep_obj.show.tvdbid]
        except tvdb_exceptions.tvdb_shownotfound, e:
            raise exceptions.ShowNotFoundException(str(e))
        except tvdb_exceptions.tvdb_error, e:
            logger.log(u"Unable to connect to TVDB while creating meta files - skipping - " + str(e), logger.ERROR)
            return False
        for curEpToWrite in eps_to_write:
            try:
                myEp = myShow[curEpToWrite.season][curEpToWrite.episode]
            except (tvdb_exceptions.tvdb_episodenotfound, tvdb_exceptions.tvdb_seasonnotfound):
                logger.log(u"Unable to find episode " + str(curEpToWrite.season) + "x" + str(curEpToWrite.episode) + " on tvdb... has it been removed? Should I delete from db?")
                return None
            # Specials (season 0) often have no air date on TVDB; fall back
            # to the epoch-like ordinal-1 date so the entry isn't dropped.
            if myEp["firstaired"] is None and ep_obj.season == 0:
                myEp["firstaired"] = str(datetime.date.fromordinal(1))
            if myEp["episodename"] is None or myEp["firstaired"] is None:
                return None
            if myShow["seriesname"] is not None:
                data += ("title : " + myShow["seriesname"] + "\n")
                data += ("seriesTitle : " + myShow["seriesname"] + "\n")
            data += ("episodeTitle : " + curEpToWrite._format_pattern('%Sx%0E %EN') + "\n")
            # This should be entered for episodic shows and omitted for movies. The standard tivo format is to enter
            # the season number followed by the episode number for that season. For example, enter 201 for season 2
            # episode 01.
            # This only shows up if you go into the Details from the Program screen.
            # This seems to disappear once the video is transferred to TiVo.
            # NOTE: May not be correct format, missing season, but based on description from wiki leaving as is.
            data += ("episodeNumber : " + str(curEpToWrite.episode) + "\n")
            # Must be entered as true or false. If true, the year from originalAirDate will be shown in parentheses
            # after the episode's title and before the description on the Program screen.
            # FIXME: Hardcode isEpisode to true for now, not sure how to handle movies
            data += ("isEpisode : true\n")
            # Write the synopsis of the video here
            # Micrsoft Word's smartquotes can die in a fire.
            sanitizedDescription = curEpToWrite.description
            # Replace double curly quotes
            sanitizedDescription = sanitizedDescription.replace(u"\u201c", "\"").replace(u"\u201d", "\"")
            # Replace single curly quotes
            sanitizedDescription = sanitizedDescription.replace(u"\u2018", "'").replace(u"\u2019", "'").replace(u"\u02BC", "'")
            data += ("description : " + sanitizedDescription + "\n")
            # Usually starts with "SH" and followed by 6-8 digits.
            # Tivo uses zap2it for their data, so the series id is the zap2it_id.
            if myShow["zap2it_id"] is not None:
                data += ("seriesId : " + myShow["zap2it_id"] + "\n")
            # This is the call sign of the channel the episode was recorded from.
            if myShow["network"] is not None:
                data += ("callsign : " + myShow["network"] + "\n")
            # This must be entered as yyyy-mm-ddThh:mm:ssZ (the t is capitalized and never changes, the Z is also
            # capitalized and never changes). This is the original air date of the episode.
            # NOTE: Hard coded the time to T00:00:00Z as we really don't know when during the day the first run happened.
            if curEpToWrite.airdate != datetime.date.fromordinal(1):
                data += ("originalAirDate : " + str(curEpToWrite.airdate) + "T00:00:00Z\n")
            # This shows up at the beginning of the description on the Program screen and on the Details screen.
            if myShow["actors"]:
                for actor in myShow["actors"].split('|'):
                    if actor is not None and actor.strip():
                        data += ("vActor : " + actor.strip() + "\n")
            # This is shown on both the Program screen and the Details screen.
            if myEp["rating"] is not None:
                try:
                    rating = float(myEp['rating'])
                except ValueError:
                    rating = 0.0
                # convert 10 to 4 star rating. 4 * rating / 10
                # only whole numbers or half numbers work. multiply by 2, round, divide by 2.0
                rating = round(8 * rating / 10) / 2.0
                data += ("starRating : " + str(rating) + "\n")
            # This is shown on both the Program screen and the Details screen.
            # It uses the standard TV rating system of: TV-Y7, TV-Y, TV-G, TV-PG, TV-14, TV-MA and TV-NR.
            if myShow["contentrating"]:
                data += ("tvRating : " + str(myShow["contentrating"]) + "\n")
            # This field can be repeated as many times as necessary or omitted completely.
            if ep_obj.show.genre:
                for genre in ep_obj.show.genre.split('|'):
                    if genre and genre.strip():
                        data += ("vProgramGenre : " + str(genre.strip()) + "\n")
            # NOTE: The following are metadata keywords are not used
            # displayMajorNumber
            # showingBits
            # displayMinorNumber
            # colorCode
            # vSeriesGenre
            # vGuestStar, vDirector, vExecProducer, vProducer, vWriter, vHost, vChoreographer
            # partCount
            # partIndex
        return data
    def write_ep_file(self, ep_obj):
        """
        Generates and writes ep_obj's metadata under the given path with the
        given filename root. Uses the episode's name with the extension in
        _ep_nfo_extension. Returns True on success, False otherwise.

        ep_obj: TVEpisode object for which to create the metadata

        file_name_path: The file name to use for this metadata. Note that the extension
                will be automatically added based on _ep_nfo_extension. This should
                include an absolute path.
        """
        data = self._ep_data(ep_obj)
        if not data:
            return False
        nfo_file_path = self.get_episode_file_path(ep_obj)
        nfo_file_dir = ek.ek(os.path.dirname, nfo_file_path)
        try:
            if not ek.ek(os.path.isdir, nfo_file_dir):
                logger.log(u"Metadata dir didn't exist, creating it at " + nfo_file_dir, logger.DEBUG)
                ek.ek(os.makedirs, nfo_file_dir)
                helpers.chmodAsParent(nfo_file_dir)
            logger.log(u"Writing episode nfo file to " + nfo_file_path, logger.DEBUG)
            with ek.ek(open, nfo_file_path, 'w') as nfo_file:
                # Calling encode directly, b/c often descriptions have wonky characters.
                nfo_file.write(data.encode("utf-8"))
            helpers.chmodAsParent(nfo_file_path)
        except EnvironmentError, e:
            logger.log(u"Unable to write file to " + nfo_file_path + " - are you sure the folder is writable? " + ex(e), logger.ERROR)
            return False
        return True
# present a standard "interface" from the module
metadata_class = TIVOMetadata
| gpl-3.0 |
lancezlin/pyjs | examples/jsimport/__main__.py | 8 | 1050 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# Python source files to be translated to JavaScript for this example.
TARGETS = [
    'examplejs.py',
]
# Descriptive metadata used by the install step below.
PACKAGE = {
    'title': 'jsimport',
    'desc': 'JS import example',
}
# Thin wrappers around the shared example-build helpers; `util` is bound
# by the bootstrap code at the bottom of this file.
def setup(targets):
    '''Setup example for translation, MUST call util.setup(targets).'''
    util.setup(targets)
def translate():
    '''Translate example, MUST call util.translate().'''
    util.translate()
def install(package):
    '''Install and cleanup example module. MUST call util.install(package)'''
    util.install(package)
##---------------------------------------##
# --------- (-: DO NOT EDIT :-) --------- #
##---------------------------------------##
import sys
import os
# Locate the nearest ancestor directory named "examples"; its _examples
# package provides the shared build helpers (util) used above.
examples = head = os.path.abspath(os.path.dirname(__file__))
while os.path.split(examples)[1].lower() != 'examples':
    parent = os.path.split(examples)[0]
    # At the filesystem root os.path.split() returns (root, '') and the
    # root is truthy, so the old "if not examples" guard never fired and
    # the loop spun forever when no 'examples' ancestor exists. Detect
    # lack of progress instead.
    if not parent or parent == examples:
        raise ValueError("Cannot determine examples directory")
    examples = parent
sys.path.insert(0, os.path.join(examples))
from _examples import util
sys.path.pop(0)
util.init(head)
setup(TARGETS)
translate()
install(PACKAGE)
| apache-2.0 |
pwarren/AGDeviceControl | agdevicecontrol/thirdparty/site-packages/win32/twisted/manhole/explorer.py | 81 | 21497 | # -*- test-case-name: twisted.test.test_explorer -*-
# $Id: explorer.py,v 1.6 2003/02/18 21:15:30 acapnotic Exp $
# Copyright (c) 2001-2004 Twisted Matrix Laboratories.
# See LICENSE for details.
"""Support for python object introspection and exploration.
Note that Explorers, what with their list of attributes, are much like
manhole.coil.Configurables. Someone should investigate this further. (TODO)
Also TODO: Determine how much code in here (particularly the function
signature stuff) can be replaced with functions available in the
L{inspect} module available in Python 2.1.
"""
# System Imports
import inspect, new, string, sys, types
import UserDict
# Twisted Imports
from twisted.spread import pb
from twisted.python import reflect
# Boolean shims for ancient Pythons without the True/False builtins.
True=(1==1)
False=not True
class Pool(UserDict.UserDict):
    """Cache of Explorer instances, keyed by id() of the explored object."""
    def getExplorer(self, object, identifier):
        """Return the cached Explorer for *object*, creating one if needed."""
        oid = id(object)
        if self.data.has_key(oid):
            # XXX: This potentially returns something with
            # 'identifier' set to a different value.
            return self.data[oid]
        else:
            # Pick the Explorer subclass registered for this type,
            # falling back to ExplorerGeneric for unknown types.
            klass = typeTable.get(type(object), ExplorerGeneric)
            # The explorer is registered in the cache *before* __init__
            # runs -- presumably so recursive getExplorer calls made
            # during __init__ find this entry instead of recursing
            # forever (TODO confirm).
            e = new.instance(klass, {})
            self.data[oid] = e
            klass.__init__(e, object, identifier)
            return e
# Module-global explorer cache shared by all Explorer classes below.
explorerPool = Pool()
class Explorer(pb.Cacheable):
    """Base class for all object explorers.

    Subclasses describe themselves through three class-level lists, which
    are accumulated across the inheritance chain in __init__:

      - properties: plain attributes copied into the remote state
      - attributeGroups: attributes holding further Explorers
      - accessors: method names callable remotely via the ViewPoint
    """
    properties = ["id", "identifier"]
    attributeGroups = []
    accessors = ["get_refcount"]
    id = None
    identifier = None
    def __init__(self, object, identifier):
        self.object = object
        self.identifier = identifier
        self.id = id(object)
        # Collect the class-level lists from this class and all bases
        # into per-instance lists.
        self.properties = []
        reflect.accumulateClassList(self.__class__, 'properties',
                                    self.properties)
        self.attributeGroups = []
        reflect.accumulateClassList(self.__class__, 'attributeGroups',
                                    self.attributeGroups)
        self.accessors = []
        reflect.accumulateClassList(self.__class__, 'accessors',
                                    self.accessors)
    def getStateToCopyFor(self, perspective):
        """Build the state dictionary sent to the remote cache."""
        all = ["properties", "attributeGroups", "accessors"]
        all.extend(self.properties)
        all.extend(self.attributeGroups)
        state = {}
        for key in all:
            state[key] = getattr(self, key)
        # The ViewPoint lets the remote side invoke view_* methods with
        # its perspective as the first argument.
        state['view'] = pb.ViewPoint(perspective, self)
        state['explorerClass'] = self.__class__.__name__
        return state
    def view_get_refcount(self, perspective):
        # Remote accessor: current interpreter refcount of this explorer.
        return sys.getrefcount(self)
class ExplorerGeneric(Explorer):
    """Catch-all explorer for types without a dedicated Explorer subclass.

    Exposes only the object's printable forms and its type name.
    """
    properties = ["str", "repr", "typename"]
    def __init__(self, object, identifier):
        Explorer.__init__(self, object, identifier)
        self.str = str(object)
        self.repr = repr(object)
        self.typename = type(object).__name__
class ExplorerImmutable(Explorer):
    """Explorer for immutable values (strings, numbers, None).

    The value itself is small and safe to copy, so it is shipped whole.
    """
    properties = ["value"]
    def __init__(self, object, identifier):
        Explorer.__init__(self, object, identifier)
        self.value = object
class ExplorerSequence(Explorer):
    """Explorer for lists and tuples.

    Elements are not explored at construction time; the remote side
    pulls them on demand through the get_elements accessor.
    """
    properties = ["len"]
    attributeGroups = ["elements"]
    accessors = ["get_elements"]
    def __init__(self, seq, identifier):
        Explorer.__init__(self, seq, identifier)
        self.seq = seq
        self.len = len(seq)
        # Use accessor method to fill me in.
        self.elements = []
    def get_elements(self):
        # Refresh the cached length -- the underlying sequence may have
        # changed since construction.
        self.len = len(self.seq)
        l = []
        for i in xrange(self.len):
            identifier = "%s[%s]" % (self.identifier, i)
            # GLOBAL: using global explorerPool
            l.append(explorerPool.getExplorer(self.seq[i], identifier))
        return l
    def view_get_elements(self, perspective):
        # XXX: set the .elements member of all my remoteCaches
        return self.get_elements()
class ExplorerMapping(Explorer):
    """Explorer for dictionaries.

    Keys are pulled on demand via get_keys; individual values via
    get_item, keyed by an explored key or a raw value.
    """
    properties = ["len"]
    attributeGroups = ["keys"]
    accessors = ["get_keys", "get_item"]
    def __init__(self, dct, identifier):
        Explorer.__init__(self, dct, identifier)
        self.dct = dct
        self.len = len(dct)
        # Use accessor method to fill me in.
        self.keys = []
    def get_keys(self):
        keys = self.dct.keys()
        self.len = len(keys)
        l = []
        for i in xrange(self.len):
            identifier = "%s.keys()[%s]" % (self.identifier, i)
            # GLOBAL: using global explorerPool
            l.append(explorerPool.getExplorer(keys[i], identifier))
        return l
    def view_get_keys(self, perspective):
        # XXX: set the .keys member of all my remoteCaches
        return self.get_keys()
    def view_get_item(self, perspective, key):
        # If the caller handed us an Explorer for the key, unwrap it to
        # the underlying object before indexing.
        if type(key) is types.InstanceType:
            key = key.object
        item = self.dct[key]
        identifier = "%s[%s]" % (self.identifier, repr(key))
        # GLOBAL: using global explorerPool
        item = explorerPool.getExplorer(item, identifier)
        return item
class ExplorerBuiltin(Explorer):
    """Explorer for built-in functions and methods.

    @ivar name: the name the function was defined as
    @ivar doc: function's docstring, or C{None} if unavailable
    @ivar self: if not C{None}, the function is a method of this object.
    """
    properties = ["doc", "name", "self"]
    def __init__(self, function, identifier):
        Explorer.__init__(self, function, identifier)
        self.doc = function.__doc__
        self.name = function.__name__
        self.self = function.__self__
class ExplorerInstance(Explorer):
    """
    Attribute groups:
        - B{methods} -- dictionary of methods
        - B{data} -- dictionary of data members

    Note these are only the *instance* methods and members --
    if you want the class methods, you'll have to look up the class.

    TODO: Detail levels (me, me & class, me & class ancestory)

    @ivar klass: the class this is an instance of.
    """
    properties = ["klass"]
    attributeGroups = ["methods", "data"]
    def __init__(self, instance, identifier):
        Explorer.__init__(self, instance, identifier)
        members = {}
        methods = {}
        for i in dir(instance):
            # TODO: Make screening of private attributes configurable.
            if i[0] == '_':
                continue
            mIdentifier = string.join([identifier, i], ".")
            member = getattr(instance, i)
            mType = type(member)
            # Split attributes into bound methods vs. plain data members.
            if mType is types.MethodType:
                methods[i] = explorerPool.getExplorer(member, mIdentifier)
            else:
                members[i] = explorerPool.getExplorer(member, mIdentifier)
        # Explore the class itself too, so the remote side can drill up.
        self.klass = explorerPool.getExplorer(instance.__class__,
                                              self.identifier +
                                              '.__class__')
        self.data = members
        self.methods = methods
class ExplorerClass(Explorer):
    """
    @ivar name: the name the class was defined with
    @ivar doc: the class's docstring
    @ivar bases: a list of this class's base classes.
    @ivar module: the module the class is defined in

    Attribute groups:
        - B{methods} -- class methods
        - B{data} -- other members of the class
    """
    properties = ["name", "doc", "bases", "module"]
    attributeGroups = ["methods", "data"]
    def __init__(self, theClass, identifier):
        Explorer.__init__(self, theClass, identifier)
        if not identifier:
            identifier = theClass.__name__
        members = {}
        methods = {}
        for i in dir(theClass):
            # Skip privates, but keep __init__ -- it documents the
            # constructor signature.
            if (i[0] == '_') and (i != '__init__'):
                continue
            mIdentifier = string.join([identifier, i], ".")
            member = getattr(theClass, i)
            mType = type(member)
            if mType is types.MethodType:
                methods[i] = explorerPool.getExplorer(member, mIdentifier)
            else:
                members[i] = explorerPool.getExplorer(member, mIdentifier)
        self.name = theClass.__name__
        self.doc = inspect.getdoc(theClass)
        self.data = members
        self.methods = methods
        self.bases = explorerPool.getExplorer(theClass.__bases__,
                                              identifier + ".__bases__")
        self.module = getattr(theClass, '__module__', None)
class ExplorerFunction(Explorer):
    properties = ["name", "doc", "file", "line","signature"]
    # NOTE: the string below is a stray expression statement, not a class
    # docstring (it does not come first), so it is invisible to help().
    """
    name -- the name the function was defined as
    signature -- the function's calling signature (Signature instance)
    doc -- the function's docstring
    file -- the file the function is defined in
    line -- the line in the file the function begins on
    """
    def __init__(self, function, identifier):
        Explorer.__init__(self, function, identifier)
        code = function.func_code
        argcount = code.co_argcount
        # CO_VARARGS / CO_VARKEYWORDS flags from the code object.
        takesList = (code.co_flags & 0x04) and 1
        takesKeywords = (code.co_flags & 0x08) and 1
        # The first n co_varnames are the declared parameters, including
        # the *args/**kwargs slots if present.
        n = (argcount + takesList + takesKeywords)
        signature = Signature(code.co_varnames[:n])
        if function.func_defaults:
            # Defaults align with the *last* len(func_defaults) args.
            i_d = 0
            for i in xrange(argcount - len(function.func_defaults),
                            argcount):
                default = function.func_defaults[i_d]
                default = explorerPool.getExplorer(
                    default, '%s.func_defaults[%d]' % (identifier, i_d))
                signature.set_default(i, default)
                i_d = i_d + 1
        if takesKeywords:
            signature.set_keyword(n - 1)
        if takesList:
            signature.set_varlist(n - 1 - takesKeywords)
        # maybe also: function.func_globals,
        # or at least func_globals.__name__?
        # maybe the bytecode, for disassembly-view?
        self.name = function.__name__
        self.signature = signature
        self.doc = inspect.getdoc(function)
        self.file = code.co_filename
        self.line = code.co_firstlineno
class ExplorerMethod(ExplorerFunction):
    properties = ["self", "klass"]
    # NOTE: stray expression statement below, not a real docstring.
    """
    In addition to ExplorerFunction properties:
    self -- the object I am bound to, or None if unbound
    klass -- the class I am a method of
    """
    def __init__(self, method, identifier):
        function = method.im_func
        # Unwrap callable-instance wrappers down to the real function.
        if type(function) is types.InstanceType:
            function = function.__call__.im_func
        ExplorerFunction.__init__(self, function, identifier)
        # Identify by the method object, not the underlying function.
        self.id = id(method)
        self.klass = explorerPool.getExplorer(method.im_class,
                                              identifier + '.im_class')
        self.self = explorerPool.getExplorer(method.im_self,
                                             identifier + '.im_self')
        if method.im_self:
            # I'm a bound method -- eat the 'self' arg.
            self.signature.discardSelf()
class ExplorerModule(Explorer):
    """
    @ivar name: the name the module was defined as
    @ivar doc: documentation string for the module
    @ivar file: the file the module is defined in

    Attribute groups:
        - B{classes} -- the public classes provided by the module
        - B{functions} -- the public functions provided by the module
        - B{data} -- the public data members provided by the module

    ("Public" is taken to be "anything that doesn't start with _")
    """
    properties = ["name","doc","file"]
    attributeGroups = ["classes", "functions", "data"]
    def __init__(self, module, identifier):
        Explorer.__init__(self, module, identifier)
        functions = {}
        classes = {}
        data = {}
        for key, value in module.__dict__.items():
            if key[0] == '_':
                continue
            mIdentifier = "%s.%s" % (identifier, key)
            # Bucket each public member by kind; imported sub-modules are
            # deliberately skipped to avoid exploring the whole world.
            if type(value) is types.ClassType:
                classes[key] = explorerPool.getExplorer(value,
                                                        mIdentifier)
            elif type(value) is types.FunctionType:
                functions[key] = explorerPool.getExplorer(value,
                                                          mIdentifier)
            elif type(value) is types.ModuleType:
                pass # pass on imported modules
            else:
                data[key] = explorerPool.getExplorer(value, mIdentifier)
        self.name = module.__name__
        self.doc = inspect.getdoc(module)
        self.file = getattr(module, '__file__', None)
        self.classes = classes
        self.functions = functions
        self.data = data
# Dispatch table mapping a python type to the Explorer subclass used to
# introspect objects of that type. Pool.getExplorer falls back to
# ExplorerGeneric for any type not listed here.
typeTable = {types.InstanceType: ExplorerInstance,
             types.ClassType: ExplorerClass,
             types.MethodType: ExplorerMethod,
             types.FunctionType: ExplorerFunction,
             types.ModuleType: ExplorerModule,
             types.BuiltinFunctionType: ExplorerBuiltin,
             types.ListType: ExplorerSequence,
             types.TupleType: ExplorerSequence,
             types.DictType: ExplorerMapping,
             types.StringType: ExplorerImmutable,
             types.NoneType: ExplorerImmutable,
             types.IntType: ExplorerImmutable,
             types.FloatType: ExplorerImmutable,
             types.LongType: ExplorerImmutable,
             types.ComplexType: ExplorerImmutable,
             }
class Signature(pb.Copyable):
    """I represent the signature of a callable.

    Signatures are immutable, so don't expect my contents to change once
    they've been set.

    Each argument slot records its name, its default (if any) and a
    'flavour' marking it as plain, defaulted, *varargs or **keywords.
    All accessors accept either an argument index or an argument name.
    """
    _FLAVOURLESS = None
    _HAS_DEFAULT = 2
    _VAR_LIST = 4
    _KEYWORD_DICT = 8

    def __init__(self, argNames):
        self.name = argNames
        self.default = [None] * len(argNames)
        self.flavour = [None] * len(argNames)

    def _argIndex(self, arg):
        """Normalize *arg* (an index or an argument name) to an index.

        BUG FIX: the original methods tested ``arg is types.StringType``
        -- i.e. whether arg was the str *type object* itself -- so
        looking an argument up by name never converted the name to an
        index. Test the type of arg instead.
        """
        if type(arg) is types.StringType:
            arg = self.name.index(arg)
        return arg

    def get_name(self, arg):
        """Return the name of the given argument slot."""
        return self.name[arg]

    def get_default(self, arg):
        """Return a (hasDefault, default) pair for the given argument."""
        arg = self._argIndex(arg)
        # Wouldn't it be nice if we just returned "None" when there
        # wasn't a default?  Well, yes, but often times "None" *is*
        # the default, so return a tuple instead.
        if self.flavour[arg] == self._HAS_DEFAULT:
            return (True, self.default[arg])
        else:
            return (False, None)

    def set_default(self, arg, value):
        """Record *value* as the default of the given argument."""
        arg = self._argIndex(arg)
        self.flavour[arg] = self._HAS_DEFAULT
        self.default[arg] = value

    def set_varlist(self, arg):
        """Mark the given argument as the *varargs slot."""
        arg = self._argIndex(arg)
        self.flavour[arg] = self._VAR_LIST

    def set_keyword(self, arg):
        """Mark the given argument as the **keywords slot."""
        arg = self._argIndex(arg)
        self.flavour[arg] = self._KEYWORD_DICT

    def is_varlist(self, arg):
        """True if the given argument is the *varargs slot."""
        arg = self._argIndex(arg)
        return (self.flavour[arg] == self._VAR_LIST)

    def is_keyword(self, arg):
        """True if the given argument is the **keywords slot."""
        arg = self._argIndex(arg)
        return (self.flavour[arg] == self._KEYWORD_DICT)

    def discardSelf(self):
        """Invoke me to discard the first argument if this is a bound method.
        """
        self.name = self.name[1:]
        self.default.pop(0)
        self.flavour.pop(0)

    def getStateToCopy(self):
        # Freeze the mutable lists into tuples for transfer over pb.
        return {'name': tuple(self.name),
                'flavour': tuple(self.flavour),
                'default': tuple(self.default)}

    def __len__(self):
        return len(self.name)

    def __str__(self):
        """Render the signature the way it would appear in a def line."""
        arglist = []
        for arg in xrange(len(self)):
            name = self.get_name(arg)
            hasDefault, default = self.get_default(arg)
            if hasDefault:
                a = "%s=%s" % (name, default)
            elif self.is_varlist(arg):
                a = "*%s" % (name,)
            elif self.is_keyword(arg):
                a = "**%s" % (name,)
            else:
                a = name
            arglist.append(a)
        return string.join(arglist, ", ")
class CRUFT_WatchyThingie:
    """Mixin providing change-watching of live objects.

    A watched instance has its class swapped for a dynamically built
    subclass whose __setattr__ and hooked methods report changes back
    through a callback.
    """
    # TODO:
    #
    #  * an exclude mechanism for the watcher's browser, to avoid
    #    sending back large and uninteresting data structures.
    #
    #  * an exclude mechanism for the watcher's trigger, to avoid
    #    triggering on some frequently-called-method-that-doesn't-
    #    actually-change-anything.
    #
    #  * XXX! need removeWatch()
    def watchIdentifier(self, identifier, callback):
        """Watch the object returned by evaluating the identifier.

        Whenever I think the object might have changed, I'll send an
        ObjectLink of it to the callback.

        WARNING: This calls eval() on its argument!
        """
        object = eval(identifier,
                      self.globalNamespace,
                      self.localNamespace)
        return self.watchObject(object, identifier, callback)
    def watchObject(self, object, identifier, callback):
        """Watch the given object.

        Whenever I think the object might have changed, I'll send an
        ObjectLink of it to the callback.

        The identifier argument is used to generate identifiers for
        objects which are members of this one.
        """
        if type(object) is not types.InstanceType:
            raise TypeError, "Sorry, can only place a watch on Instances."
        # uninstallers = []
        # Gather every attribute name visible on the instance: class
        # methods plus instance __dict__ entries.
        dct = {}
        reflect.addMethodNamesToDict(object.__class__, dct, '')
        for k in object.__dict__.keys():
            dct[k] = 1
        members = dct.keys()
        clazzNS = {}
        # Build a one-off subclass whose __setattr__ (from
        # _MonkeysSetattrMixin) reports attribute changes, then swap the
        # instance over to it.
        clazz = new.classobj('Watching%s%X' %
                             (object.__class__.__name__, id(object)),
                             (_MonkeysSetattrMixin, object.__class__,),
                             clazzNS)
        clazzNS['_watchEmitChanged'] = new.instancemethod(
            lambda slf, i=identifier, b=self, cb=callback:
            cb(b.browseObject(slf, i)),
            None, clazz)
        # orig_class = object.__class__
        object.__class__ = clazz
        for name in members:
            m = getattr(object, name)
            # Only hook bound methods.
            if ((type(m) is types.MethodType)
                and (m.im_self is not None)):
                # What's the use of putting watch monkeys on methods
                # in addition to __setattr__?  Well, um, uh, if the
                # methods modify their attributes (i.e. add a key to
                # a dictionary) instead of [re]setting them, then
                # we wouldn't know about it unless we did this.
                # (Is that convincing?)
                monkey = _WatchMonkey(object)
                monkey.install(name)
                # uninstallers.append(monkey.uninstall)
        # XXX: This probably prevents these objects from ever having a
        # zero refcount.  Leak, Leak!
        ## self.watchUninstallers[object] = uninstallers
class _WatchMonkey:
    """I hang on a method and tell you what I see.

    TODO: Aya! Now I just do browseObject all the time, but I could
    tell you what got called with what when and returning what.

    NOTE(review): Python 2 only (`new.instancemethod`, `apply`).
    """
    # (methodIdentifier, original method) tuple once installed;
    # None until install() has been called.
    oldMethod = None

    def __init__(self, instance):
        """Make a monkey to hang on this instance object."""
        self.instance = instance

    def install(self, methodIdentifier):
        """Install myself on my instance in place of this method."""
        oldMethod = getattr(self.instance, methodIdentifier, None)
        # XXX: this conditional probably isn't effective.
        if oldMethod is not self:
            # avoid triggering __setattr__
            self.instance.__dict__[methodIdentifier] = (
                new.instancemethod(self, self.instance,
                                   self.instance.__class__))
        self.oldMethod = (methodIdentifier, oldMethod)

    def uninstall(self):
        """Remove myself from this instance and restore the original method.

        (I hope.)
        """
        if self.oldMethod is None:
            return
        # XXX: This probably doesn't work if multiple monkies are hanging
        # on a method and they're not removed in order.
        if self.oldMethod[1] is None:
            # Nothing shadowed an instance attribute before: just drop ours.
            delattr(self.instance, self.oldMethod[0])
        else:
            setattr(self.instance, self.oldMethod[0], self.oldMethod[1])

    def __call__(self, instance, *a, **kw):
        """Pretend to be the method I replaced, and ring the bell."""
        # Delegate to the wrapped method (if any), then notify the watcher.
        if self.oldMethod[1]:
            rval = apply(self.oldMethod[1], a, kw)
        else:
            rval = None
        instance._watchEmitChanged()
        return rval
class _MonkeysSetattrMixin:
    """Mix-in whose __setattr__ notifies the watcher on every assignment."""

    def __setattr__(self, k, v):
        """Store the attribute through the wrapped class, then ring the bell."""
        # __bases__[1] is the instance's original (pre-wrapping) class;
        # the watcher constructed this class with the mixin first, so the
        # layout is known.  Delegate to its __setattr__ when it has one,
        # otherwise fall back to a plain dict write.
        original = self.__class__.__bases__[1]
        if hasattr(original, '__setattr__'):
            original.__setattr__(self, k, v)
        else:
            self.__dict__[k] = v
        # Any attribute change is potentially interesting (it might even
        # be a new method being hung on us) -- notify the watcher.
        self._watchEmitChanged()
| gpl-2.0 |
Wilee999/panda3d | direct/src/actor/DistributedActor.py | 11 | 1310 | """DistributedActor module: contains the DistributedActor class"""
__all__ = ['DistributedActor']
from direct.distributed import DistributedNode
import Actor
class DistributedActor(DistributedNode.DistributedNode, Actor.Actor):
    """An Actor that participates in the distributed-object system."""

    def __init__(self, cr):
        # Guard against repeated __init__ calls: the sentinel attribute is
        # missing on first construction only.  Catch AttributeError
        # specifically -- the original bare `except:` also swallowed
        # KeyboardInterrupt/SystemExit and masked typos inside the body.
        try:
            self.DistributedActor_initialized
        except AttributeError:
            self.DistributedActor_initialized = 1
            Actor.Actor.__init__(self)
            DistributedNode.DistributedNode.__init__(self, cr)
            # Since actors are probably fairly heavyweight, we'd
            # rather cache them than delete them if possible.
            self.setCacheable(1)

    def disable(self):
        """Tear down animation state before the node itself is disabled."""
        # remove all anims, on all parts and all lods
        if not self.isEmpty():
            Actor.Actor.unloadAnims(self, None, None, None)
        DistributedNode.DistributedNode.disable(self)

    def delete(self):
        """Delete once, guarding with a sentinel like __init__ does."""
        try:
            self.DistributedActor_deleted
        except AttributeError:
            self.DistributedActor_deleted = 1
            DistributedNode.DistributedNode.delete(self)
            Actor.Actor.delete(self)

    def loop(self, animName, restart=1, partName=None, fromFrame=None, toFrame=None):
        """Forward directly to Actor.loop; kept for an explicit MRO choice."""
        return Actor.Actor.loop(self, animName, restart, partName, fromFrame, toFrame)
| bsd-3-clause |
pixelrebel/st2 | st2common/st2common/logging/filters.py | 13 | 1591 | # Licensed to the StackStorm, Inc ('StackStorm') under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
# Names exported via `from <module> import *`.
__all__ = [
    'ExclusionFilter',
    'LogLevelFilter',
]
class ExclusionFilter(object):
    """Logging filter that drops records from excluded top-level modules."""

    def __init__(self, exclusions):
        """
        :param exclusions: Iterable of top-level logger-name components
                           whose records should be suppressed.
        """
        self._exclusions = set(exclusions)

    def filter(self, record):
        """Return True to keep the record, False to drop it."""
        # Nothing configured -> everything passes.
        if not self._exclusions:
            return True
        # Compare the first dotted component of the logger name
        # (e.g. 'kombu' for 'kombu.connection') against the exclusions.
        parts = record.name.split('.')
        excluded = len(parts) > 0 and parts[0] in self._exclusions
        return not excluded
class LogLevelFilter(logging.Filter):
    """
    Filter which excludes log messages which match the provided log levels.
    """

    def __init__(self, log_levels):
        """
        :param log_levels: Collection of numeric levels (record.levelno
                           values) to exclude.
        """
        self._log_levels = log_levels

    def filter(self, record):
        """Return True to keep the record, False when its level is excluded."""
        return record.levelno not in self._log_levels
| apache-2.0 |
theflofly/tensorflow | tensorflow/python/keras/layers/pooling_test.py | 10 | 7660 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for pooling layers."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python import keras
from tensorflow.python.eager import context
from tensorflow.python.framework import test_util as tf_test_util
from tensorflow.python.keras import testing_utils
from tensorflow.python.platform import test
class GlobalPoolingTest(test.TestCase):
  """Smoke tests for the global max/average pooling layers (1D/2D/3D)."""

  @tf_test_util.run_in_graph_and_eager_modes
  def test_globalpooling_1d(self):
    # For each layer: default data_format first, then channels_first --
    # same call sequence as before, expressed as a loop.
    for layer_cls in (keras.layers.pooling.GlobalMaxPooling1D,
                      keras.layers.pooling.GlobalAveragePooling1D):
      testing_utils.layer_test(layer_cls, input_shape=(3, 4, 5))
      testing_utils.layer_test(layer_cls,
                               kwargs={'data_format': 'channels_first'},
                               input_shape=(3, 4, 5))

  @tf_test_util.run_in_graph_and_eager_modes
  def test_globalpooling_1d_masking_support(self):
    # A fully masked timestep must not contribute to the average: the
    # pooled output of sample 0 equals its only unmasked timestep.
    model = keras.Sequential()
    model.add(keras.layers.Masking(mask_value=0., input_shape=(3, 4)))
    model.add(keras.layers.GlobalAveragePooling1D())
    model.compile(loss='mae', optimizer='rmsprop')
    model_input = np.random.random((2, 3, 4))
    model_input[0, 1:, :] = 0
    output = model.predict(model_input)
    self.assertAllClose(output[0], model_input[0, 0, :])

  @tf_test_util.run_in_graph_and_eager_modes
  def test_globalpooling_2d(self):
    for layer_cls in (keras.layers.pooling.GlobalMaxPooling2D,
                      keras.layers.pooling.GlobalAveragePooling2D):
      testing_utils.layer_test(layer_cls,
                               kwargs={'data_format': 'channels_first'},
                               input_shape=(3, 4, 5, 6))
      testing_utils.layer_test(layer_cls,
                               kwargs={'data_format': 'channels_last'},
                               input_shape=(3, 5, 6, 4))

  @tf_test_util.run_in_graph_and_eager_modes
  def test_globalpooling_3d(self):
    for layer_cls in (keras.layers.pooling.GlobalMaxPooling3D,
                      keras.layers.pooling.GlobalAveragePooling3D):
      for fmt in ('channels_first', 'channels_last'):
        testing_utils.layer_test(layer_cls,
                                 kwargs={'data_format': fmt},
                                 input_shape=(3, 4, 3, 4, 3))
class Pooling2DTest(test.TestCase):
  """Smoke tests for MaxPooling2D / AveragePooling2D."""

  @tf_test_util.run_in_graph_and_eager_modes
  def test_maxpooling_2d(self):
    pool_size = (3, 3)
    for strides in [(1, 1), (2, 2)]:
      testing_utils.layer_test(
          keras.layers.MaxPooling2D,
          kwargs={
              'strides': strides,
              'padding': 'valid',
              'pool_size': pool_size
          },
          input_shape=(3, 5, 6, 4))

  @tf_test_util.run_in_graph_and_eager_modes
  def test_averagepooling_2d(self):
    # 'same' and 'valid' padding variants on channels_last input.
    testing_utils.layer_test(
        keras.layers.AveragePooling2D,
        kwargs={'strides': (2, 2),
                'padding': 'same',
                'pool_size': (2, 2)},
        input_shape=(3, 5, 6, 4))
    testing_utils.layer_test(
        keras.layers.AveragePooling2D,
        kwargs={'strides': (2, 2),
                'padding': 'valid',
                'pool_size': (3, 3)},
        input_shape=(3, 5, 6, 4))

    # This part of the test can only run on GPU but doesn't appear
    # to be properly assigned to a GPU when running in eager mode.
    if not context.executing_eagerly():
      # Only runs on GPU with CUDA, channels_first is not supported on CPU.
      # TODO(b/62340061): Support channels_first on CPU.
      if test.is_gpu_available(cuda_only=True):
        testing_utils.layer_test(
            keras.layers.AveragePooling2D,
            kwargs={
                'strides': (1, 1),
                'padding': 'valid',
                'pool_size': (2, 2),
                'data_format': 'channels_first'
            },
            input_shape=(3, 4, 5, 6))
class Pooling3DTest(test.TestCase):
  """Smoke tests for MaxPooling3D / AveragePooling3D."""

  @tf_test_util.run_in_graph_and_eager_modes
  def test_maxpooling_3d(self):
    window = (3, 3, 3)
    # channels_last (default) layout first, then channels_first.
    testing_utils.layer_test(
        keras.layers.MaxPooling3D,
        kwargs={'strides': 2, 'padding': 'valid', 'pool_size': window},
        input_shape=(3, 11, 12, 10, 4))
    testing_utils.layer_test(
        keras.layers.MaxPooling3D,
        kwargs={'strides': 3,
                'padding': 'valid',
                'data_format': 'channels_first',
                'pool_size': window},
        input_shape=(3, 4, 11, 12, 10))

  @tf_test_util.run_in_graph_and_eager_modes
  def test_averagepooling_3d(self):
    window = (3, 3, 3)
    testing_utils.layer_test(
        keras.layers.AveragePooling3D,
        kwargs={'strides': 2, 'padding': 'valid', 'pool_size': window},
        input_shape=(3, 11, 12, 10, 4))
    testing_utils.layer_test(
        keras.layers.AveragePooling3D,
        kwargs={'strides': 3,
                'padding': 'valid',
                'data_format': 'channels_first',
                'pool_size': window},
        input_shape=(3, 4, 11, 12, 10))
class Pooling1DTest(test.TestCase):
  """Smoke tests for MaxPooling1D / AveragePooling1D."""

  @tf_test_util.run_in_graph_and_eager_modes
  def test_maxpooling_1d(self):
    # Same padding/stride grid as before ('valid' then 'same', 1 then 2).
    for pad in ('valid', 'same'):
      for step in (1, 2):
        testing_utils.layer_test(
            keras.layers.MaxPooling1D,
            kwargs={'strides': step, 'padding': pad},
            input_shape=(3, 5, 4))
    testing_utils.layer_test(
        keras.layers.MaxPooling1D,
        kwargs={'data_format': 'channels_first'},
        input_shape=(3, 2, 6))

  @tf_test_util.run_in_graph_and_eager_modes
  def test_averagepooling_1d(self):
    for pad in ('valid', 'same'):
      for step in (1, 2):
        testing_utils.layer_test(
            keras.layers.AveragePooling1D,
            kwargs={'strides': step, 'padding': pad},
            input_shape=(3, 5, 4))
    testing_utils.layer_test(
        keras.layers.AveragePooling1D,
        kwargs={'data_format': 'channels_first'},
        input_shape=(3, 2, 6))
# Run the pooling layer test suites when executed directly.
if __name__ == '__main__':
  test.main()
| apache-2.0 |
endlessm/chromium-browser | tools/swarming_client/tests/lru_test.py | 4 | 6409 | #!/usr/bin/env vpython3
# Copyright 2013 The LUCI Authors. All rights reserved.
# Use of this source code is governed under the Apache License, Version 2.0
# that can be found in the LICENSE file.
import json
import os
import tempfile
import unittest
# Mutates sys.path.
import test_env
from utils import lru
def _load_from_raw(state_text):
  """Makes a LRUDict by loading the given JSON from a file.

  The text is written to a fresh temporary file, loaded via
  lru.LRUDict.load(), and the file is removed afterwards (best effort).
  """
  fd, path = tempfile.mkstemp(prefix=u'lru_test')
  os.close(fd)
  try:
    with open(path, 'w') as f:
      f.write(state_text)
    return lru.LRUDict.load(path)
  finally:
    # Cleanup is best-effort; a stale temp file is not a test failure.
    try:
      os.unlink(path)
    except OSError:
      pass
def _save_and_load(lru_dict):
  """Saves then reloads a LRUDict instance through a temporary file."""
  fd, path = tempfile.mkstemp(prefix=u'lru_test')
  os.close(fd)
  try:
    lru_dict.save(path)
    return lru.LRUDict.load(path)
  finally:
    # Cleanup is best-effort; a stale temp file is not a test failure.
    try:
      os.unlink(path)
    except OSError:
      pass
def _prepare_lru_dict(data):
  """Returns a new LRUDict populated with (key, value) pairs in order."""
  out = lru.LRUDict()
  for key, value in data:
    out.add(key, value)
  return out
class LRUDictTest(unittest.TestCase):
  """Unit tests for lru.LRUDict: ordering, accessors and persistence."""

  def assert_same_data(self, expected, lru_dict):
    """Asserts that given |lru_dict| contains same data as |expected|.

    Tests items(), values(), get(), __iter__.
    """
    self.assertEqual(list(lru_dict.items()), expected)
    self.assertEqual(set(lru_dict), set(k for k, v in expected))
    self.assertEqual(list(lru_dict.values()), [v for k, v in expected])
    for k, v in expected:
      self.assertEqual(lru_dict.get(k), v)

  def test_empty(self):
    self.assert_same_data([], lru.LRUDict())

  def test_magic_methods_empty(self):
    """Tests __nonzero__, __iter, __len__, __getitem__ and __contains__."""
    # Check for empty dict.
    lru_dict = lru.LRUDict()
    self.assertFalse(lru_dict)
    self.assertEqual(len(lru_dict), 0)
    self.assertFalse(1 in lru_dict)
    self.assertFalse([i for i in lru_dict])
    with self.assertRaises(KeyError):
      _ = lru_dict[1]

  def test_magic_methods_nonempty(self):
    """Tests __nonzero__, __iter, __len__, __getitem__ and __contains__."""
    # Dict with one item.
    lru_dict = lru.LRUDict()
    lru_dict.add(1, 'one')
    self.assertTrue(lru_dict)
    self.assertEqual(len(lru_dict), 1)
    self.assertTrue(1 in lru_dict)
    self.assertFalse(2 in lru_dict)
    self.assertTrue([i for i in lru_dict])
    self.assertEqual('one', lru_dict[1])

  def test_add(self):
    # Re-adding an existing key updates its value and moves it to the
    # most-recently-used end; a fresh key is appended there too.
    lru_dict = _prepare_lru_dict([(1, 'one'), (2, 'two'), (3, 'three')])
    lru_dict.add(1, 'one!!!')
    expected = [(2, 'two'), (3, 'three'), (1, 'one!!!')]
    self.assert_same_data(expected, lru_dict)
    lru_dict.add(0, 'zero')
    expected = [(2, 'two'), (3, 'three'), (1, 'one!!!'), (0, 'zero')]
    self.assert_same_data(expected, lru_dict)

  def test_pop_first(self):
    lru_dict = _prepare_lru_dict([(1, 'one'), (2, 'two'), (3, 'three')])
    lru_dict.pop(1)
    self.assert_same_data([(2, 'two'), (3, 'three')], lru_dict)

  def test_pop_middle(self):
    lru_dict = _prepare_lru_dict([(1, 'one'), (2, 'two'), (3, 'three')])
    lru_dict.pop(2)
    self.assert_same_data([(1, 'one'), (3, 'three')], lru_dict)

  def test_pop_last(self):
    lru_dict = _prepare_lru_dict([(1, 'one'), (2, 'two'), (3, 'three')])
    lru_dict.pop(3)
    self.assert_same_data([(1, 'one'), (2, 'two')], lru_dict)

  def test_pop_missing(self):
    lru_dict = _prepare_lru_dict([(1, 'one'), (2, 'two'), (3, 'three')])
    with self.assertRaises(KeyError):
      lru_dict.pop(4)

  def test_touch(self):
    # touch() marks an entry most-recently-used without changing its value.
    lru_dict = _prepare_lru_dict([(1, 'one'), (2, 'two'), (3, 'three')])
    lru_dict.touch(2)
    self.assert_same_data([(1, 'one'), (3, 'three'), (2, 'two')], lru_dict)
    with self.assertRaises(KeyError):
      lru_dict.touch(4)

  def test_timestamp(self):
    """Tests get_oldest, pop_oldest."""
    lru_dict = lru.LRUDict()
    now = 0
    # Deterministic fake clock so stored timestamps are predictable.
    lru_dict.time_fn = lambda: now
    lru_dict.add('ka', 'va')
    now += 1
    lru_dict.add('kb', 'vb')
    now += 1
    self.assertEqual(lru_dict.get_oldest(), ('ka', ('va', 0)))
    self.assertEqual(lru_dict.pop_oldest(), ('ka', ('va', 0)))
    self.assertEqual(lru_dict.get_oldest(), ('kb', ('vb', 1)))
    self.assertEqual(lru_dict.pop_oldest(), ('kb', ('vb', 1)))

  def test_transform(self):
    # transform() applies a callable to every stored value in place.
    lru_dict = lru.LRUDict()
    lru_dict.add('ka', 'va')
    lru_dict.add('kb', 'vb')
    lru_dict.transform(lambda k, v: v + '*')
    self.assert_same_data([('ka', 'va*'), ('kb', 'vb*')], lru_dict)

  def test_load_save_empty(self):
    self.assertFalse(_save_and_load(lru.LRUDict()))

  def test_load_save(self):
    # Persistence must preserve the recency ordering in all cases.
    data = [(1, None), (2, None), (3, None)]
    # Normal flow.
    lru_dict = _prepare_lru_dict(data)
    expected = [(1, None), (2, None), (3, None)]
    self.assert_same_data(expected, _save_and_load(lru_dict))
    # After touches.
    lru_dict = _prepare_lru_dict(data)
    lru_dict.touch(2)
    expected = [(1, None), (3, None), (2, None)]
    self.assert_same_data(expected, _save_and_load(lru_dict))
    # After pop.
    lru_dict = _prepare_lru_dict(data)
    lru_dict.pop(2)
    expected = [(1, None), (3, None)]
    self.assert_same_data(expected, _save_and_load(lru_dict))
    # After add.
    lru_dict = _prepare_lru_dict(data)
    lru_dict.add(4, 4)
    expected = [(1, None), (2, None), (3, None), (4, 4)]
    self.assert_same_data(expected, _save_and_load(lru_dict))

  def test_corrupted_state_file(self):
    # Loads correct state just fine.
    s = _load_from_raw(
        json.dumps({
            'version': lru.CURRENT_VERSION,
            'items': [
                ['key1', ['value1', 1]],
                ['key2', ['value2', 2]],
            ],
        }))
    self.assertIsNotNone(s)
    self.assertEqual(2, len(s))
    # Not a json.
    with self.assertRaises(ValueError):
      _load_from_raw('garbage, not a state')
    # Not a list.
    with self.assertRaises(ValueError):
      _load_from_raw('{}')
    # Not a list of pairs.
    with self.assertRaises(ValueError):
      _load_from_raw(json.dumps([
          ['key', 'value', 'and whats this?'],
      ]))
    # Duplicate keys.
    with self.assertRaises(ValueError):
      _load_from_raw(json.dumps([
          ['key', 'value'],
          ['key', 'another_value'],
      ]))
# Entry point: delegate to the shared test harness.
if __name__ == '__main__':
  test_env.main()
| bsd-3-clause |
frappe/frappe | frappe/tests/test_permissions.py | 1 | 19796 | # Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
# MIT License. See license.txt
"""Use blog post test to test user permissions logic"""
import frappe
import frappe.defaults
import unittest
import frappe.model.meta
from frappe.permissions import (add_user_permission, remove_user_permission,
clear_user_permissions_for_doctype, get_doc_permissions, add_permission, update_permission_property)
from frappe.core.page.permission_manager.permission_manager import update, reset
from frappe.test_runner import make_test_records_for_doctype
from frappe.core.doctype.user_permission.user_permission import clear_user_permissions
from frappe.desk.form.load import getdoc
test_dependencies = ['Blogger', 'Blog Post', "User", "Contact", "Salutation"]
class TestPermissions(unittest.TestCase):
def setUp(self):
    """Reset permission state and switch to test1@example.com before each test."""
    frappe.clear_cache(doctype="Blog Post")

    # Role assignment is done only once per test session (guarded by a
    # frappe.flags flag) since roles persist across tests.
    if not frappe.flags.permission_user_setup_done:
        user = frappe.get_doc("User", "test1@example.com")
        user.add_roles("Website Manager")
        user.add_roles("System Manager")

        user = frappe.get_doc("User", "test2@example.com")
        user.add_roles("Blogger")

        user = frappe.get_doc("User", "test3@example.com")
        user.add_roles("Sales User")

        user = frappe.get_doc("User", "testperm@example.com")
        user.add_roles("Website Manager")
        frappe.flags.permission_user_setup_done = True

    # Reset customized role permissions and wipe all user permissions.
    reset('Blogger')
    reset('Blog Post')
    frappe.db.sql('delete from `tabUser Permission`')
    frappe.set_user("test1@example.com")
def tearDown(self):
    """Restore the Administrator session and drop per-test permission state."""
    frappe.set_user("Administrator")
    # Detach the user linked to the test blogger by some tests.
    frappe.db.set_value("Blogger", "_Test Blogger 1", "user", None)

    clear_user_permissions_for_doctype("Blog Category")
    clear_user_permissions_for_doctype("Blog Post")
    clear_user_permissions_for_doctype("Blogger")
@staticmethod
def set_strict_user_permissions(ignore):
    """Toggle the 'Apply Strict User Permissions' system setting."""
    settings = frappe.get_doc("System Settings")
    settings.apply_strict_user_permissions = ignore
    # System Settings has mandatory fields irrelevant to this toggle.
    settings.flags.ignore_mandatory = 1
    settings.save()
def test_basic_permission(self):
    """The current (privileged) user can read an existing blog post."""
    doc = frappe.get_doc("Blog Post", "-test-blog-post")
    self.assertTrue(doc.has_permission("read"))
def test_select_permission(self):
    """With only 'select' granted, read must fail and save must raise."""
    # grant only select perm to blog post
    add_permission('Blog Post', 'Sales User', 0)
    update_permission_property('Blog Post', 'Sales User', 0, 'select', 1)
    update_permission_property('Blog Post', 'Sales User', 0, 'read', 0)
    update_permission_property('Blog Post', 'Sales User', 0, 'write', 0)

    frappe.clear_cache(doctype="Blog Post")
    frappe.set_user("test3@example.com")

    # validate select perm
    post = frappe.get_doc("Blog Post", "-test-blog-post")
    self.assertTrue(post.has_permission("select"))

    # validate does not have read and write perm
    self.assertFalse(post.has_permission("read"))
    self.assertRaises(frappe.PermissionError, post.save)
def test_user_permissions_in_doc(self):
    """A user permission on Blog Category limits which posts are readable."""
    add_user_permission("Blog Category", "-test-blog-category-1",
        "test2@example.com")

    frappe.set_user("test2@example.com")

    # Post in a non-permitted category: no read access.
    post = frappe.get_doc("Blog Post", "-test-blog-post")
    self.assertFalse(post.has_permission("read"))
    self.assertFalse(get_doc_permissions(post).get("read"))

    # Post in the permitted category: readable.
    post1 = frappe.get_doc("Blog Post", "-test-blog-post-1")
    self.assertTrue(post1.has_permission("read"))
    self.assertTrue(get_doc_permissions(post1).get("read"))
def test_user_permissions_in_report(self):
    """get_list results are filtered down to the permitted category."""
    add_user_permission("Blog Category", "-test-blog-category-1", "test2@example.com")

    frappe.set_user("test2@example.com")
    visible = [d.name for d in frappe.get_list("Blog Post", fields=["name", "blog_category"])]

    self.assertTrue("-test-blog-post-1" in visible)
    self.assertFalse("-test-blog-post" in visible)
def test_default_values(self):
    """User permissions drive the default of the blog_category link field."""
    doc = frappe.new_doc("Blog Post")
    self.assertFalse(doc.get("blog_category"))

    # Fetch default based on single user permission
    add_user_permission("Blog Category", "-test-blog-category-1", "test2@example.com")

    frappe.set_user("test2@example.com")
    doc = frappe.new_doc("Blog Post")
    self.assertEqual(doc.get("blog_category"), "-test-blog-category-1")

    # Don't fetch default if user permissions is more than 1
    add_user_permission("Blog Category", "-test-blog-category", "test2@example.com", ignore_permissions=True)
    frappe.clear_cache()
    doc = frappe.new_doc("Blog Post")
    self.assertFalse(doc.get("blog_category"))

    # Fetch user permission set as default from multiple user permission
    add_user_permission("Blog Category", "-test-blog-category-2", "test2@example.com", ignore_permissions=True, is_default=1)
    frappe.clear_cache()
    doc = frappe.new_doc("Blog Post")
    self.assertEqual(doc.get("blog_category"), "-test-blog-category-2")
def test_user_link_match_doc(self):
    """Posts become readable when the post's blogger is linked to the user."""
    blogger = frappe.get_doc("Blogger", "_Test Blogger 1")
    blogger.user = "test2@example.com"
    blogger.save()

    frappe.set_user("test2@example.com")

    own_post = frappe.get_doc("Blog Post", "-test-blog-post-2")
    self.assertTrue(own_post.has_permission("read"))

    other_post = frappe.get_doc("Blog Post", "-test-blog-post-1")
    self.assertFalse(other_post.has_permission("read"))
def test_user_link_match_report(self):
    """get_list honours the blogger-user link as well."""
    blogger = frappe.get_doc("Blogger", "_Test Blogger 1")
    blogger.user = "test2@example.com"
    blogger.save()

    frappe.set_user("test2@example.com")
    visible = [d.name for d in frappe.get_list("Blog Post", fields=["name", "owner"])]

    self.assertTrue("-test-blog-post-2" in visible)
    self.assertFalse("-test-blog-post-1" in visible)
def test_set_user_permissions(self):
    """A Website/System Manager can grant a user permission (also used as a helper)."""
    frappe.set_user("test1@example.com")
    add_user_permission("Blog Post", "-test-blog-post", "test2@example.com")
def test_not_allowed_to_set_user_permissions(self):
    """A user without the required role cannot grant user permissions."""
    frappe.set_user("test2@example.com")

    # this user can't add user permissions
    self.assertRaises(frappe.PermissionError, add_user_permission,
        "Blog Post", "-test-blog-post", "test2@example.com")
def test_read_if_explicit_user_permissions_are_set(self):
    """Once a user permission exists, only the permitted post is readable."""
    self.test_set_user_permissions()

    frappe.set_user("test2@example.com")

    # user can only access permitted blog post
    doc = frappe.get_doc("Blog Post", "-test-blog-post")
    self.assertTrue(doc.has_permission("read"))

    # and not this one
    doc = frappe.get_doc("Blog Post", "-test-blog-post-1")
    self.assertFalse(doc.has_permission("read"))
def test_not_allowed_to_remove_user_permissions(self):
    """A non-privileged user cannot remove user permissions, even their own."""
    self.test_set_user_permissions()

    frappe.set_user("test2@example.com")

    # user cannot remove their own user permissions
    self.assertRaises(frappe.PermissionError, remove_user_permission,
        "Blog Post", "-test-blog-post", "test2@example.com")
def test_user_permissions_if_applied_on_doc_being_evaluated(self):
    """A user permission on Blog Post itself restricts all other posts."""
    frappe.set_user("test2@example.com")
    # Before any user permission exists, the post is readable by role.
    doc = frappe.get_doc("Blog Post", "-test-blog-post-1")
    self.assertTrue(doc.has_permission("read"))

    frappe.set_user("test1@example.com")
    add_user_permission("Blog Post", "-test-blog-post", "test2@example.com")

    frappe.set_user("test2@example.com")
    # Now only the explicitly permitted post remains readable.
    doc = frappe.get_doc("Blog Post", "-test-blog-post-1")
    self.assertFalse(doc.has_permission("read"))

    doc = frappe.get_doc("Blog Post", "-test-blog-post")
    self.assertTrue(doc.has_permission("read"))
def test_set_only_once(self):
    """A set_only_once field cannot be changed once it holds a value."""
    blog_post = frappe.get_meta("Blog Post")
    doc = frappe.get_doc("Blog Post", "-test-blog-post-1")
    doc.db_set('title', 'Old')
    blog_post.get_field("title").set_only_once = 1
    doc.title = "New"
    self.assertRaises(frappe.CannotChangeConstantError, doc.save)
    # Reset the meta flag so later tests see the original schema.
    blog_post.get_field("title").set_only_once = 0
def test_set_only_once_child_table_rows(self):
    """Removing rows from a set_only_once child table must be rejected."""
    doctype_meta = frappe.get_meta("DocType")
    doctype_meta.get_field("fields").set_only_once = 1
    doc = frappe.get_doc("DocType", "Blog Post")

    # remove last one
    doc.fields = doc.fields[:-1]

    self.assertRaises(frappe.CannotChangeConstantError, doc.save)
    # Clear the cached (mutated) meta.
    frappe.clear_cache(doctype='DocType')
def test_set_only_once_child_table_row_value(self):
    """Editing a value inside a set_only_once child table must be rejected."""
    doctype_meta = frappe.get_meta("DocType")
    doctype_meta.get_field("fields").set_only_once = 1
    doc = frappe.get_doc("DocType", "Blog Post")

    # change one property from the child table
    doc.fields[-1].fieldtype = 'Check'

    self.assertRaises(frappe.CannotChangeConstantError, doc.save)
    # Clear the cached (mutated) meta.
    frappe.clear_cache(doctype='DocType')
def test_set_only_once_child_table_okay(self):
    """An unchanged set_only_once child table passes validation."""
    doctype_meta = frappe.get_meta("DocType")
    doctype_meta.get_field("fields").set_only_once = 1
    doc = frappe.get_doc("DocType", "Blog Post")
    doc.load_doc_before_save()
    self.assertFalse(doc.validate_set_only_once())
    # Clear the cached (mutated) meta.
    frappe.clear_cache(doctype='DocType')
def test_user_permission_doctypes(self):
    """Permissions on two linked doctypes combine: only posts matching
    both the category and the blogger are readable."""
    add_user_permission("Blog Category", "-test-blog-category-1",
        "test2@example.com")
    add_user_permission("Blogger", "_Test Blogger 1",
        "test2@example.com")

    frappe.set_user("test2@example.com")

    frappe.clear_cache(doctype="Blog Post")

    doc = frappe.get_doc("Blog Post", "-test-blog-post")
    self.assertFalse(doc.has_permission("read"))

    doc = frappe.get_doc("Blog Post", "-test-blog-post-2")
    self.assertTrue(doc.has_permission("read"))

    frappe.clear_cache(doctype="Blog Post")
def if_owner_setup(self):
    """Helper (not a test): enable if_owner for the Blogger role and
    grant category/blogger user permissions to test2@example.com."""
    update('Blog Post', 'Blogger', 0, 'if_owner', 1)

    add_user_permission("Blog Category", "-test-blog-category-1",
        "test2@example.com")
    add_user_permission("Blogger", "_Test Blogger 1",
        "test2@example.com")

    frappe.clear_cache(doctype="Blog Post")
def test_insert_if_owner_with_user_permissions(self):
    """If `If Owner` is checked for a Role, check if that document
    is allowed to be read, updated, submitted, etc. except be created,
    even if the document is restricted based on User Permissions."""
    frappe.delete_doc('Blog Post', '-test-blog-post-title')

    self.if_owner_setup()

    frappe.set_user("test2@example.com")

    doc = frappe.get_doc({
        "doctype": "Blog Post",
        "blog_category": "-test-blog-category",
        "blogger": "_Test Blogger 1",
        "title": "_Test Blog Post Title",
        "content": "_Test Blog Post Content"
    })

    # No user permission for this category yet -> insert must fail.
    self.assertRaises(frappe.PermissionError, doc.insert)

    frappe.set_user('test1@example.com')
    add_user_permission("Blog Category", "-test-blog-category",
        "test2@example.com")

    frappe.set_user("test2@example.com")
    doc.insert()

    frappe.set_user("Administrator")
    remove_user_permission("Blog Category", "-test-blog-category",
        "test2@example.com")

    frappe.set_user("test2@example.com")
    doc = frappe.get_doc(doc.doctype, doc.name)
    # As owner: read/write allowed, create is not.
    self.assertTrue(doc.has_permission("read"))
    self.assertTrue(doc.has_permission("write"))
    self.assertFalse(doc.has_permission("create"))

    # delete created record
    frappe.set_user("Administrator")
    frappe.delete_doc('Blog Post', '-test-blog-post-title')
def test_ignore_user_permissions_if_missing(self):
    """If there are no user permissions, then allow as per role"""
    add_user_permission("Blog Category", "-test-blog-category",
        "test2@example.com")
    frappe.set_user("test2@example.com")

    doc = frappe.get_doc({
        "doctype": "Blog Post",
        "blog_category": "-test-blog-category-2",
        "blogger": "_Test Blogger 1",
        "title": "_Test Blog Post Title",
        "content": "_Test Blog Post Content"
    })

    # Category not in the user's permitted set -> write denied.
    self.assertFalse(doc.has_permission("write"))

    frappe.set_user("Administrator")
    remove_user_permission("Blog Category", "-test-blog-category",
        "test2@example.com")

    frappe.set_user("test2@example.com")
    # With no user permissions left, role permissions alone apply.
    self.assertTrue(doc.has_permission('write'))
def test_strict_user_permissions(self):
    """With 'Apply Strict User Permissions' enabled, records whose linked
    field has no matching user permission are hidden; without it, such
    records remain visible."""
    frappe.set_user('Administrator')
    # Start from a clean Contact fixture set.
    frappe.db.sql('DELETE FROM `tabContact`')
    frappe.db.sql('DELETE FROM `tabContact Email`')
    frappe.db.sql('DELETE FROM `tabContact Phone`')
    reset('Salutation')
    reset('Contact')
    make_test_records_for_doctype('Contact', force=True)

    add_user_permission("Salutation", "Mr", "test3@example.com")
    self.set_strict_user_permissions(0)

    allowed_contact = frappe.get_doc('Contact', '_Test Contact For _Test Customer')
    other_contact = frappe.get_doc('Contact', '_Test Contact For _Test Supplier')

    # Non-strict mode: both contacts remain visible.
    frappe.set_user("test3@example.com")
    self.assertTrue(allowed_contact.has_permission('read'))
    self.assertTrue(other_contact.has_permission('read'))
    self.assertEqual(len(frappe.get_list("Contact")), 2)

    frappe.set_user("Administrator")
    self.set_strict_user_permissions(1)

    # Strict mode: the contact without a permitted salutation is hidden.
    frappe.set_user("test3@example.com")
    self.assertTrue(allowed_contact.has_permission('read'))
    self.assertFalse(other_contact.has_permission('read'))
    self.assertTrue(len(frappe.get_list("Contact")), 1)

    frappe.set_user("Administrator")
    self.set_strict_user_permissions(0)

    clear_user_permissions_for_doctype("Salutation")
    clear_user_permissions_for_doctype("Contact")
def test_user_permissions_not_applied_if_user_can_edit_user_permissions(self):
    """Users who can themselves manage user permissions are not restricted."""
    add_user_permission('Blogger', '_Test Blogger 1', 'test1@example.com')

    # test1@example.com has rights to create user permissions
    # so it should not matter if explicit user permissions are not set
    self.assertTrue(frappe.get_doc('Blogger', '_Test Blogger').has_permission('read'))
def test_user_permission_is_not_applied_if_user_roles_does_not_have_permission(self):
    """A user permission alone grants nothing without a matching role permission."""
    add_user_permission('Blog Post', '-test-blog-post-1', 'test3@example.com')
    frappe.set_user("test3@example.com")
    doc = frappe.get_doc("Blog Post", "-test-blog-post-1")
    # No Blogger role yet -> the user permission does not grant read.
    self.assertFalse(doc.has_permission("read"))

    frappe.set_user("Administrator")
    user = frappe.get_doc("User", "test3@example.com")
    user.add_roles("Blogger")
    frappe.set_user("test3@example.com")
    # Role + user permission together grant read.
    self.assertTrue(doc.has_permission("read"))

    frappe.set_user("Administrator")
    user.remove_roles("Blogger")
def test_contextual_user_permission(self):
    """User permissions with `applicable_for` only restrict access through that doctype."""
    # should be applicable for across all doctypes
    add_user_permission('Blogger', '_Test Blogger', 'test2@example.com')
    # should be applicable only while accessing Blog Post
    add_user_permission('Blogger', '_Test Blogger 1', 'test2@example.com', applicable_for='Blog Post')
    # should be applicable only while accessing User
    add_user_permission('Blogger', '_Test Blogger 2', 'test2@example.com', applicable_for='User')

    posts = frappe.get_all('Blog Post', fields=['name', 'blogger'])

    # Get all posts for admin
    self.assertEqual(len(posts), 4)

    frappe.set_user('test2@example.com')

    posts = frappe.get_list('Blog Post', fields=['name', 'blogger'])

    # Should get only posts with allowed blogger via user permission
    # only '_Test Blogger', '_Test Blogger 1' are allowed in Blog Post
    self.assertEqual(len(posts), 3)

    for post in posts:
        self.assertIn(post.blogger, ['_Test Blogger', '_Test Blogger 1'], 'A post from {} is not expected.'.format(post.blogger))
def test_if_owner_permission_overrides_properly(self):
    """`if_owner` docperms limit write/delete to the document's owner; read may still be shared."""
    # check if user is not granted access if the user is not the owner of the doc
    # Blogger has only read access on the blog post unless he is the owner of the blog
    update('Blog Post', 'Blogger', 0, 'if_owner', 1)
    update('Blog Post', 'Blogger', 0, 'read', 1)
    update('Blog Post', 'Blogger', 0, 'write', 1)
    update('Blog Post', 'Blogger', 0, 'delete', 1)

    # currently test2 user has not created any document
    # still he should be able to do get_list query which should
    # not raise permission error but simply return empty list
    frappe.set_user("test2@example.com")
    self.assertEqual(frappe.get_list('Blog Post'), [])

    frappe.set_user("Administrator")

    # creates a custom docperm with just read access
    # now any user can read any blog post (but other rights are limited to the blog post owner)
    add_permission('Blog Post', 'Blogger')
    frappe.clear_cache(doctype="Blog Post")

    frappe.delete_doc('Blog Post', '-test-blog-post-title')

    frappe.set_user("test1@example.com")
    doc = frappe.get_doc({
        "doctype": "Blog Post",
        "blog_category": "-test-blog-category",
        "blogger": "_Test Blogger 1",
        "title": "_Test Blog Post Title",
        "content": "_Test Blog Post Content"
    })
    doc.insert()

    # a non-owner can read (via the custom docperm) but cannot write or delete
    frappe.set_user("test2@example.com")
    doc = frappe.get_doc(doc.doctype, doc.name)
    self.assertTrue(doc.has_permission("read"))
    self.assertFalse(doc.has_permission("write"))
    self.assertFalse(doc.has_permission("delete"))

    # check if owner of the doc has the access that is available only for the owner of the doc
    frappe.set_user("test1@example.com")
    doc = frappe.get_doc(doc.doctype, doc.name)
    self.assertTrue(doc.has_permission("read"))
    self.assertTrue(doc.has_permission("write"))
    self.assertTrue(doc.has_permission("delete"))

    # delete the created doc
    frappe.delete_doc('Blog Post', '-test-blog-post-title')
def test_if_owner_permission_on_getdoc(self):
    """getdoc honours `if_owner`: only the owner may load the document's form view."""
    update('Blog Post', 'Blogger', 0, 'if_owner', 1)
    update('Blog Post', 'Blogger', 0, 'read', 1)
    update('Blog Post', 'Blogger', 0, 'write', 1)
    update('Blog Post', 'Blogger', 0, 'delete', 1)
    frappe.clear_cache(doctype="Blog Post")

    frappe.set_user("test1@example.com")
    doc = frappe.get_doc({
        "doctype": "Blog Post",
        "blog_category": "-test-blog-category",
        "blogger": "_Test Blogger 1",
        "title": "_Test Blog Post Title New",
        "content": "_Test Blog Post Content"
    })
    doc.insert()

    # the owner sees the document in the getdoc response
    getdoc('Blog Post', doc.name)
    doclist = [d.name for d in frappe.response.docs]
    self.assertTrue(doc.name in doclist)

    # any other user is rejected with a PermissionError
    frappe.set_user("test2@example.com")
    self.assertRaises(frappe.PermissionError, getdoc, 'Blog Post', doc.name)
def test_if_owner_permission_on_delete(self):
    """With `if_owner` delete rights, only the owner may delete; other roles can still read."""
    update('Blog Post', 'Blogger', 0, 'if_owner', 1)
    update('Blog Post', 'Blogger', 0, 'read', 1)
    update('Blog Post', 'Blogger', 0, 'write', 1)
    update('Blog Post', 'Blogger', 0, 'delete', 1)

    # Remove delete perm
    update('Blog Post', 'Website Manager', 0, 'delete', 0)

    frappe.clear_cache(doctype="Blog Post")

    frappe.set_user("test2@example.com")
    doc = frappe.get_doc({
        "doctype": "Blog Post",
        "blog_category": "-test-blog-category",
        "blogger": "_Test Blogger 1",
        "title": "_Test Blog Post Title New 1",
        "content": "_Test Blog Post Content"
    })
    doc.insert()

    # owner can load the document
    getdoc('Blog Post', doc.name)
    doclist = [d.name for d in frappe.response.docs]
    self.assertTrue(doc.name in doclist)

    frappe.set_user("testperm@example.com")

    # Website Manager able to read
    getdoc('Blog Post', doc.name)
    doclist = [d.name for d in frappe.response.docs]
    self.assertTrue(doc.name in doclist)

    # Website Manager should not be able to delete
    self.assertRaises(frappe.PermissionError, frappe.delete_doc, 'Blog Post', doc.name)

    # the owner can delete; then restore the Website Manager delete perm
    frappe.set_user("test2@example.com")
    frappe.delete_doc('Blog Post', '-test-blog-post-title-new-1')
    update('Blog Post', 'Website Manager', 0, 'delete', 1)
def test_clear_user_permissions(self):
    """clear_user_permissions removes only the given user + doctype combination."""
    current_user = frappe.session.user
    frappe.set_user('Administrator')
    clear_user_permissions_for_doctype('Blog Category', 'test2@example.com')
    clear_user_permissions_for_doctype('Blog Post', 'test2@example.com')

    add_user_permission('Blog Post', '-test-blog-post-1', 'test2@example.com')
    add_user_permission('Blog Post', '-test-blog-post-2', 'test2@example.com')
    add_user_permission("Blog Category", '-test-blog-category-1', 'test2@example.com')

    deleted_user_permission_count = clear_user_permissions('test2@example.com', 'Blog Post')

    # both Blog Post permissions removed, and none remain in the table
    self.assertEqual(deleted_user_permission_count, 2)

    blog_post_user_permission_count = frappe.db.count('User Permission', filters={
        'user': 'test2@example.com',
        'allow': 'Blog Post'
    })

    self.assertEqual(blog_post_user_permission_count, 0)

    blog_category_user_permission_count = frappe.db.count('User Permission', filters={
        'user': 'test2@example.com',
        'allow': 'Blog Category'
    })

    # the Blog Category permission must survive the Blog Post-scoped clear
    self.assertEqual(blog_category_user_permission_count, 1)

    # reset the user
    frappe.set_user(current_user)
| mit |
anhaidgroup/py_stringsimjoin | py_stringsimjoin/utils/generic_helper.py | 1 | 4541 |
import multiprocessing
import operator
import os
from six.moves import xrange
import pandas as pd
# Map the textual comparison operators accepted by the public join/filter
# APIs to the corresponding functions from the `operator` module.
COMP_OP_MAP = {'>=': operator.ge,
               '>': operator.gt,
               '<=': operator.le,
               '<': operator.lt,
               '=': operator.eq,
               '!=': operator.ne}
def get_output_row_from_tables(l_row, r_row,
                               l_key_attr_index, r_key_attr_index,
                               l_out_attrs_indices=None,
                               r_out_attrs_indices=None):
    """Assemble one output record from a left-table row and a right-table row.

    The record starts with the two key attributes, followed by the requested
    output attributes from the left row, then those from the right row.
    """
    record = [l_row[l_key_attr_index], r_row[r_key_attr_index]]
    if l_out_attrs_indices:
        record.extend(l_row[idx] for idx in l_out_attrs_indices)
    if r_out_attrs_indices:
        record.extend(r_row[idx] for idx in r_out_attrs_indices)
    return record
def get_output_header_from_tables(l_key_attr, r_key_attr,
                                  l_out_attrs, r_out_attrs,
                                  l_out_prefix, r_out_prefix):
    """Build the output column names: prefixed keys first, then the prefixed
    left output attributes, then the prefixed right output attributes."""
    header = [l_out_prefix + l_key_attr, r_out_prefix + r_key_attr]
    if l_out_attrs:
        header.extend(l_out_prefix + attr for attr in l_out_attrs)
    if r_out_attrs:
        header.extend(r_out_prefix + attr for attr in r_out_attrs)
    return header
def convert_dataframe_to_list(table, join_attr_index,
                              remove_null=True):
    """Materialize a dataframe as a list of row tuples.

    When remove_null is True, rows whose join attribute is missing
    (NaN/None) are skipped.
    """
    return [tuple(rec) for rec in table.itertuples(index=False)
            if not (remove_null and pd.isnull(rec[join_attr_index]))]
def convert_dataframe_to_array(dataframe, proj_attrs, join_attr,
                               remove_nan=True):
    """Project a dataframe onto proj_attrs and return the rows as a numpy array.

    Parameters
    ----------
    dataframe : pandas.DataFrame
    proj_attrs : list of column names to keep (in output order)
    join_attr : column checked for missing values
    remove_nan : when True, rows with a missing join attribute are dropped

    Returns
    -------
    numpy.ndarray containing the projected rows
    """
    projected_dataframe = dataframe[proj_attrs]
    if remove_nan:
        # Pass axis as a keyword: the positional form dropna(0, subset=...)
        # was deprecated in pandas 1.x and removed in pandas 2.0.
        projected_dataframe = projected_dataframe.dropna(axis=0,
                                                         subset=[join_attr])
    return projected_dataframe.values
def build_dict_from_table(table, key_attr_index, join_attr_index,
                          remove_null=True):
    """Index the rows of a dataframe by their key attribute.

    Rows with a missing join attribute are skipped when remove_null is True.
    As in a plain loop, a later row overwrites an earlier row with the same key.
    """
    return {rec[key_attr_index]: tuple(rec)
            for rec in table.itertuples(index=False)
            if not (remove_null and pd.isnull(rec[join_attr_index]))}
def find_output_attribute_indices(original_columns, output_attributes):
    """Map each requested output attribute to its position in original_columns.

    Returns an empty list when output_attributes is None.
    """
    if output_attributes is None:
        return []
    return [original_columns.index(attr) for attr in output_attributes]
def split_table(table, num_splits):
    """Partition `table` into num_splits contiguous, nearly equal slices.

    Slice boundaries are computed by rounding fractional positions, so the
    pieces differ in length by at most one element.
    """
    chunk = len(table) * (1.0 / num_splits)
    return [table[int(round(i * chunk)):int(round((i + 1) * chunk))]
            for i in range(num_splits)]
def remove_non_ascii(s):
    """Strip every character whose code point lies outside the ASCII range."""
    return ''.join(filter(lambda ch: ord(ch) < 128, s))
def get_num_processes_to_launch(n_jobs):
    """Translate an sklearn-style n_jobs value into a positive process count.

    Non-negative values are used as-is (floored at 1). Negative values count
    back from the number of CPUs, so -1 means "all cores".
    """
    if n_jobs >= 0:
        return max(n_jobs, 1)
    # determine number of processes to launch parallely
    return max(multiprocessing.cpu_count() + 1 + n_jobs, 1)
def get_install_path():
    """Return the parent of the directory containing this module (the
    package's install root)."""
    here = os.path.dirname(os.path.realpath(__file__))
    return os.path.dirname(here)
def remove_redundant_attrs(out_attrs, key_attr):
    """Drop key_attr and duplicates from out_attrs, preserving first-seen order.

    Returns None unchanged when out_attrs is None.
    """
    if out_attrs is None:
        return out_attrs
    seen = set()
    unique_attrs = []
    for attr in out_attrs:
        if attr == key_attr or attr in seen:
            continue
        seen.add(attr)
        unique_attrs.append(attr)
    return unique_attrs
def get_attrs_to_project(out_attrs, key_attr, join_attr):
    """Return the projection list: key and join attributes first, then every
    output attribute except the join attribute.

    Assumes key_attr has already been removed from out_attrs, if present.
    """
    proj_attrs = [key_attr, join_attr]
    if out_attrs is not None:
        proj_attrs.extend(attr for attr in out_attrs if attr != join_attr)
    return proj_attrs
| bsd-3-clause |
gabrielfalcao/lettuce | tests/integration/lib/Django-1.2.5/tests/regressiontests/cache/tests.py | 38 | 28208 | # -*- coding: utf-8 -*-
# Unit tests for cache framework
# Uses whatever cache backend is set in the test settings file.
import os
import tempfile
import time
import unittest
import warnings
from django.conf import settings
from django.core import management
from django.core.cache import get_cache
from django.core.cache.backends.base import CacheKeyWarning
from django.http import HttpResponse, HttpRequest
from django.middleware.cache import FetchFromCacheMiddleware, UpdateCacheMiddleware
from django.test import TestCase
from django.test.utils import get_warnings_state, restore_warnings_state
from django.utils import translation
from django.utils.cache import patch_vary_headers, get_cache_key, learn_cache_key
from django.utils.hashcompat import md5_constructor
from regressiontests.cache.models import Poll, expensive_calculation
# functions/classes for complex data type tests
def f():
    """Return a constant; used as a picklable function value in the
    data-type caching tests."""
    return 42

class C:
    """Simple class used as a cacheable value in the data-type tests."""
    def m(n):
        # NOTE: `n` plays the role of `self` here (the method ignores it).
        return 24
class DummyCacheTests(unittest.TestCase):
    """Tests for the 'dummy://' backend, which accepts every call but never stores anything."""
    # The Dummy cache backend doesn't really behave like a test backend,
    # so it has different test requirements.
    def setUp(self):
        self.cache = get_cache('dummy://')

    def test_simple(self):
        "Dummy cache backend ignores cache set calls"
        self.cache.set("key", "value")
        self.assertEqual(self.cache.get("key"), None)

    def test_add(self):
        "Add doesn't do anything in dummy cache backend"
        self.cache.add("addkey1", "value")
        result = self.cache.add("addkey1", "newvalue")
        self.assertEqual(result, True)
        self.assertEqual(self.cache.get("addkey1"), None)

    def test_non_existent(self):
        "Non-existent keys aren't found in the dummy cache backend"
        self.assertEqual(self.cache.get("does_not_exist"), None)
        self.assertEqual(self.cache.get("does_not_exist", "bang!"), "bang!")

    def test_get_many(self):
        "get_many returns nothing for the dummy cache backend"
        self.cache.set('a', 'a')
        self.cache.set('b', 'b')
        self.cache.set('c', 'c')
        self.cache.set('d', 'd')
        self.assertEqual(self.cache.get_many(['a', 'c', 'd']), {})
        self.assertEqual(self.cache.get_many(['a', 'b', 'e']), {})

    def test_delete(self):
        "Cache deletion is transparently ignored on the dummy cache backend"
        self.cache.set("key1", "spam")
        self.cache.set("key2", "eggs")
        self.assertEqual(self.cache.get("key1"), None)
        self.cache.delete("key1")
        self.assertEqual(self.cache.get("key1"), None)
        self.assertEqual(self.cache.get("key2"), None)

    def test_has_key(self):
        "The has_key method doesn't ever return True for the dummy cache backend"
        self.cache.set("hello1", "goodbye1")
        self.assertEqual(self.cache.has_key("hello1"), False)
        self.assertEqual(self.cache.has_key("goodbye1"), False)

    def test_in(self):
        "The in operator doesn't ever return True for the dummy cache backend"
        self.cache.set("hello2", "goodbye2")
        self.assertEqual("hello2" in self.cache, False)
        self.assertEqual("goodbye2" in self.cache, False)

    def test_incr(self):
        "Dummy cache values can't be incremented"
        self.cache.set('answer', 42)
        self.assertRaises(ValueError, self.cache.incr, 'answer')
        self.assertRaises(ValueError, self.cache.incr, 'does_not_exist')

    def test_decr(self):
        "Dummy cache values can't be decremented"
        self.cache.set('answer', 42)
        self.assertRaises(ValueError, self.cache.decr, 'answer')
        self.assertRaises(ValueError, self.cache.decr, 'does_not_exist')

    def test_data_types(self):
        "All data types are ignored equally by the dummy cache"
        stuff = {
            'string' : 'this is a string',
            'int' : 42,
            'list' : [1, 2, 3, 4],
            'tuple' : (1, 2, 3, 4),
            'dict' : {'A': 1, 'B' : 2},
            'function' : f,
            'class' : C,
        }
        self.cache.set("stuff", stuff)
        self.assertEqual(self.cache.get("stuff"), None)

    def test_expiration(self):
        "Expiration has no effect on the dummy cache"
        self.cache.set('expire1', 'very quickly', 1)
        self.cache.set('expire2', 'very quickly', 1)
        self.cache.set('expire3', 'very quickly', 1)
        time.sleep(2)
        self.assertEqual(self.cache.get("expire1"), None)
        self.cache.add("expire2", "newvalue")
        self.assertEqual(self.cache.get("expire2"), None)
        self.assertEqual(self.cache.has_key("expire3"), False)

    def test_unicode(self):
        "Unicode values are ignored by the dummy cache"
        # NOTE(review): u'ascii' appears twice below, so the dict keeps only
        # the later {u'x': 1} value for that key.
        stuff = {
            u'ascii': u'ascii_value',
            u'unicode_ascii': u'Iñtërnâtiônàlizætiøn1',
            u'Iñtërnâtiônàlizætiøn': u'Iñtërnâtiônàlizætiøn2',
            u'ascii': {u'x' : 1 }
        }
        for (key, value) in stuff.items():
            self.cache.set(key, value)
            self.assertEqual(self.cache.get(key), None)

    def test_set_many(self):
        "set_many does nothing for the dummy cache backend"
        self.cache.set_many({'a': 1, 'b': 2})

    def test_delete_many(self):
        "delete_many does nothing for the dummy cache backend"
        self.cache.delete_many(['a', 'b'])

    def test_clear(self):
        "clear does nothing for the dummy cache backend"
        self.cache.clear()
class BaseCacheTests(object):
    """Mixin with the behavioural contract every real cache backend must satisfy.

    Concrete test classes mix this into a unittest.TestCase and supply
    ``self.cache`` in setUp.
    """
    # A common set of tests to apply to all cache backends
    def tearDown(self):
        self.cache.clear()

    def test_simple(self):
        # Simple cache set/get works
        self.cache.set("key", "value")
        self.assertEqual(self.cache.get("key"), "value")

    def test_add(self):
        # A key can be added to a cache
        self.cache.add("addkey1", "value")
        result = self.cache.add("addkey1", "newvalue")
        self.assertEqual(result, False)
        self.assertEqual(self.cache.get("addkey1"), "value")

    def test_non_existent(self):
        # Non-existent cache keys return as None/default
        # get with non-existent keys
        self.assertEqual(self.cache.get("does_not_exist"), None)
        self.assertEqual(self.cache.get("does_not_exist", "bang!"), "bang!")

    def test_get_many(self):
        # Multiple cache keys can be returned using get_many
        self.cache.set('a', 'a')
        self.cache.set('b', 'b')
        self.cache.set('c', 'c')
        self.cache.set('d', 'd')
        self.assertEqual(self.cache.get_many(['a', 'c', 'd']), {'a' : 'a', 'c' : 'c', 'd' : 'd'})
        self.assertEqual(self.cache.get_many(['a', 'b', 'e']), {'a' : 'a', 'b' : 'b'})

    def test_delete(self):
        # Cache keys can be deleted
        self.cache.set("key1", "spam")
        self.cache.set("key2", "eggs")
        self.assertEqual(self.cache.get("key1"), "spam")
        self.cache.delete("key1")
        self.assertEqual(self.cache.get("key1"), None)
        self.assertEqual(self.cache.get("key2"), "eggs")

    def test_has_key(self):
        # The cache can be inspected for cache keys
        self.cache.set("hello1", "goodbye1")
        self.assertEqual(self.cache.has_key("hello1"), True)
        self.assertEqual(self.cache.has_key("goodbye1"), False)

    def test_in(self):
        # The in operator can be used to inspect cache contents
        self.cache.set("hello2", "goodbye2")
        self.assertEqual("hello2" in self.cache, True)
        self.assertEqual("goodbye2" in self.cache, False)

    def test_incr(self):
        # Cache values can be incremented
        self.cache.set('answer', 41)
        self.assertEqual(self.cache.incr('answer'), 42)
        self.assertEqual(self.cache.get('answer'), 42)
        self.assertEqual(self.cache.incr('answer', 10), 52)
        self.assertEqual(self.cache.get('answer'), 52)
        self.assertRaises(ValueError, self.cache.incr, 'does_not_exist')

    def test_decr(self):
        # Cache values can be decremented
        self.cache.set('answer', 43)
        self.assertEqual(self.cache.decr('answer'), 42)
        self.assertEqual(self.cache.get('answer'), 42)
        self.assertEqual(self.cache.decr('answer', 10), 32)
        self.assertEqual(self.cache.get('answer'), 32)
        self.assertRaises(ValueError, self.cache.decr, 'does_not_exist')

    def test_data_types(self):
        # Many different data types can be cached
        stuff = {
            'string' : 'this is a string',
            'int' : 42,
            'list' : [1, 2, 3, 4],
            'tuple' : (1, 2, 3, 4),
            'dict' : {'A': 1, 'B' : 2},
            'function' : f,
            'class' : C,
        }
        self.cache.set("stuff", stuff)
        self.assertEqual(self.cache.get("stuff"), stuff)

    def test_cache_read_for_model_instance(self):
        # Don't want fields with callable as default to be called on cache read
        expensive_calculation.num_runs = 0
        Poll.objects.all().delete()
        my_poll = Poll.objects.create(question="Well?")
        self.assertEqual(Poll.objects.count(), 1)
        pub_date = my_poll.pub_date
        self.cache.set('question', my_poll)
        cached_poll = self.cache.get('question')
        self.assertEqual(cached_poll.pub_date, pub_date)
        # We only want the default expensive calculation run once
        self.assertEqual(expensive_calculation.num_runs, 1)

    def test_cache_write_for_model_instance_with_deferred(self):
        # Don't want fields with callable as default to be called on cache write
        expensive_calculation.num_runs = 0
        Poll.objects.all().delete()
        my_poll = Poll.objects.create(question="What?")
        self.assertEqual(expensive_calculation.num_runs, 1)
        defer_qs = Poll.objects.all().defer('question')
        self.assertEqual(defer_qs.count(), 1)
        self.assertEqual(expensive_calculation.num_runs, 1)
        self.cache.set('deferred_queryset', defer_qs)
        # cache set should not re-evaluate default functions
        self.assertEqual(expensive_calculation.num_runs, 1)

    def test_cache_read_for_model_instance_with_deferred(self):
        # Don't want fields with callable as default to be called on cache read
        expensive_calculation.num_runs = 0
        Poll.objects.all().delete()
        my_poll = Poll.objects.create(question="What?")
        self.assertEqual(expensive_calculation.num_runs, 1)
        defer_qs = Poll.objects.all().defer('question')
        self.assertEqual(defer_qs.count(), 1)
        self.cache.set('deferred_queryset', defer_qs)
        self.assertEqual(expensive_calculation.num_runs, 1)
        runs_before_cache_read = expensive_calculation.num_runs
        cached_polls = self.cache.get('deferred_queryset')
        # We only want the default expensive calculation run on creation and set
        self.assertEqual(expensive_calculation.num_runs, runs_before_cache_read)

    def test_expiration(self):
        # Cache values can be set to expire
        self.cache.set('expire1', 'very quickly', 1)
        self.cache.set('expire2', 'very quickly', 1)
        self.cache.set('expire3', 'very quickly', 1)
        time.sleep(2)
        self.assertEqual(self.cache.get("expire1"), None)
        self.cache.add("expire2", "newvalue")
        self.assertEqual(self.cache.get("expire2"), "newvalue")
        self.assertEqual(self.cache.has_key("expire3"), False)

    def test_unicode(self):
        # Unicode values can be cached
        # NOTE(review): u'ascii' appears twice below, so only the later
        # {u'x': 1} value is actually exercised for that key.
        stuff = {
            u'ascii': u'ascii_value',
            u'unicode_ascii': u'Iñtërnâtiônàlizætiøn1',
            u'Iñtërnâtiônàlizætiøn': u'Iñtërnâtiônàlizætiøn2',
            u'ascii': {u'x' : 1 }
        }
        for (key, value) in stuff.items():
            self.cache.set(key, value)
            self.assertEqual(self.cache.get(key), value)

    def test_binary_string(self):
        # Binary strings should be cachable
        from zlib import compress, decompress
        value = 'value_to_be_compressed'
        compressed_value = compress(value)
        self.cache.set('binary1', compressed_value)
        compressed_result = self.cache.get('binary1')
        self.assertEqual(compressed_value, compressed_result)
        self.assertEqual(value, decompress(compressed_result))

    def test_set_many(self):
        # Multiple keys can be set using set_many
        self.cache.set_many({"key1": "spam", "key2": "eggs"})
        self.assertEqual(self.cache.get("key1"), "spam")
        self.assertEqual(self.cache.get("key2"), "eggs")

    def test_set_many_expiration(self):
        # set_many takes a second ``timeout`` parameter
        self.cache.set_many({"key1": "spam", "key2": "eggs"}, 1)
        time.sleep(2)
        self.assertEqual(self.cache.get("key1"), None)
        self.assertEqual(self.cache.get("key2"), None)

    def test_delete_many(self):
        # Multiple keys can be deleted using delete_many
        self.cache.set("key1", "spam")
        self.cache.set("key2", "eggs")
        self.cache.set("key3", "ham")
        self.cache.delete_many(["key1", "key2"])
        self.assertEqual(self.cache.get("key1"), None)
        self.assertEqual(self.cache.get("key2"), None)
        self.assertEqual(self.cache.get("key3"), "ham")

    def test_clear(self):
        # The cache can be emptied using clear
        self.cache.set("key1", "spam")
        self.cache.set("key2", "eggs")
        self.cache.clear()
        self.assertEqual(self.cache.get("key1"), None)
        self.assertEqual(self.cache.get("key2"), None)

    def test_long_timeout(self):
        '''
        Using a timeout greater than 30 days makes memcached think
        it is an absolute expiration timestamp instead of a relative
        offset. Test that we honour this convention. Refs #12399.
        '''
        self.cache.set('key1', 'eggs', 60*60*24*30 + 1) #30 days + 1 second
        self.assertEqual(self.cache.get('key1'), 'eggs')

        self.cache.add('key2', 'ham', 60*60*24*30 + 1)
        self.assertEqual(self.cache.get('key2'), 'ham')

        self.cache.set_many({'key3': 'sausage', 'key4': 'lobster bisque'}, 60*60*24*30 + 1)
        self.assertEqual(self.cache.get('key3'), 'sausage')
        self.assertEqual(self.cache.get('key4'), 'lobster bisque')

    def perform_cull_test(self, initial_count, final_count):
        """This is implemented as a utility method, because only some of the backends
        implement culling. The culling algorithm also varies slightly, so the final
        number of entries will vary between backends"""
        # Create initial cache key entries. This will overflow the cache, causing a cull
        for i in range(1, initial_count):
            self.cache.set('cull%d' % i, 'value', 1000)
        count = 0
        # Count how many keys are left in the cache.
        for i in range(1, initial_count):
            if self.cache.has_key('cull%d' % i):
                count = count + 1
        self.assertEqual(count, final_count)

    def test_invalid_keys(self):
        """
        All the builtin backends (except memcached, see below) should warn on
        keys that would be refused by memcached. This encourages portable
        caching code without making it too difficult to use production backends
        with more liberal key rules. Refs #6447.
        """
        # On Python 2.6+ we could use the catch_warnings context
        # manager to test this warning nicely. Since we can't do that
        # yet, the cleanest option is to temporarily ask for
        # CacheKeyWarning to be raised as an exception.
        _warnings_state = get_warnings_state()
        warnings.simplefilter("error", CacheKeyWarning)
        try:
            # memcached does not allow whitespace or control characters in keys
            self.assertRaises(CacheKeyWarning, self.cache.set, 'key with spaces', 'value')
            # memcached limits key length to 250
            self.assertRaises(CacheKeyWarning, self.cache.set, 'a' * 251, 'value')
        finally:
            restore_warnings_state(_warnings_state)
class DBCacheTests(unittest.TestCase, BaseCacheTests):
    """Run the common cache contract against the database backend."""
    def setUp(self):
        # Spaces are used in the table name to ensure quoting/escaping is working
        self._table_name = 'test cache table'
        management.call_command('createcachetable', self._table_name, verbosity=0, interactive=False)
        self.cache = get_cache('db://%s?max_entries=30' % self._table_name)

    def tearDown(self):
        # Drop the cache table directly; createcachetable has no inverse command.
        from django.db import connection
        cursor = connection.cursor()
        cursor.execute('DROP TABLE %s' % connection.ops.quote_name(self._table_name))

    def test_cull(self):
        self.perform_cull_test(50, 29)
class LocMemCacheTests(unittest.TestCase, BaseCacheTests):
    """Run the common cache contract against the local-memory backend."""
    def setUp(self):
        self.cache = get_cache('locmem://?max_entries=30')

    def test_cull(self):
        self.perform_cull_test(50, 29)
# memcached backend isn't guaranteed to be available.
# To check the memcached backend, the test settings file will
# need to contain a CACHE_BACKEND setting that points at
# your memcache server.
if settings.CACHE_BACKEND.startswith('memcached://'):
    class MemcachedCacheTests(unittest.TestCase, BaseCacheTests):
        """Run the common cache contract against a live memcached server."""
        def setUp(self):
            self.cache = get_cache(settings.CACHE_BACKEND)

        def test_invalid_keys(self):
            """
            On memcached, we don't introduce a duplicate key validation
            step (for speed reasons), we just let the memcached API
            library raise its own exception on bad keys. Refs #6447.

            In order to be memcached-API-library agnostic, we only assert
            that a generic exception of some kind is raised.
            """
            # memcached does not allow whitespace or control characters in keys
            self.assertRaises(Exception, self.cache.set, 'key with spaces', 'value')
            # memcached limits key length to 250
            self.assertRaises(Exception, self.cache.set, 'a' * 251, 'value')
class FileBasedCacheTests(unittest.TestCase, BaseCacheTests):
    """
    Specific test cases for the file-based cache.
    """
    def setUp(self):
        self.dirname = tempfile.mkdtemp()
        self.cache = get_cache('file://%s?max_entries=30' % self.dirname)

    def test_hashing(self):
        """Test that keys are hashed into subdirectories correctly"""
        self.cache.set("foo", "bar")
        # entries live at <root>/<hash[:2]>/<hash[2:4]>/<hash[4:]>
        keyhash = md5_constructor("foo").hexdigest()
        keypath = os.path.join(self.dirname, keyhash[:2], keyhash[2:4], keyhash[4:])
        self.assert_(os.path.exists(keypath))

    def test_subdirectory_removal(self):
        """
        Make sure that the created subdirectories are correctly removed when empty.
        """
        self.cache.set("foo", "bar")
        keyhash = md5_constructor("foo").hexdigest()
        keypath = os.path.join(self.dirname, keyhash[:2], keyhash[2:4], keyhash[4:])
        self.assert_(os.path.exists(keypath))

        self.cache.delete("foo")
        # deleting the last entry must prune both levels of hash directories
        self.assert_(not os.path.exists(keypath))
        self.assert_(not os.path.exists(os.path.dirname(keypath)))
        self.assert_(not os.path.exists(os.path.dirname(os.path.dirname(keypath))))

    def test_cull(self):
        self.perform_cull_test(50, 28)
class CustomCacheKeyValidationTests(unittest.TestCase):
    """
    Tests for the ability to mixin a custom ``validate_key`` method to
    a custom cache backend that otherwise inherits from a builtin
    backend, and override the default key validation. Refs #6447.
    """
    def test_custom_key_validation(self):
        cache = get_cache('regressiontests.cache.liberal_backend://')

        # this key is both longer than 250 characters, and has spaces
        key = 'some key with spaces' * 15
        val = 'a value'
        cache.set(key, val)
        self.assertEqual(cache.get(key), val)
class CacheUtils(unittest.TestCase):
    """TestCase for django.utils.cache functions."""

    def setUp(self):
        # Save the settings this suite mutates so tearDown can restore them.
        self.path = '/cache/test/'
        self.old_settings_key_prefix = settings.CACHE_MIDDLEWARE_KEY_PREFIX
        self.old_middleware_seconds = settings.CACHE_MIDDLEWARE_SECONDS
        self.orig_use_i18n = settings.USE_I18N
        settings.CACHE_MIDDLEWARE_KEY_PREFIX = 'settingsprefix'
        settings.CACHE_MIDDLEWARE_SECONDS = 1
        settings.USE_I18N = False

    def tearDown(self):
        settings.CACHE_MIDDLEWARE_KEY_PREFIX = self.old_settings_key_prefix
        settings.CACHE_MIDDLEWARE_SECONDS = self.old_middleware_seconds
        settings.USE_I18N = self.orig_use_i18n

    def _get_request(self, path):
        # Minimal HttpRequest pointing inside the /cache/ namespace.
        request = HttpRequest()
        request.META = {
            'SERVER_NAME': 'testserver',
            'SERVER_PORT': 80,
        }
        request.path = request.path_info = "/cache/%s" % path
        return request

    def test_patch_vary_headers(self):
        headers = (
            # Initial vary, new headers, resulting vary.
            (None, ('Accept-Encoding',), 'Accept-Encoding'),
            ('Accept-Encoding', ('accept-encoding',), 'Accept-Encoding'),
            ('Accept-Encoding', ('ACCEPT-ENCODING',), 'Accept-Encoding'),
            ('Cookie', ('Accept-Encoding',), 'Cookie, Accept-Encoding'),
            ('Cookie, Accept-Encoding', ('Accept-Encoding',), 'Cookie, Accept-Encoding'),
            ('Cookie, Accept-Encoding', ('Accept-Encoding', 'cookie'), 'Cookie, Accept-Encoding'),
            (None, ('Accept-Encoding', 'COOKIE'), 'Accept-Encoding, COOKIE'),
            ('Cookie, Accept-Encoding', ('Accept-Encoding', 'cookie'), 'Cookie, Accept-Encoding'),
            ('Cookie    ,     Accept-Encoding', ('Accept-Encoding', 'cookie'), 'Cookie, Accept-Encoding'),
        )
        for initial_vary, newheaders, resulting_vary in headers:
            response = HttpResponse()
            if initial_vary is not None:
                response['Vary'] = initial_vary
            patch_vary_headers(response, newheaders)
            self.assertEqual(response['Vary'], resulting_vary)

    def test_get_cache_key(self):
        request = self._get_request(self.path)
        response = HttpResponse()
        key_prefix = 'localprefix'
        # Expect None if no headers have been set yet.
        self.assertEqual(get_cache_key(request), None)
        # Set headers to an empty list.
        learn_cache_key(request, response)
        self.assertEqual(get_cache_key(request), 'views.decorators.cache.cache_page.settingsprefix.a8c87a3d8c44853d7f79474f7ffe4ad5.d41d8cd98f00b204e9800998ecf8427e')
        # Verify that a specified key_prefix is taken in to account.
        learn_cache_key(request, response, key_prefix=key_prefix)
        self.assertEqual(get_cache_key(request, key_prefix=key_prefix), 'views.decorators.cache.cache_page.localprefix.a8c87a3d8c44853d7f79474f7ffe4ad5.d41d8cd98f00b204e9800998ecf8427e')

    def test_learn_cache_key(self):
        request = self._get_request(self.path)
        response = HttpResponse()
        response['Vary'] = 'Pony'
        # Make sure that the Vary header is added to the key hash
        learn_cache_key(request, response)
        self.assertEqual(get_cache_key(request), 'views.decorators.cache.cache_page.settingsprefix.a8c87a3d8c44853d7f79474f7ffe4ad5.d41d8cd98f00b204e9800998ecf8427e')
class CacheI18nTest(unittest.TestCase):
    """Cache-key and middleware behaviour when i18n (translation) is active."""
    def setUp(self):
        # Save every setting this suite mutates so tearDown can restore it.
        self.orig_cache_middleware_seconds = settings.CACHE_MIDDLEWARE_SECONDS
        self.orig_cache_middleware_key_prefix = settings.CACHE_MIDDLEWARE_KEY_PREFIX
        self.orig_cache_backend = settings.CACHE_BACKEND
        self.orig_use_i18n = settings.USE_I18N
        self.orig_languages = settings.LANGUAGES
        settings.LANGUAGES = (
            ('en', 'English'),
            ('es', 'Spanish'),
        )
        settings.CACHE_MIDDLEWARE_KEY_PREFIX = 'settingsprefix'
        self.path = '/cache/test/'

    def tearDown(self):
        settings.CACHE_MIDDLEWARE_SECONDS = self.orig_cache_middleware_seconds
        settings.CACHE_MIDDLEWARE_KEY_PREFIX = self.orig_cache_middleware_key_prefix
        settings.CACHE_BACKEND = self.orig_cache_backend
        settings.USE_I18N = self.orig_use_i18n
        settings.LANGUAGES = self.orig_languages
        translation.deactivate()

    def _get_request(self):
        request = HttpRequest()
        request.META = {
            'SERVER_NAME': 'testserver',
            'SERVER_PORT': 80,
        }
        request.path = request.path_info = self.path
        return request

    def _get_request_cache(self):
        # Like _get_request, but shaped for the cache middleware: it needs a
        # method, a session, and the _cache_update_cache flag.
        request = HttpRequest()
        request.META = {
            'SERVER_NAME': 'testserver',
            'SERVER_PORT': 80,
        }
        request.path = request.path_info = self.path
        request._cache_update_cache = True
        request.method = 'GET'
        request.session = {}
        return request

    def test_cache_key_i18n(self):
        settings.USE_I18N = True
        request = self._get_request()
        lang = translation.get_language()
        response = HttpResponse()
        key = learn_cache_key(request, response)
        self.assertTrue(key.endswith(lang), "Cache keys should include the language name when i18n is active")
        key2 = get_cache_key(request)
        self.assertEqual(key, key2)

    def test_cache_key_no_i18n(self):
        settings.USE_I18N = False
        request = self._get_request()
        lang = translation.get_language()
        response = HttpResponse()
        key = learn_cache_key(request, response)
        self.assertFalse(key.endswith(lang), "Cache keys shouldn't include the language name when i18n is inactive")

    def test_middleware(self):
        def set_cache(request, lang, msg):
            # Activate a language, render a response, and let the update
            # middleware store it under the language-specific cache key.
            translation.activate(lang)
            response = HttpResponse()
            response.content= msg
            return UpdateCacheMiddleware().process_response(request, response)

        settings.CACHE_MIDDLEWARE_SECONDS = 60
        settings.CACHE_MIDDLEWARE_KEY_PREFIX="test"
        settings.CACHE_BACKEND='locmem:///'
        settings.USE_I18N = True
        en_message ="Hello world!"
        es_message ="Hola mundo!"

        request = self._get_request_cache()
        set_cache(request, 'en', en_message)
        get_cache_data = FetchFromCacheMiddleware().process_request(request)
        # Check that we can recover the cache
        self.assertNotEqual(get_cache_data.content, None)
        self.assertEqual(en_message, get_cache_data.content)
        # change the session language and set content
        request = self._get_request_cache()
        set_cache(request, 'es', es_message)
        # change again the language
        translation.activate('en')
        # retrieve the content from cache
        get_cache_data = FetchFromCacheMiddleware().process_request(request)
        self.assertEqual(get_cache_data.content, en_message)
        # change again the language
        translation.activate('es')
        get_cache_data = FetchFromCacheMiddleware().process_request(request)
        self.assertEqual(get_cache_data.content, es_message)
class CacheMiddlewareAnonymousOnlyTests(TestCase):
    """Regression tests for CACHE_MIDDLEWARE_ANONYMOUS_ONLY.

    Verifies that enabling anonymous-only caching does not add 'Cookie'
    to the Vary header of responses.
    """
    urls = 'regressiontests.cache.urls'

    def setUp(self):
        # Save originals so tearDown() can restore global settings.
        self._orig_cache_middleware_anonymous_only = \
            getattr(settings, 'CACHE_MIDDLEWARE_ANONYMOUS_ONLY', False)
        self._orig_middleware_classes = settings.MIDDLEWARE_CLASSES
        # UpdateCacheMiddleware must run first (outermost on the response
        # path) and FetchFromCacheMiddleware last (first on the request path).
        settings.MIDDLEWARE_CLASSES = list(settings.MIDDLEWARE_CLASSES)
        settings.MIDDLEWARE_CLASSES.insert(0, 'django.middleware.cache.UpdateCacheMiddleware')
        settings.MIDDLEWARE_CLASSES += ['django.middleware.cache.FetchFromCacheMiddleware']

    def tearDown(self):
        settings.CACHE_MIDDLEWARE_ANONYMOUS_ONLY = self._orig_cache_middleware_anonymous_only
        settings.MIDDLEWARE_CLASSES = self._orig_middleware_classes

    def test_cache_middleware_anonymous_only_does_not_cause_vary_cookie(self):
        settings.CACHE_MIDDLEWARE_ANONYMOUS_ONLY = True
        response = self.client.get('/')
        # failIf() is a deprecated unittest alias; assertFalse() is the
        # supported spelling with identical semantics.
        self.assertFalse('Cookie' in response.get('Vary', ''))
# Allow running this test module directly from the command line.
if __name__ == '__main__':
    unittest.main()
| gpl-3.0 |
vatsan/pymadlib | pymadlib/utils.py | 2 | 21752 | '''
4 Jan 2013, vatsan.cs@utexas.edu>
Utility functions for PyMADlib. Currently this supports dummy coding of categorical columns (Pivoting).
'''
import pickle
# Default schema and table-name prefixes for tables created by the
# pivoting / array-conversion helpers below.
default_schema = 'public'
default_prefix = 'gp_pymdlib_'
default_prefix_arr = 'gp_pymdlib_arr_'
# Categorical columns with this many distinct values or more are not
# dummy-coded (too many indicator columns would be generated).
MAX_DISTINCT_VALS = 128
#Easy way of encoding strings without worrying about escaping quotes etc
GP_STRING_IDENTIFIER = '$GP_STR_IDENTIFIER${string_to_encode}$GP_STR_IDENTIFIER$'
PARALLEL_INSERT_FUNC = '''
DROP FUNCTION IF EXISTS gp_pivotify({table_name}, bytea, bytea, bytea, text);
CREATE FUNCTION gp_pivotify(rec_row {table_name}, cols_bin bytea, col_types_dict_bin bytea, col_distinct_vals_dict_bin bytea, label_col text)
RETURNS {output_table} AS
$$
import pickle
cols = pickle.loads(cols_bin)
col_types_dict = pickle.loads(col_types_dict_bin)
col_distinct_vals_dict = pickle.loads(col_distinct_vals_dict_bin)
insert_rec = []
#Insert ID column if it exists
if(col_types_dict.has_key('id')):
insert_rec.append(str(rec_row.get('id')))
#Insert values for the independent variables
for c in cols:
if(col_distinct_vals_dict.has_key(c)):
#A list of zeros will be used to initialized the binarized categorical column
#binarized_vals[i] will be 1, if 'i' is the value for the categorical column in the current row
num_dummy_variables = len(col_distinct_vals_dict[c].keys())
#In Dummy Coding, if a variable has K possible values, we add K-1 dummy variables (binary)
#to represent the original variable
if(num_dummy_variables > 1):
num_dummy_variables = num_dummy_variables - 1
binarized_vals = [0 for k in range(num_dummy_variables)]
#Set the index of the categorical variable's value to 1, rest will be zeros
cat_val = rec_row.get(c)
if(col_distinct_vals_dict[c].has_key(cat_val) and col_distinct_vals_dict[c][cat_val] < num_dummy_variables):
binarized_vals[col_distinct_vals_dict[c][cat_val]]=1
insert_rec.extend(binarized_vals)
else:
insert_rec.append(rec_row.get(c))
#Insert label value if it was passed in the input
if(label_col):
insert_rec.append(rec_row.get(label_col))
#Return the row
return insert_rec
$$ LANGUAGE PLPYTHONU;
'''
PARALLEL_INSERT_QUERY = '''
insert into {output_table_name}
(
select (binarized_table_type).* from
(
select gp_pivotify({table_name}.*,
{cols}::bytea,
{col_types_dict}::bytea,
{col_distinct_vals_dict}::bytea,
'{label_col}'
) as binarized_table_type
from {table_name}
) q1
);
'''
def isNumeric(num):
    '''
        Returns True if the string representation of num is numeric

        Inputs:
        =======
        num : A string representation of a number.

        Outputs:
        ========
        True if num is numeric, False otherwise
    '''
    try:
        float(num)
    except (ValueError, TypeError):
        # ValueError: string that doesn't parse as a number.
        # TypeError: non-string/non-number input (e.g. None, list) — the
        # original version let this propagate and crash.  The original also
        # printed the exception *class* (ValueError) rather than the caught
        # instance, and left debug prints in a pure predicate; both removed.
        return False
    return True
def __binarizeInParallel__(conn, table_name, output_table, cols, col_types_dict, col_distinct_vals_dict, label):
    '''
        Transform the categorical columns into a collection of binary values columns and insert rows
        into this column in parallel using PL/Python function

        Inputs:
        =======
        conn : A DBConnect object
        table_name : (string) Name of input table
        output_table : (string) Name of output table
        cols: (list) list of independent feature column names
        col_types_dict : (dict) a dict of column names and types
        col_distinct_vals_dict : (dict) a dict of column name, and the set of all distinct values in the column
        label : (string) label column name. If empty, it will be ignored.

        Outputs:
        =======
        A new table is created with the rows of the original table transformed
    '''
    # (Re-)create the gp_pivotify() PL/Python row-transform function,
    # typed against this specific input/output table pair.
    pinsert_func = PARALLEL_INSERT_FUNC.format(table_name=table_name, output_table=output_table)
    conn.executeQuery(pinsert_func)
    # The dicts are pickled and wrapped in dollar-quoting (GP_STRING_IDENTIFIER)
    # so they survive SQL transport without any quote escaping.
    pinsert_stmt = PARALLEL_INSERT_QUERY.format(output_table_name=output_table,
                                                table_name=table_name,
                                                cols=GP_STRING_IDENTIFIER.format(string_to_encode=pickle.dumps(cols)),
                                                col_types_dict=GP_STRING_IDENTIFIER.format(string_to_encode=pickle.dumps(col_types_dict)),
                                                col_distinct_vals_dict=GP_STRING_IDENTIFIER.format(string_to_encode=pickle.dumps(col_distinct_vals_dict)),
                                                label_col=label
                                               )
    conn.executeQuery(pinsert_stmt)
def __getColTypesDict__(conn, tbl_schema, tbl_nm):
    '''
        Return a dict containing column names and their type, by querying the information schema

        Inputs:
        =======
        conn : A DBConnect object
        tbl_schema : (string) The schema of the table
        tbl_nm : (string) The name of the table whose columns we need to query

        Outputs:
        ========
        col_types_dict : A dict of col_name and types
    '''
    col_types_stmt = '''
        select column_name, data_type
        from information_schema.columns
        where table_schema = '{table_schema}' and table_name = '{table_name}';
    '''.format(table_schema=tbl_schema, table_name=tbl_nm)
    cursor = conn.getCursor()
    cursor.execute(col_types_stmt)
    col_types_dict = {}
    # NOTE(review): rows are read with .get(), so the cursor presumably yields
    # dict-like rows (e.g. psycopg2 RealDictCursor) — confirm in DBConnect.
    for row in cursor:
        col_types_dict[row.get('column_name')] = row.get('data_type')
    cursor.close()
    return col_types_dict
def __getColDistinctValsDict__(conn, cols, col_types_dict, table_name):
    '''
        Return a dict of column name, and the set of all distinct values in the column

        Inputs:
        =======
        conn : A DBConnect object
        cols : (list) list of independent feature column names
        col_types_dict : (dict) a dict of column names and types
        table_name : (string) name of the input table

        Outputs:
        ========
        col_distinct_vals_dict : (dict) a dict of column name, and the set of all distinct values in the column
    '''
    distinct_vals_stmt = '''
        select distinct {col_name}
        from {table_name}
        order by {col_name};
    '''
    #If any of the columns is of type character or varchar or text, and the number of distinct values in these columns < N (for now let's set it at 32)
    #Then 'binarize' this column's values and create a new table for this column
    col_distinct_vals_dict = {}
    for col in cols:
        # Only text-like columns are treated as categorical.
        if(col_types_dict[col] in ['char','character varying', 'text']):
            #Find distinct values of the column
            cursor = conn.getCursor()
            stmt = distinct_vals_stmt.format(col_name=col, table_name=table_name)
            cursor.execute(stmt)
            distinct_vals = [row.get(col) for row in cursor]
            cursor.close()
            # Map each distinct value to its ordinal index (values come back
            # sorted, so the mapping is deterministic across calls).
            distinct_vals_dict = {}
            for i in range(len(distinct_vals)):
                distinct_vals_dict[distinct_vals[i]] = i
            #If the number of distinct values of a categorical column is reasonable, then add it to a mapper
            if(len(distinct_vals) < MAX_DISTINCT_VALS):
                col_distinct_vals_dict[col] = distinct_vals_dict
    return col_distinct_vals_dict
def __getColNamesAndTypesList__(cols,col_types_dict, col_distinct_vals_dict):
'''
Return a list of column names and types, where any categorical column in the original table have
been 'binarized'. Dummy coding is used to convert categorical columns into dummy variables.
Refer: http://en.wikipedia.org/wiki/Categorical_variable#Dummy_coding
Inputs:
=======
cols : (list) list of independent feature column names
col_types_dict : (dict) a dict of column names and types
col_distinct_vals_dict: (dict) a dict of column name, and the set of all distinct values in the column
Outputs:
========
col_names_and_types_lst : (list) a list of column names and types, where any categorical
column in the original table have
'''
col_names_and_types_lst = []
for col in cols:
if(col_distinct_vals_dict.has_key(col)):
dist_vals = col_distinct_vals_dict[col].keys()
dist_vals.sort()
#In Dummy Coding, if a variable has K possible values, we add K-1 dummy variables (binary) to represent the original variable
if(len(dist_vals) > 1):
dist_vals = dist_vals[:-1]
for valIndx in range(len(dist_vals)):
col_names_and_types_lst.append(['{column}_val_{indx}'.format(column=col,indx=valIndx),'integer'])
else:
col_names_and_types_lst.append([col, col_types_dict[col]])
return col_names_and_types_lst
def __createPivotTable__(conn, output_table, col_types_dict, col_names_and_types_lst, label):
    '''
        Create a Pivot table, where every categorical column in the original table
        has been expanded into n columns, where n is the number of distinct values in the column

        Inputs:
        =======
        conn : DBConnect object
        output_table : (string) name of the pivot table (output)
        col_types_dict : (dict) a dict of column names and types
        col_names_and_types_lst : (list) a list of column names and types, where any categorical
                                  column in the original table have
        label : (string) name of the label column (if it is an empty string, it will be ignored)

        Outputs:
        ========
        A Pivot table is created.
    '''
    # Render the "name type, name type, ..." column list for CREATE TABLE.
    cnames_and_types = ', '.join([' '.join(pair) for pair in col_names_and_types_lst])
    stmt = ''' '''
    data_dict = {}
    data_dict['output_table'] = output_table
    data_dict['col_names_and_types'] = cnames_and_types
    # Three DDL shapes, depending on which of the optional 'id' and label
    # columns exist in the source table.
    if(col_types_dict.has_key('id') and label):
        stmt = '''
            drop table if exists {output_table} cascade;
            create table {output_table}
            ({id_col} {id_col_type},
             {col_names_and_types},
             {label_col_name} {label_col_type}
            );
        '''
        data_dict['id_col'] = 'id'
        data_dict['id_col_type'] = col_types_dict['id']
        data_dict['label_col_name'] = label
        data_dict['label_col_type'] = col_types_dict[label]
    elif(col_types_dict.has_key('id')):
        #ID column exists, but there is no label column specified
        stmt = '''
            drop table if exists {output_table} cascade;
            create table {output_table}
            ({id_col} {id_col_type},
             {col_names_and_types}
            );
        '''
        data_dict['id_col'] = 'id'
        data_dict['id_col_type'] = col_types_dict['id']
    else:
        #Neither ID column nor label column exists (i.e there only are features in the table)
        stmt = '''
            drop table if exists {output_table} cascade;
            create table {output_table}
            (
             {col_names_and_types}
            );
        '''
    stmt = stmt.format(**data_dict)
    conn.executeQuery(stmt)
def pivotCategoricalColumns(conn, table_name, cols, label='', col_distinct_vals_dict=None):
    '''
        Take a table_name and a set of columns (some of which may be categorical
        and return a new table, where the categorical columns have been pivoted.
        This method uses the "Dummy Coding" approach:
        http://en.wikipedia.org/wiki/Categorical_variable#Dummy_coding

        Inputs:
        =======
        conn : A psycopg2 connection to a database.
        table_name : (String) the name of the input table
        cols : (List) a list of columns (some of which may be categorical) to be used as independent variable
        label : (String) the dependent column
        col_distinct_vals_dict : (dict) A dict of distinct values for each column. If not specified, this will be
                                 computed from the training data.

        Outputs:
        ========
        output_table : (String) a new table containing the categorical columns
                       which have been pivoted
        output_indep_cols : The new set of columns where every categorical column has been pivoted.
        output_dep_col : (String) the dependent column (un-transformed) in the output_table
        col_distinct_vals_dict : (dict) A dict of distinct values for each column
    '''
    #Since we allow the user to specify the table name as "schema_name.table_name) and since the
    #information schema requires the table name to be separated out from schema name, so the following
    #ste is required only for the query to look-up column types. It will be used thereafter.
    tbl_schema = 'public' if '.' not in table_name else table_name.split('.')[0]
    tbl_nm = table_name.split('.')[1] if '.' in table_name else table_name
    output_table = '{default_schema}.{default_prefix}{table_name}'.format(default_schema=default_schema,
                                                                          default_prefix=default_prefix,
                                                                          table_name=tbl_nm
                                                                         )
    col_types_dict = __getColTypesDict__(conn, tbl_schema, tbl_nm)
    #It is possible that the input columns could also have a 'bias' variable (intercept in linear regression)
    #The intercept is a number represented as a string. If such a value exists, remove it from the input columns
    #and consider it separately (we don't have to create a column for this variable in the transformed table).
    numeric_cols = []
    for col in cols:
        if (not col_types_dict.has_key(col) and isNumeric(col)):
            numeric_cols.append(col)
    #Remove the intercept variables from cols
    cols = [c for c in cols if c not in numeric_cols]
    #If all the columns in the input are numeric, return the original table along with its columns.
    has_categorical = False
    for col in cols:
        if(col_types_dict[col] in ['char','character varying', 'text']):
            has_categorical = True
    #Return original table and original list of columns
    if(not has_categorical):
        return table_name, numeric_cols+cols, label, col_distinct_vals_dict
    # Compute distinct values only when the caller did not supply them
    # (e.g. at scoring time the training-time mapping is passed back in).
    if (not col_distinct_vals_dict):
        col_distinct_vals_dict = __getColDistinctValsDict__(conn, cols, col_types_dict, table_name)
    col_names_and_types_lst = __getColNamesAndTypesList__(cols, col_types_dict, col_distinct_vals_dict)
    __createPivotTable__(conn, output_table, col_types_dict, col_names_and_types_lst, label)
    #Now insert values into the new table
    __binarizeInParallel__(conn, table_name, output_table, cols, col_types_dict, col_distinct_vals_dict, label)
    #First include all numeric columns that correspond to any intercept/constant/bias variables
    #Then combine them with the independent variables (transformed or un-transformed)
    output_indep_cols = numeric_cols + [c[0] for c in col_names_and_types_lst]
    output_dep_col = label
    return output_table, output_indep_cols, output_dep_col, col_distinct_vals_dict
def convertsColsToArray(conn, table_name, indep, dep=''):
    '''
        Convert a list of independent columns (all numeric) to an array column and return the transformed table

        Inputs:
        =======
        conn : A DBConnect object
        table_name : (string) the input table name
        indep : (list) a list of independent columns (all numeric)
        dep : (string) the dependent column in the input table. If empty, it will be ignored.

        Outputs:
        ========
        output_table : (string) the transformed table, where the list of columns in indep have been converted
                       to an array.
        indep_cols_arr_name : (string) the name of the independent column (of type array) in the transformed table

        Raises:
        =======
        ValueError : if any column in indep is categorical or otherwise non-numeric.
    '''
    # Split "schema.table" so the information-schema lookup gets proper names.
    tbl_schema = 'public' if '.' not in table_name else table_name.split('.')[0]
    tbl_nm = table_name.split('.')[1] if '.' in table_name else table_name
    col_types_dict = __getColTypesDict__(conn, tbl_schema, tbl_nm)
    # Strip any pivot-table prefix so prefixes do not stack on re-runs.
    out_tbl_nm = tbl_nm.replace(default_prefix, '')
    output_table = '{default_schema}.{default_prefix_arr}{table_name}'.format(default_schema=default_schema,
                                                                              default_prefix_arr=default_prefix_arr,
                                                                              table_name=out_tbl_nm
                                                                             )
    indep_cols_arr_name = 'indep'
    #Check all columns are numeric
    for col in indep:
        is_categorical = col in col_types_dict and col_types_dict[col] in ['char', 'character varying', 'text']
        # A name absent from the table must be a numeric literal (e.g. a '1'
        # intercept term); anything else is invalid input.
        is_unknown_non_numeric = col not in col_types_dict and not isNumeric(col)
        if is_categorical or is_unknown_non_numeric:
            # BUG FIX: the original executed `raise '<message>'`, which raises
            # TypeError at runtime (string exceptions were removed in Python 2.6)
            # and was followed by an unreachable `return`.
            raise ValueError('Only numeric columns supported. Use pivotCategoricalColumns() to transform categorical columns')
    data_dict = {}
    data_dict['table_name'] = table_name
    data_dict['output_table'] = output_table
    data_dict['list_of_indep_cols'] = ','.join(indep)
    data_dict['indep_cols_arr_name'] = indep_cols_arr_name
    if(dep):
        convert_to_arr_stmt = '''
            drop table if exists {output_table} cascade;
            create table {output_table} as
            (
                select array[{list_of_indep_cols}] as {indep_cols_arr_name},
                       {dep}
                from {table_name}
            );
        '''
        data_dict['dep'] = dep
    else:
        convert_to_arr_stmt = '''
            drop table if exists {output_table} cascade;
            create table {output_table} as
            (
                select array[{list_of_indep_cols}] as {indep_cols_arr_name}
                from {table_name}
            );
        '''
    convert_to_arr_stmt = convert_to_arr_stmt.format(**data_dict)
    conn.executeQuery(convert_to_arr_stmt)
    return output_table, indep_cols_arr_name
# Ad-hoc smoke test: requires a live database with the 'cuse_dat' and
# 'wine_training_set' sample tables loaded.
if(__name__=='__main__'):
    from pymadlib import DBConnect
    conn = DBConnect()
    # Mixed numeric + categorical columns: a pivot table should be created.
    output_table, indep, dep, cols_distinct_vals = pivotCategoricalColumns(conn,'cuse_dat',['1','age','education','wantsmore','notusing'],'yesusing')
    print 'output table :', output_table
    print 'output independent columns :', indep
    print 'dependent col :',dep
    #Verify if the input has all numeric columns, the input table is returned unchanged.
    output_table, indep, dep, cols_distinct_vals = pivotCategoricalColumns(conn,'wine_training_set',['1','alcohol','proline','hue','color_intensity','flavanoids'],'quality')
    print 'output table :', output_table
    print 'output independent columns :', indep
    print 'dependent col :',dep
| bsd-2-clause |
sanghinitin/golismero | thirdparty_libs/chardet/langgreekmodel.py | 235 | 12664 | ######################## BEGIN LICENSE BLOCK ########################
# The Original Code is Mozilla Communicator client code.
#
# The Initial Developer of the Original Code is
# Netscape Communications Corporation.
# Portions created by the Initial Developer are Copyright (C) 1998
# the Initial Developer. All Rights Reserved.
#
# Contributor(s):
# Mark Pilgrim - port to Python
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
# 02110-1301 USA
######################### END LICENSE BLOCK #########################
import constants
# 255: Control characters that usually does not exist in any text
# 254: Carriage/Return
# 253: symbol (punctuation) that does not belong to word
# 252: 0 - 9
# Character Mapping Table:
Latin7_CharToOrderMap = ( \
255,255,255,255,255,255,255,255,255,255,254,255,255,254,255,255, # 00
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, # 10
253,253,253,253,253,253,253,253,253,253,253,253,253,253,253,253, # 20
252,252,252,252,252,252,252,252,252,252,253,253,253,253,253,253, # 30
253, 82,100,104, 94, 98,101,116,102,111,187,117, 92, 88,113, 85, # 40
79,118,105, 83, 67,114,119, 95, 99,109,188,253,253,253,253,253, # 50
253, 72, 70, 80, 81, 60, 96, 93, 89, 68,120, 97, 77, 86, 69, 55, # 60
78,115, 65, 66, 58, 76,106,103, 87,107,112,253,253,253,253,253, # 70
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, # 80
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, # 90
253,233, 90,253,253,253,253,253,253,253,253,253,253, 74,253,253, # a0
253,253,253,253,247,248, 61, 36, 46, 71, 73,253, 54,253,108,123, # b0
110, 31, 51, 43, 41, 34, 91, 40, 52, 47, 44, 53, 38, 49, 59, 39, # c0
35, 48,250, 37, 33, 45, 56, 50, 84, 57,120,121, 17, 18, 22, 15, # d0
124, 1, 29, 20, 21, 3, 32, 13, 25, 5, 11, 16, 10, 6, 30, 4, # e0
9, 8, 14, 7, 2, 12, 28, 23, 42, 24, 64, 75, 19, 26, 27,253, # f0
)
win1253_CharToOrderMap = ( \
255,255,255,255,255,255,255,255,255,255,254,255,255,254,255,255, # 00
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, # 10
253,253,253,253,253,253,253,253,253,253,253,253,253,253,253,253, # 20
252,252,252,252,252,252,252,252,252,252,253,253,253,253,253,253, # 30
253, 82,100,104, 94, 98,101,116,102,111,187,117, 92, 88,113, 85, # 40
79,118,105, 83, 67,114,119, 95, 99,109,188,253,253,253,253,253, # 50
253, 72, 70, 80, 81, 60, 96, 93, 89, 68,120, 97, 77, 86, 69, 55, # 60
78,115, 65, 66, 58, 76,106,103, 87,107,112,253,253,253,253,253, # 70
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, # 80
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, # 90
253,233, 61,253,253,253,253,253,253,253,253,253,253, 74,253,253, # a0
253,253,253,253,247,253,253, 36, 46, 71, 73,253, 54,253,108,123, # b0
110, 31, 51, 43, 41, 34, 91, 40, 52, 47, 44, 53, 38, 49, 59, 39, # c0
35, 48,250, 37, 33, 45, 56, 50, 84, 57,120,121, 17, 18, 22, 15, # d0
124, 1, 29, 20, 21, 3, 32, 13, 25, 5, 11, 16, 10, 6, 30, 4, # e0
9, 8, 14, 7, 2, 12, 28, 23, 42, 24, 64, 75, 19, 26, 27,253, # f0
)
# Model Table:
# total sequences: 100%
# first 512 sequences: 98.2851%
# first 1024 sequences:1.7001%
# rest sequences: 0.0359%
# negative sequences: 0.0148%
GreekLangModel = ( \
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,3,2,2,3,3,3,3,3,3,3,3,1,3,3,3,0,2,2,3,3,0,3,0,3,2,0,3,3,3,0,
3,0,0,0,2,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,3,3,3,3,3,0,3,3,0,3,2,3,3,0,3,2,3,3,3,0,0,3,0,3,0,3,3,2,0,0,0,
2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,
0,2,3,2,2,3,3,3,3,3,3,3,3,0,3,3,3,3,0,2,3,3,0,3,3,3,3,2,3,3,3,0,
2,0,0,0,2,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,2,3,3,2,3,3,3,3,3,3,3,3,3,3,3,3,0,2,1,3,3,3,3,2,3,3,2,3,3,2,0,
0,0,0,0,2,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,3,3,3,3,0,3,3,3,3,3,3,0,3,3,0,3,3,3,3,3,3,3,3,3,3,0,3,2,3,3,0,
2,0,1,0,2,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,
0,3,3,3,3,3,2,3,0,0,0,0,3,3,0,3,1,3,3,3,0,3,3,0,3,3,3,3,0,0,0,0,
2,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,3,3,3,3,3,0,3,0,3,3,3,3,3,0,3,2,2,2,3,0,2,3,3,3,3,3,2,3,3,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,3,3,3,3,3,3,2,2,2,3,3,3,3,0,3,1,3,3,3,3,2,3,3,3,3,3,3,3,2,2,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,3,3,3,3,3,2,0,3,0,0,0,3,3,2,3,3,3,3,3,0,0,3,2,3,0,2,3,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,3,0,3,3,3,3,0,0,3,3,0,2,3,0,3,0,3,3,3,0,0,3,0,3,0,2,2,3,3,0,0,
0,0,1,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,3,3,3,3,3,2,0,3,2,3,3,3,3,0,3,3,3,3,3,0,3,3,2,3,2,3,3,2,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,3,3,2,3,2,3,3,3,3,3,3,0,2,3,2,3,2,2,2,3,2,3,3,2,3,0,2,2,2,3,0,
2,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,3,0,0,0,3,3,3,2,3,3,0,0,3,0,3,0,0,0,3,2,0,3,0,3,0,0,2,0,2,0,
0,0,0,0,2,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,3,3,3,3,0,3,3,3,3,3,3,0,3,3,0,3,0,0,0,3,3,0,3,3,3,0,0,1,2,3,0,
3,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,3,3,3,3,3,2,0,0,3,2,2,3,3,0,3,3,3,3,3,2,1,3,0,3,2,3,3,2,1,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,3,3,0,2,3,3,3,3,3,3,0,0,3,0,3,0,0,0,3,3,0,3,2,3,0,0,3,3,3,0,
3,0,0,0,2,0,0,0,0,0,3,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,3,3,3,3,0,3,3,3,3,3,3,0,0,3,0,3,0,0,0,3,2,0,3,2,3,0,0,3,2,3,0,
2,0,0,0,0,0,0,0,0,0,3,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,3,1,2,2,3,3,3,3,3,3,0,2,3,0,3,0,0,0,3,3,0,3,0,2,0,0,2,3,1,0,
2,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,3,0,3,3,3,3,0,3,0,3,3,2,3,0,3,3,3,3,3,3,0,3,3,3,0,2,3,0,0,3,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,3,0,3,3,3,0,0,3,0,0,0,3,3,0,3,0,2,3,3,0,0,3,0,3,0,3,3,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,3,0,0,0,3,3,3,3,3,3,0,0,3,0,2,0,0,0,3,3,0,3,0,3,0,0,2,0,2,0,
0,0,0,0,1,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,3,3,3,3,3,3,0,3,0,2,0,3,2,0,3,2,3,2,3,0,0,3,2,3,2,3,3,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,3,0,0,2,3,3,3,3,3,0,0,0,3,0,2,1,0,0,3,2,2,2,0,3,0,0,2,2,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,3,0,3,3,3,2,0,3,0,3,0,3,3,0,2,1,2,3,3,0,0,3,0,3,0,3,3,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,2,3,3,3,0,3,3,3,3,3,3,0,2,3,0,3,0,0,0,2,1,0,2,2,3,0,0,2,2,2,0,
0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,3,0,0,2,3,3,3,2,3,0,0,1,3,0,2,0,0,0,0,3,0,1,0,2,0,0,1,1,1,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,3,3,3,3,3,1,0,3,0,0,0,3,2,0,3,2,3,3,3,0,0,3,0,3,2,2,2,1,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,3,0,3,3,3,0,0,3,0,0,0,0,2,0,2,3,3,2,2,2,2,3,0,2,0,2,2,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,3,3,3,3,2,0,0,0,0,0,0,2,3,0,2,0,2,3,2,0,0,3,0,3,0,3,1,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,3,2,3,3,2,2,3,0,2,0,3,0,0,0,2,0,0,0,0,1,2,0,2,0,2,0,
0,2,0,2,0,2,2,0,0,1,0,2,2,2,0,2,2,2,0,2,2,2,0,0,2,0,0,1,0,0,0,0,
0,2,0,3,3,2,0,0,0,0,0,0,1,3,0,2,0,2,2,2,0,0,2,0,3,0,0,2,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,3,0,2,3,2,0,2,2,0,2,0,2,2,0,2,0,2,2,2,0,0,0,0,0,0,2,3,0,0,0,2,
0,1,2,0,0,0,0,2,2,0,0,0,2,1,0,2,2,0,0,0,0,0,0,1,0,2,0,0,0,0,0,0,
0,0,2,1,0,2,3,2,2,3,2,3,2,0,0,3,3,3,0,0,3,2,0,0,0,1,1,0,2,0,2,2,
0,2,0,2,0,2,2,0,0,2,0,2,2,2,0,2,2,2,2,0,0,2,0,0,0,2,0,1,0,0,0,0,
0,3,0,3,3,2,2,0,3,0,0,0,2,2,0,2,2,2,1,2,0,0,1,2,2,0,0,3,0,0,0,2,
0,1,2,0,0,0,1,2,0,0,0,0,0,0,0,2,2,0,1,0,0,2,0,0,0,2,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,2,3,3,2,2,0,0,0,2,0,2,3,3,0,2,0,0,0,0,0,0,2,2,2,0,2,2,0,2,0,2,
0,2,2,0,0,2,2,2,2,1,0,0,2,2,0,2,0,0,2,0,0,0,0,0,0,2,0,0,0,0,0,0,
0,2,0,3,2,3,0,0,0,3,0,0,2,2,0,2,0,2,2,2,0,0,2,0,0,0,0,0,0,0,0,2,
0,0,2,2,0,0,2,2,2,0,0,0,0,0,0,2,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,2,0,0,3,2,0,2,2,2,2,2,0,0,0,2,0,0,0,0,2,0,1,0,0,2,0,1,0,0,0,
0,2,2,2,0,2,2,0,1,2,0,2,2,2,0,2,2,2,2,1,2,2,0,0,2,0,0,0,0,0,0,0,
0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,
0,2,0,2,0,2,2,0,0,0,0,1,2,1,0,0,2,2,0,0,2,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,3,2,3,0,0,2,0,0,0,2,2,0,2,0,0,0,1,0,0,2,0,2,0,2,2,0,0,0,0,
0,0,2,0,0,0,0,2,2,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,
0,2,2,3,2,2,0,0,0,0,0,0,1,3,0,2,0,2,2,0,0,0,1,0,2,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,2,0,2,0,3,2,0,2,0,0,0,0,0,0,2,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,
0,0,2,0,0,0,0,1,1,0,0,2,1,2,0,2,2,0,1,0,0,1,0,0,0,2,0,0,0,0,0,0,
0,3,0,2,2,2,0,0,2,0,0,0,2,0,0,0,2,3,0,2,0,0,0,0,0,0,2,2,0,0,0,2,
0,1,2,0,0,0,1,2,2,1,0,0,0,2,0,0,2,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,3,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,2,1,2,0,2,2,0,2,0,0,2,0,0,0,0,1,2,1,0,2,1,0,0,0,0,0,0,0,0,0,0,
0,0,2,0,0,0,3,1,2,2,0,2,0,0,0,0,2,0,0,0,2,0,0,3,0,0,0,0,2,2,2,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,2,1,0,2,0,1,2,0,0,0,0,0,0,0,0,0,0,0,0,0,2,0,0,1,0,0,0,0,0,0,2,
0,2,2,0,0,2,2,2,2,2,0,1,2,0,0,0,2,2,0,1,0,2,0,0,2,2,0,0,0,0,0,0,
0,0,0,0,1,0,0,0,0,0,0,0,3,0,0,2,0,0,0,0,0,0,0,0,2,0,2,0,0,0,0,2,
0,1,2,0,0,0,0,2,2,1,0,1,0,1,0,2,2,2,1,0,0,0,0,0,0,1,0,0,0,0,0,0,
0,2,0,1,2,0,0,0,0,0,0,0,0,0,0,2,0,0,2,2,0,0,0,0,1,0,0,0,0,0,0,2,
0,2,2,0,0,0,0,2,2,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,2,0,0,2,0,0,0,
0,2,2,2,2,0,0,0,3,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,2,0,0,0,0,0,0,1,
0,0,2,0,0,0,0,1,2,0,0,0,0,0,0,2,2,1,1,0,0,0,0,0,0,1,0,0,0,0,0,0,
0,2,0,2,2,2,0,0,2,0,0,0,0,0,0,0,2,2,2,0,0,0,2,0,0,0,0,0,0,0,0,2,
0,0,1,0,0,0,0,2,1,0,0,0,0,0,0,1,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,
0,3,0,2,0,0,0,0,0,0,0,0,2,0,0,0,0,0,2,0,0,0,0,0,0,0,2,0,0,0,0,2,
0,0,2,0,0,0,0,2,2,0,0,0,0,1,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,2,0,2,2,1,0,0,0,0,0,0,2,0,0,2,0,2,2,2,0,0,0,0,0,0,2,0,0,0,0,2,
0,0,2,0,0,2,0,2,2,0,0,0,0,2,0,2,0,0,0,0,0,2,0,0,0,2,0,0,0,0,0,0,
0,0,3,0,0,0,2,2,0,2,2,0,0,0,0,0,2,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,1,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,2,0,0,2,0,0,0,0,0,
0,2,2,2,2,2,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,1,1,0,0,0,1,
0,0,0,0,0,0,0,2,1,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,2,2,0,0,0,0,0,2,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,
0,2,0,0,0,2,0,0,0,0,0,1,0,0,0,0,2,2,0,0,0,1,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,1,0,2,0,0,0,
0,2,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,1,0,0,0,0,1,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,1,0,0,2,0,2,0,0,0,
0,0,0,0,0,0,0,0,2,1,0,0,0,0,0,0,2,0,0,0,1,2,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
)
# Language model bundle for Greek encoded as ISO-8859-7, consumed by
# chardet's single-byte charset prober.
Latin7GreekModel = { \
  'charToOrderMap': Latin7_CharToOrderMap,
  'precedenceMatrix': GreekLangModel,
  'mTypicalPositiveRatio': 0.982851,
  'keepEnglishLetter': constants.False,
  'charsetName': "ISO-8859-7"
}
# Same Greek language model, but with the windows-1253 byte-to-order map.
Win1253GreekModel = { \
  'charToOrderMap': win1253_CharToOrderMap,
  'precedenceMatrix': GreekLangModel,
  'mTypicalPositiveRatio': 0.982851,
  'keepEnglishLetter': constants.False,
  'charsetName': "windows-1253"
}
| gpl-2.0 |
s3nk4s/flaskTutorials | FlaskApp/FlaskApp/venv/local/lib/python2.7/site-packages/pip/_vendor/requests/packages/chardet/jpcntx.py | 949 | 19104 | ######################## BEGIN LICENSE BLOCK ########################
# The Original Code is Mozilla Communicator client code.
#
# The Initial Developer of the Original Code is
# Netscape Communications Corporation.
# Portions created by the Initial Developer are Copyright (C) 1998
# the Initial Developer. All Rights Reserved.
#
# Contributor(s):
# Mark Pilgrim - port to Python
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
# 02110-1301 USA
######################### END LICENSE BLOCK #########################
from .compat import wrap_ord
NUM_OF_CATEGORY = 6          # number of frequency categories in jp2CharContext
DONT_KNOW = -1               # sentinel confidence when there is too little data
ENOUGH_REL_THRESHOLD = 100   # sequences needed before confidence is meaningful
MAX_REL_THRESHOLD = 1000     # stop accumulating after this many sequences
MINIMUM_DATA_THRESHOLD = 4   # below this, get_confidence() returns DONT_KNOW
# This is hiragana 2-char sequence table, the number in each cell represents its frequency category
jp2CharContext = (
(0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,1,0,0,0,0,0,0,0,0,0,0,1),
(2,4,0,4,0,3,0,4,0,3,4,4,4,2,4,3,3,4,3,2,3,3,4,2,3,3,3,2,4,1,4,3,3,1,5,4,3,4,3,4,3,5,3,0,3,5,4,2,0,3,1,0,3,3,0,3,3,0,1,1,0,4,3,0,3,3,0,4,0,2,0,3,5,5,5,5,4,0,4,1,0,3,4),
(0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,2),
(0,4,0,5,0,5,0,4,0,4,5,4,4,3,5,3,5,1,5,3,4,3,4,4,3,4,3,3,4,3,5,4,4,3,5,5,3,5,5,5,3,5,5,3,4,5,5,3,1,3,2,0,3,4,0,4,2,0,4,2,1,5,3,2,3,5,0,4,0,2,0,5,4,4,5,4,5,0,4,0,0,4,4),
(0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0),
(0,3,0,4,0,3,0,3,0,4,5,4,3,3,3,3,4,3,5,4,4,3,5,4,4,3,4,3,4,4,4,4,5,3,4,4,3,4,5,5,4,5,5,1,4,5,4,3,0,3,3,1,3,3,0,4,4,0,3,3,1,5,3,3,3,5,0,4,0,3,0,4,4,3,4,3,3,0,4,1,1,3,4),
(0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0),
(0,4,0,3,0,3,0,4,0,3,4,4,3,2,2,1,2,1,3,1,3,3,3,3,3,4,3,1,3,3,5,3,3,0,4,3,0,5,4,3,3,5,4,4,3,4,4,5,0,1,2,0,1,2,0,2,2,0,1,0,0,5,2,2,1,4,0,3,0,1,0,4,4,3,5,4,3,0,2,1,0,4,3),
(0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0),
(0,3,0,5,0,4,0,2,1,4,4,2,4,1,4,2,4,2,4,3,3,3,4,3,3,3,3,1,4,2,3,3,3,1,4,4,1,1,1,4,3,3,2,0,2,4,3,2,0,3,3,0,3,1,1,0,0,0,3,3,0,4,2,2,3,4,0,4,0,3,0,4,4,5,3,4,4,0,3,0,0,1,4),
(1,4,0,4,0,4,0,4,0,3,5,4,4,3,4,3,5,4,3,3,4,3,5,4,4,4,4,3,4,2,4,3,3,1,5,4,3,2,4,5,4,5,5,4,4,5,4,4,0,3,2,2,3,3,0,4,3,1,3,2,1,4,3,3,4,5,0,3,0,2,0,4,5,5,4,5,4,0,4,0,0,5,4),
(0,5,0,5,0,4,0,3,0,4,4,3,4,3,3,3,4,0,4,4,4,3,4,3,4,3,3,1,4,2,4,3,4,0,5,4,1,4,5,4,4,5,3,2,4,3,4,3,2,4,1,3,3,3,2,3,2,0,4,3,3,4,3,3,3,4,0,4,0,3,0,4,5,4,4,4,3,0,4,1,0,1,3),
(0,3,1,4,0,3,0,2,0,3,4,4,3,1,4,2,3,3,4,3,4,3,4,3,4,4,3,2,3,1,5,4,4,1,4,4,3,5,4,4,3,5,5,4,3,4,4,3,1,2,3,1,2,2,0,3,2,0,3,1,0,5,3,3,3,4,3,3,3,3,4,4,4,4,5,4,2,0,3,3,2,4,3),
(0,2,0,3,0,1,0,1,0,0,3,2,0,0,2,0,1,0,2,1,3,3,3,1,2,3,1,0,1,0,4,2,1,1,3,3,0,4,3,3,1,4,3,3,0,3,3,2,0,0,0,0,1,0,0,2,0,0,0,0,0,4,1,0,2,3,2,2,2,1,3,3,3,4,4,3,2,0,3,1,0,3,3),
(0,4,0,4,0,3,0,3,0,4,4,4,3,3,3,3,3,3,4,3,4,2,4,3,4,3,3,2,4,3,4,5,4,1,4,5,3,5,4,5,3,5,4,0,3,5,5,3,1,3,3,2,2,3,0,3,4,1,3,3,2,4,3,3,3,4,0,4,0,3,0,4,5,4,4,5,3,0,4,1,0,3,4),
(0,2,0,3,0,3,0,0,0,2,2,2,1,0,1,0,0,0,3,0,3,0,3,0,1,3,1,0,3,1,3,3,3,1,3,3,3,0,1,3,1,3,4,0,0,3,1,1,0,3,2,0,0,0,0,1,3,0,1,0,0,3,3,2,0,3,0,0,0,0,0,3,4,3,4,3,3,0,3,0,0,2,3),
(2,3,0,3,0,2,0,1,0,3,3,4,3,1,3,1,1,1,3,1,4,3,4,3,3,3,0,0,3,1,5,4,3,1,4,3,2,5,5,4,4,4,4,3,3,4,4,4,0,2,1,1,3,2,0,1,2,0,0,1,0,4,1,3,3,3,0,3,0,1,0,4,4,4,5,5,3,0,2,0,0,4,4),
(0,2,0,1,0,3,1,3,0,2,3,3,3,0,3,1,0,0,3,0,3,2,3,1,3,2,1,1,0,0,4,2,1,0,2,3,1,4,3,2,0,4,4,3,1,3,1,3,0,1,0,0,1,0,0,0,1,0,0,0,0,4,1,1,1,2,0,3,0,0,0,3,4,2,4,3,2,0,1,0,0,3,3),
(0,1,0,4,0,5,0,4,0,2,4,4,2,3,3,2,3,3,5,3,3,3,4,3,4,2,3,0,4,3,3,3,4,1,4,3,2,1,5,5,3,4,5,1,3,5,4,2,0,3,3,0,1,3,0,4,2,0,1,3,1,4,3,3,3,3,0,3,0,1,0,3,4,4,4,5,5,0,3,0,1,4,5),
(0,2,0,3,0,3,0,0,0,2,3,1,3,0,4,0,1,1,3,0,3,4,3,2,3,1,0,3,3,2,3,1,3,0,2,3,0,2,1,4,1,2,2,0,0,3,3,0,0,2,0,0,0,1,0,0,0,0,2,2,0,3,2,1,3,3,0,2,0,2,0,0,3,3,1,2,4,0,3,0,2,2,3),
(2,4,0,5,0,4,0,4,0,2,4,4,4,3,4,3,3,3,1,2,4,3,4,3,4,4,5,0,3,3,3,3,2,0,4,3,1,4,3,4,1,4,4,3,3,4,4,3,1,2,3,0,4,2,0,4,1,0,3,3,0,4,3,3,3,4,0,4,0,2,0,3,5,3,4,5,2,0,3,0,0,4,5),
(0,3,0,4,0,1,0,1,0,1,3,2,2,1,3,0,3,0,2,0,2,0,3,0,2,0,0,0,1,0,1,1,0,0,3,1,0,0,0,4,0,3,1,0,2,1,3,0,0,0,0,0,0,3,0,0,0,0,0,0,0,4,2,2,3,1,0,3,0,0,0,1,4,4,4,3,0,0,4,0,0,1,4),
(1,4,1,5,0,3,0,3,0,4,5,4,4,3,5,3,3,4,4,3,4,1,3,3,3,3,2,1,4,1,5,4,3,1,4,4,3,5,4,4,3,5,4,3,3,4,4,4,0,3,3,1,2,3,0,3,1,0,3,3,0,5,4,4,4,4,4,4,3,3,5,4,4,3,3,5,4,0,3,2,0,4,4),
(0,2,0,3,0,1,0,0,0,1,3,3,3,2,4,1,3,0,3,1,3,0,2,2,1,1,0,0,2,0,4,3,1,0,4,3,0,4,4,4,1,4,3,1,1,3,3,1,0,2,0,0,1,3,0,0,0,0,2,0,0,4,3,2,4,3,5,4,3,3,3,4,3,3,4,3,3,0,2,1,0,3,3),
(0,2,0,4,0,3,0,2,0,2,5,5,3,4,4,4,4,1,4,3,3,0,4,3,4,3,1,3,3,2,4,3,0,3,4,3,0,3,4,4,2,4,4,0,4,5,3,3,2,2,1,1,1,2,0,1,5,0,3,3,2,4,3,3,3,4,0,3,0,2,0,4,4,3,5,5,0,0,3,0,2,3,3),
(0,3,0,4,0,3,0,1,0,3,4,3,3,1,3,3,3,0,3,1,3,0,4,3,3,1,1,0,3,0,3,3,0,0,4,4,0,1,5,4,3,3,5,0,3,3,4,3,0,2,0,1,1,1,0,1,3,0,1,2,1,3,3,2,3,3,0,3,0,1,0,1,3,3,4,4,1,0,1,2,2,1,3),
(0,1,0,4,0,4,0,3,0,1,3,3,3,2,3,1,1,0,3,0,3,3,4,3,2,4,2,0,1,0,4,3,2,0,4,3,0,5,3,3,2,4,4,4,3,3,3,4,0,1,3,0,0,1,0,0,1,0,0,0,0,4,2,3,3,3,0,3,0,0,0,4,4,4,5,3,2,0,3,3,0,3,5),
(0,2,0,3,0,0,0,3,0,1,3,0,2,0,0,0,1,0,3,1,1,3,3,0,0,3,0,0,3,0,2,3,1,0,3,1,0,3,3,2,0,4,2,2,0,2,0,0,0,4,0,0,0,0,0,0,0,0,0,0,0,2,1,2,0,1,0,1,0,0,0,1,3,1,2,0,0,0,1,0,0,1,4),
(0,3,0,3,0,5,0,1,0,2,4,3,1,3,3,2,1,1,5,2,1,0,5,1,2,0,0,0,3,3,2,2,3,2,4,3,0,0,3,3,1,3,3,0,2,5,3,4,0,3,3,0,1,2,0,2,2,0,3,2,0,2,2,3,3,3,0,2,0,1,0,3,4,4,2,5,4,0,3,0,0,3,5),
(0,3,0,3,0,3,0,1,0,3,3,3,3,0,3,0,2,0,2,1,1,0,2,0,1,0,0,0,2,1,0,0,1,0,3,2,0,0,3,3,1,2,3,1,0,3,3,0,0,1,0,0,0,0,0,2,0,0,0,0,0,2,3,1,2,3,0,3,0,1,0,3,2,1,0,4,3,0,1,1,0,3,3),
(0,4,0,5,0,3,0,3,0,4,5,5,4,3,5,3,4,3,5,3,3,2,5,3,4,4,4,3,4,3,4,5,5,3,4,4,3,4,4,5,4,4,4,3,4,5,5,4,2,3,4,2,3,4,0,3,3,1,4,3,2,4,3,3,5,5,0,3,0,3,0,5,5,5,5,4,4,0,4,0,1,4,4),
(0,4,0,4,0,3,0,3,0,3,5,4,4,2,3,2,5,1,3,2,5,1,4,2,3,2,3,3,4,3,3,3,3,2,5,4,1,3,3,5,3,4,4,0,4,4,3,1,1,3,1,0,2,3,0,2,3,0,3,0,0,4,3,1,3,4,0,3,0,2,0,4,4,4,3,4,5,0,4,0,0,3,4),
(0,3,0,3,0,3,1,2,0,3,4,4,3,3,3,0,2,2,4,3,3,1,3,3,3,1,1,0,3,1,4,3,2,3,4,4,2,4,4,4,3,4,4,3,2,4,4,3,1,3,3,1,3,3,0,4,1,0,2,2,1,4,3,2,3,3,5,4,3,3,5,4,4,3,3,0,4,0,3,2,2,4,4),
(0,2,0,1,0,0,0,0,0,1,2,1,3,0,0,0,0,0,2,0,1,2,1,0,0,1,0,0,0,0,3,0,0,1,0,1,1,3,1,0,0,0,1,1,0,1,1,0,0,0,0,0,2,0,0,0,0,0,0,0,0,1,1,2,2,0,3,4,0,0,0,1,1,0,0,1,0,0,0,0,0,1,1),
(0,1,0,0,0,1,0,0,0,0,4,0,4,1,4,0,3,0,4,0,3,0,4,0,3,0,3,0,4,1,5,1,4,0,0,3,0,5,0,5,2,0,1,0,0,0,2,1,4,0,1,3,0,0,3,0,0,3,1,1,4,1,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0),
(1,4,0,5,0,3,0,2,0,3,5,4,4,3,4,3,5,3,4,3,3,0,4,3,3,3,3,3,3,2,4,4,3,1,3,4,4,5,4,4,3,4,4,1,3,5,4,3,3,3,1,2,2,3,3,1,3,1,3,3,3,5,3,3,4,5,0,3,0,3,0,3,4,3,4,4,3,0,3,0,2,4,3),
(0,1,0,4,0,0,0,0,0,1,4,0,4,1,4,2,4,0,3,0,1,0,1,0,0,0,0,0,2,0,3,1,1,1,0,3,0,0,0,1,2,1,0,0,1,1,1,1,0,1,0,0,0,1,0,0,3,0,0,0,0,3,2,0,2,2,0,1,0,0,0,2,3,2,3,3,0,0,0,0,2,1,0),
(0,5,1,5,0,3,0,3,0,5,4,4,5,1,5,3,3,0,4,3,4,3,5,3,4,3,3,2,4,3,4,3,3,0,3,3,1,4,4,3,4,4,4,3,4,5,5,3,2,3,1,1,3,3,1,3,1,1,3,3,2,4,5,3,3,5,0,4,0,3,0,4,4,3,5,3,3,0,3,4,0,4,3),
(0,5,0,5,0,3,0,2,0,4,4,3,5,2,4,3,3,3,4,4,4,3,5,3,5,3,3,1,4,0,4,3,3,0,3,3,0,4,4,4,4,5,4,3,3,5,5,3,2,3,1,2,3,2,0,1,0,0,3,2,2,4,4,3,1,5,0,4,0,3,0,4,3,1,3,2,1,0,3,3,0,3,3),
(0,4,0,5,0,5,0,4,0,4,5,5,5,3,4,3,3,2,5,4,4,3,5,3,5,3,4,0,4,3,4,4,3,2,4,4,3,4,5,4,4,5,5,0,3,5,5,4,1,3,3,2,3,3,1,3,1,0,4,3,1,4,4,3,4,5,0,4,0,2,0,4,3,4,4,3,3,0,4,0,0,5,5),
(0,4,0,4,0,5,0,1,1,3,3,4,4,3,4,1,3,0,5,1,3,0,3,1,3,1,1,0,3,0,3,3,4,0,4,3,0,4,4,4,3,4,4,0,3,5,4,1,0,3,0,0,2,3,0,3,1,0,3,1,0,3,2,1,3,5,0,3,0,1,0,3,2,3,3,4,4,0,2,2,0,4,4),
(2,4,0,5,0,4,0,3,0,4,5,5,4,3,5,3,5,3,5,3,5,2,5,3,4,3,3,4,3,4,5,3,2,1,5,4,3,2,3,4,5,3,4,1,2,5,4,3,0,3,3,0,3,2,0,2,3,0,4,1,0,3,4,3,3,5,0,3,0,1,0,4,5,5,5,4,3,0,4,2,0,3,5),
(0,5,0,4,0,4,0,2,0,5,4,3,4,3,4,3,3,3,4,3,4,2,5,3,5,3,4,1,4,3,4,4,4,0,3,5,0,4,4,4,4,5,3,1,3,4,5,3,3,3,3,3,3,3,0,2,2,0,3,3,2,4,3,3,3,5,3,4,1,3,3,5,3,2,0,0,0,0,4,3,1,3,3),
(0,1,0,3,0,3,0,1,0,1,3,3,3,2,3,3,3,0,3,0,0,0,3,1,3,0,0,0,2,2,2,3,0,0,3,2,0,1,2,4,1,3,3,0,0,3,3,3,0,1,0,0,2,1,0,0,3,0,3,1,0,3,0,0,1,3,0,2,0,1,0,3,3,1,3,3,0,0,1,1,0,3,3),
(0,2,0,3,0,2,1,4,0,2,2,3,1,1,3,1,1,0,2,0,3,1,2,3,1,3,0,0,1,0,4,3,2,3,3,3,1,4,2,3,3,3,3,1,0,3,1,4,0,1,1,0,1,2,0,1,1,0,1,1,0,3,1,3,2,2,0,1,0,0,0,2,3,3,3,1,0,0,0,0,0,2,3),
(0,5,0,4,0,5,0,2,0,4,5,5,3,3,4,3,3,1,5,4,4,2,4,4,4,3,4,2,4,3,5,5,4,3,3,4,3,3,5,5,4,5,5,1,3,4,5,3,1,4,3,1,3,3,0,3,3,1,4,3,1,4,5,3,3,5,0,4,0,3,0,5,3,3,1,4,3,0,4,0,1,5,3),
(0,5,0,5,0,4,0,2,0,4,4,3,4,3,3,3,3,3,5,4,4,4,4,4,4,5,3,3,5,2,4,4,4,3,4,4,3,3,4,4,5,5,3,3,4,3,4,3,3,4,3,3,3,3,1,2,2,1,4,3,3,5,4,4,3,4,0,4,0,3,0,4,4,4,4,4,1,0,4,2,0,2,4),
(0,4,0,4,0,3,0,1,0,3,5,2,3,0,3,0,2,1,4,2,3,3,4,1,4,3,3,2,4,1,3,3,3,0,3,3,0,0,3,3,3,5,3,3,3,3,3,2,0,2,0,0,2,0,0,2,0,0,1,0,0,3,1,2,2,3,0,3,0,2,0,4,4,3,3,4,1,0,3,0,0,2,4),
(0,0,0,4,0,0,0,0,0,0,1,0,1,0,2,0,0,0,0,0,1,0,2,0,1,0,0,0,0,0,3,1,3,0,3,2,0,0,0,1,0,3,2,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,3,4,0,2,0,0,0,0,0,0,2),
(0,2,1,3,0,2,0,2,0,3,3,3,3,1,3,1,3,3,3,3,3,3,4,2,2,1,2,1,4,0,4,3,1,3,3,3,2,4,3,5,4,3,3,3,3,3,3,3,0,1,3,0,2,0,0,1,0,0,1,0,0,4,2,0,2,3,0,3,3,0,3,3,4,2,3,1,4,0,1,2,0,2,3),
(0,3,0,3,0,1,0,3,0,2,3,3,3,0,3,1,2,0,3,3,2,3,3,2,3,2,3,1,3,0,4,3,2,0,3,3,1,4,3,3,2,3,4,3,1,3,3,1,1,0,1,1,0,1,0,1,0,1,0,0,0,4,1,1,0,3,0,3,1,0,2,3,3,3,3,3,1,0,0,2,0,3,3),
(0,0,0,0,0,0,0,0,0,0,3,0,2,0,3,0,0,0,0,0,0,0,3,0,0,0,0,0,0,0,3,0,3,0,3,1,0,1,0,1,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,3,0,2,0,2,3,0,0,0,0,0,0,0,0,3),
(0,2,0,3,1,3,0,3,0,2,3,3,3,1,3,1,3,1,3,1,3,3,3,1,3,0,2,3,1,1,4,3,3,2,3,3,1,2,2,4,1,3,3,0,1,4,2,3,0,1,3,0,3,0,0,1,3,0,2,0,0,3,3,2,1,3,0,3,0,2,0,3,4,4,4,3,1,0,3,0,0,3,3),
(0,2,0,1,0,2,0,0,0,1,3,2,2,1,3,0,1,1,3,0,3,2,3,1,2,0,2,0,1,1,3,3,3,0,3,3,1,1,2,3,2,3,3,1,2,3,2,0,0,1,0,0,0,0,0,0,3,0,1,0,0,2,1,2,1,3,0,3,0,0,0,3,4,4,4,3,2,0,2,0,0,2,4),
(0,0,0,1,0,1,0,0,0,0,1,0,0,0,1,0,0,0,0,0,0,0,1,1,1,0,0,0,0,0,0,0,0,0,2,2,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,1,3,1,0,0,0,0,0,0,0,3),
(0,3,0,3,0,2,0,3,0,3,3,3,2,3,2,2,2,0,3,1,3,3,3,2,3,3,0,0,3,0,3,2,2,0,2,3,1,4,3,4,3,3,2,3,1,5,4,4,0,3,1,2,1,3,0,3,1,1,2,0,2,3,1,3,1,3,0,3,0,1,0,3,3,4,4,2,1,0,2,1,0,2,4),
(0,1,0,3,0,1,0,2,0,1,4,2,5,1,4,0,2,0,2,1,3,1,4,0,2,1,0,0,2,1,4,1,1,0,3,3,0,5,1,3,2,3,3,1,0,3,2,3,0,1,0,0,0,0,0,0,1,0,0,0,0,4,0,1,0,3,0,2,0,1,0,3,3,3,4,3,3,0,0,0,0,2,3),
(0,0,0,1,0,0,0,0,0,0,2,0,1,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,3,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,2,1,0,0,1,0,0,0,0,0,3),
(0,1,0,3,0,4,0,3,0,2,4,3,1,0,3,2,2,1,3,1,2,2,3,1,1,1,2,1,3,0,1,2,0,1,3,2,1,3,0,5,5,1,0,0,1,3,2,1,0,3,0,0,1,0,0,0,0,0,3,4,0,1,1,1,3,2,0,2,0,1,0,2,3,3,1,2,3,0,1,0,1,0,4),
(0,0,0,1,0,3,0,3,0,2,2,1,0,0,4,0,3,0,3,1,3,0,3,0,3,0,1,0,3,0,3,1,3,0,3,3,0,0,1,2,1,1,1,0,1,2,0,0,0,1,0,0,1,0,0,0,0,0,0,0,0,2,2,1,2,0,0,2,0,0,0,0,2,3,3,3,3,0,0,0,0,1,4),
(0,0,0,3,0,3,0,0,0,0,3,1,1,0,3,0,1,0,2,0,1,0,0,0,0,0,0,0,1,0,3,0,2,0,2,3,0,0,2,2,3,1,2,0,0,1,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,3,0,0,2,0,0,0,0,2,3),
(2,4,0,5,0,5,0,4,0,3,4,3,3,3,4,3,3,3,4,3,4,4,5,4,5,5,5,2,3,0,5,5,4,1,5,4,3,1,5,4,3,4,4,3,3,4,3,3,0,3,2,0,2,3,0,3,0,0,3,3,0,5,3,2,3,3,0,3,0,3,0,3,4,5,4,5,3,0,4,3,0,3,4),
(0,3,0,3,0,3,0,3,0,3,3,4,3,2,3,2,3,0,4,3,3,3,3,3,3,3,3,0,3,2,4,3,3,1,3,4,3,4,4,4,3,4,4,3,2,4,4,1,0,2,0,0,1,1,0,2,0,0,3,1,0,5,3,2,1,3,0,3,0,1,2,4,3,2,4,3,3,0,3,2,0,4,4),
(0,3,0,3,0,1,0,0,0,1,4,3,3,2,3,1,3,1,4,2,3,2,4,2,3,4,3,0,2,2,3,3,3,0,3,3,3,0,3,4,1,3,3,0,3,4,3,3,0,1,1,0,1,0,0,0,4,0,3,0,0,3,1,2,1,3,0,4,0,1,0,4,3,3,4,3,3,0,2,0,0,3,3),
(0,3,0,4,0,1,0,3,0,3,4,3,3,0,3,3,3,1,3,1,3,3,4,3,3,3,0,0,3,1,5,3,3,1,3,3,2,5,4,3,3,4,5,3,2,5,3,4,0,1,0,0,0,0,0,2,0,0,1,1,0,4,2,2,1,3,0,3,0,2,0,4,4,3,5,3,2,0,1,1,0,3,4),
(0,5,0,4,0,5,0,2,0,4,4,3,3,2,3,3,3,1,4,3,4,1,5,3,4,3,4,0,4,2,4,3,4,1,5,4,0,4,4,4,4,5,4,1,3,5,4,2,1,4,1,1,3,2,0,3,1,0,3,2,1,4,3,3,3,4,0,4,0,3,0,4,4,4,3,3,3,0,4,2,0,3,4),
(1,4,0,4,0,3,0,1,0,3,3,3,1,1,3,3,2,2,3,3,1,0,3,2,2,1,2,0,3,1,2,1,2,0,3,2,0,2,2,3,3,4,3,0,3,3,1,2,0,1,1,3,1,2,0,0,3,0,1,1,0,3,2,2,3,3,0,3,0,0,0,2,3,3,4,3,3,0,1,0,0,1,4),
(0,4,0,4,0,4,0,0,0,3,4,4,3,1,4,2,3,2,3,3,3,1,4,3,4,0,3,0,4,2,3,3,2,2,5,4,2,1,3,4,3,4,3,1,3,3,4,2,0,2,1,0,3,3,0,0,2,0,3,1,0,4,4,3,4,3,0,4,0,1,0,2,4,4,4,4,4,0,3,2,0,3,3),
(0,0,0,1,0,4,0,0,0,0,0,0,1,1,1,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,1,0,3,2,0,0,1,0,0,0,1,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,2),
(0,2,0,3,0,4,0,4,0,1,3,3,3,0,4,0,2,1,2,1,1,1,2,0,3,1,1,0,1,0,3,1,0,0,3,3,2,0,1,1,0,0,0,0,0,1,0,2,0,2,2,0,3,1,0,0,1,0,1,1,0,1,2,0,3,0,0,0,0,1,0,0,3,3,4,3,1,0,1,0,3,0,2),
(0,0,0,3,0,5,0,0,0,0,1,0,2,0,3,1,0,1,3,0,0,0,2,0,0,0,1,0,0,0,1,1,0,0,4,0,0,0,2,3,0,1,4,1,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,3,0,0,0,0,0,1,0,0,0,0,0,0,0,2,0,0,3,0,0,0,0,0,3),
(0,2,0,5,0,5,0,1,0,2,4,3,3,2,5,1,3,2,3,3,3,0,4,1,2,0,3,0,4,0,2,2,1,1,5,3,0,0,1,4,2,3,2,0,3,3,3,2,0,2,4,1,1,2,0,1,1,0,3,1,0,1,3,1,2,3,0,2,0,0,0,1,3,5,4,4,4,0,3,0,0,1,3),
(0,4,0,5,0,4,0,4,0,4,5,4,3,3,4,3,3,3,4,3,4,4,5,3,4,5,4,2,4,2,3,4,3,1,4,4,1,3,5,4,4,5,5,4,4,5,5,5,2,3,3,1,4,3,1,3,3,0,3,3,1,4,3,4,4,4,0,3,0,4,0,3,3,4,4,5,0,0,4,3,0,4,5),
(0,4,0,4,0,3,0,3,0,3,4,4,4,3,3,2,4,3,4,3,4,3,5,3,4,3,2,1,4,2,4,4,3,1,3,4,2,4,5,5,3,4,5,4,1,5,4,3,0,3,2,2,3,2,1,3,1,0,3,3,3,5,3,3,3,5,4,4,2,3,3,4,3,3,3,2,1,0,3,2,1,4,3),
(0,4,0,5,0,4,0,3,0,3,5,5,3,2,4,3,4,0,5,4,4,1,4,4,4,3,3,3,4,3,5,5,2,3,3,4,1,2,5,5,3,5,5,2,3,5,5,4,0,3,2,0,3,3,1,1,5,1,4,1,0,4,3,2,3,5,0,4,0,3,0,5,4,3,4,3,0,0,4,1,0,4,4),
(1,3,0,4,0,2,0,2,0,2,5,5,3,3,3,3,3,0,4,2,3,4,4,4,3,4,0,0,3,4,5,4,3,3,3,3,2,5,5,4,5,5,5,4,3,5,5,5,1,3,1,0,1,0,0,3,2,0,4,2,0,5,2,3,2,4,1,3,0,3,0,4,5,4,5,4,3,0,4,2,0,5,4),
(0,3,0,4,0,5,0,3,0,3,4,4,3,2,3,2,3,3,3,3,3,2,4,3,3,2,2,0,3,3,3,3,3,1,3,3,3,0,4,4,3,4,4,1,1,4,4,2,0,3,1,0,1,1,0,4,1,0,2,3,1,3,3,1,3,4,0,3,0,1,0,3,1,3,0,0,1,0,2,0,0,4,4),
(0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0),
(0,3,0,3,0,2,0,3,0,1,5,4,3,3,3,1,4,2,1,2,3,4,4,2,4,4,5,0,3,1,4,3,4,0,4,3,3,3,2,3,2,5,3,4,3,2,2,3,0,0,3,0,2,1,0,1,2,0,0,0,0,2,1,1,3,1,0,2,0,4,0,3,4,4,4,5,2,0,2,0,0,1,3),
(0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,1,1,1,0,0,1,1,0,0,0,4,2,1,1,0,1,0,3,2,0,0,3,1,1,1,2,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,3,0,1,0,0,0,2,0,0,0,1,4,0,4,2,1,0,0,0,0,0,1),
(0,0,0,0,0,0,0,0,0,1,0,1,0,0,0,0,1,0,0,0,0,0,0,1,0,1,0,0,0,0,3,1,0,0,0,2,0,2,1,0,0,1,2,1,0,1,1,0,0,3,0,0,0,0,0,0,0,0,0,0,0,1,3,1,0,0,0,0,0,1,0,0,2,1,0,0,0,0,0,0,0,0,2),
(0,4,0,4,0,4,0,3,0,4,4,3,4,2,4,3,2,0,4,4,4,3,5,3,5,3,3,2,4,2,4,3,4,3,1,4,0,2,3,4,4,4,3,3,3,4,4,4,3,4,1,3,4,3,2,1,2,1,3,3,3,4,4,3,3,5,0,4,0,3,0,4,3,3,3,2,1,0,3,0,0,3,3),
(0,4,0,3,0,3,0,3,0,3,5,5,3,3,3,3,4,3,4,3,3,3,4,4,4,3,3,3,3,4,3,5,3,3,1,3,2,4,5,5,5,5,4,3,4,5,5,3,2,2,3,3,3,3,2,3,3,1,2,3,2,4,3,3,3,4,0,4,0,2,0,4,3,2,2,1,2,0,3,0,0,4,1),
)
class JapaneseContextAnalysis:
    """Score how often adjacent hiragana pairs occur in natural Japanese.

    Subclasses implement get_order() to map encoding-specific byte pairs to
    hiragana indices; this base class accumulates pair frequencies against
    the jp2CharContext table and converts them into a confidence value.
    """

    def __init__(self):
        self.reset()

    def reset(self):
        self._mTotalRel = 0  # total sequences received
        # category counters; each integer counts sequences in its category
        self._mRelSample = [0] * NUM_OF_CATEGORY
        # if the last byte in the current buffer is not the last byte of a
        # character, we need to know how many bytes to skip in the next buffer
        self._mNeedToSkipCharNum = 0
        self._mLastCharOrder = -1  # the order of the previous char
        # If this flag is set to True, detection is done and a conclusion has
        # been made
        self._mDone = False

    def feed(self, aBuf, aLen):
        if self._mDone:
            return

        # The buffer we got is byte oriented, and a character may span more
        # than one buffer.  If the last one or two bytes of the previous
        # buffer were not a complete character, we recorded how many bytes
        # are needed to complete it and skip those bytes here.  We could
        # buffer them and analyse the character once complete, but one
        # character makes little difference, so simply skipping it keeps the
        # logic simple and improves performance.
        i = self._mNeedToSkipCharNum
        # NOTE(review): _mNeedToSkipCharNum is only overwritten when a
        # character overflows this buffer; if a buffer is consumed exactly,
        # a stale skip count could carry over to a later call -- confirm
        # against upstream chardet before changing.
        while i < aLen:
            order, charLen = self.get_order(aBuf[i:i + 2])
            i += charLen
            if i > aLen:
                # Character straddles the buffer boundary: remember how many
                # of its bytes to skip next time, and break the pair chain.
                self._mNeedToSkipCharNum = i - aLen
                self._mLastCharOrder = -1
            else:
                if (order != -1) and (self._mLastCharOrder != -1):
                    self._mTotalRel += 1
                    if self._mTotalRel > MAX_REL_THRESHOLD:
                        self._mDone = True
                        break
                    self._mRelSample[jp2CharContext[self._mLastCharOrder][order]] += 1
                self._mLastCharOrder = order

    def got_enough_data(self):
        return self._mTotalRel > ENOUGH_REL_THRESHOLD

    def get_confidence(self):
        # This is just one way to calculate confidence.  It works well for me.
        if self._mTotalRel > MINIMUM_DATA_THRESHOLD:
            # BUG FIX: force true division -- under Python 2 the original
            # integer division always truncated this ratio to 0.
            return (self._mTotalRel - self._mRelSample[0]) / float(self._mTotalRel)
        else:
            return DONT_KNOW

    def get_order(self, aBuf):
        # Encoding-agnostic default: no hiragana order, one byte consumed.
        return -1, 1
class SJISContextAnalysis(JapaneseContextAnalysis):
    def get_order(self, aBuf):
        """Return (hiragana order, char byte length) for a Shift-JIS byte pair."""
        if not aBuf:
            return -1, 1
        # find out the current character's byte length from its lead byte
        first_char = wrap_ord(aBuf[0])
        if ((0x81 <= first_char <= 0x9F) or (0xE0 <= first_char <= 0xFC)):
            charLen = 2
        else:
            charLen = 1

        # return its order if it is hiragana
        if len(aBuf) > 1:
            second_char = wrap_ord(aBuf[1])
            # BUG FIX: the lead byte for hiragana in Shift-JIS is 0x82
            # (octal '\202' in the original C code); the port compared
            # against decimal 202 (0xCA), so hiragana was never matched.
            if (first_char == 0x82) and (0x9F <= second_char <= 0xF1):
                return second_char - 0x9F, charLen

        return -1, charLen
class EUCJPContextAnalysis(JapaneseContextAnalysis):
    def get_order(self, aBuf):
        """Return (hiragana order, char byte length) for an EUC-JP byte pair."""
        if not aBuf:
            return -1, 1
        lead = wrap_ord(aBuf[0])
        # Determine the width of the current character from its lead byte.
        if lead == 0x8E or 0xA1 <= lead <= 0xFE:
            width = 2
        elif lead == 0x8F:
            width = 3
        else:
            width = 1
        # Hiragana occupies row 0xA4, cells 0xA1-0xF3; report its order.
        if len(aBuf) > 1 and lead == 0xA4:
            trail = wrap_ord(aBuf[1])
            if 0xA1 <= trail <= 0xF3:
                return trail - 0xA1, width
        return -1, width
# flake8: noqa
| mit |
caldwell/servo | tests/jquery/run_jquery.py | 215 | 9582 | #!/usr/bin/env python
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
import os
import re
import subprocess
import sys
import BaseHTTPServer
import SimpleHTTPServer
import SocketServer
import threading
import urlparse
# List of jQuery modules that will be tested.
# TODO(gw): Disabled most of them as something has been
# introduced very recently that causes the resource task
# to panic - and hard fail doesn't exit the servo
# process when this happens.
# See https://github.com/servo/servo/issues/6210 and
# https://github.com/servo/servo/issues/6211
JQUERY_MODULES = [
    # "ajax", # panics
    # "attributes",
    # "callbacks",
    # "core", # mozjs crash
    # "css",
    # "data",
    # "deferred",
    # "dimensions",
    # "effects",
    # "event", # panics
    # "manipulation", # mozjs crash
    # "offset",
    # "queue",
    "selector",
    # "serialize",
    # "support",
    # "traversing",
    # "wrap"
]

# Port to run the HTTP server on for jQuery.
TEST_SERVER_PORT = 8192

# A regex for matching console.log output lines from the test runner.
# Raw string so the \[ escapes reach the regex engine verbatim instead of
# relying on Python preserving unrecognized string escapes.
REGEX_PATTERN = r"^\[jQuery test\] \[([0-9]+)/([0-9]+)/([0-9]+)] (.*)"
# The result of a single test group.
class TestResult:
    def __init__(self, success, fail, total, text):
        self.success = int(success)
        self.fail = int(fail)
        self.total = int(total)
        self.text = text

    def __key(self):
        # Identifying fields as a tuple, shared by __eq__/__ne__/__hash__.
        return (self.success, self.fail, self.total, self.text)

    def __eq__(self, other):
        return self.__key() == other.__key()

    def __ne__(self, other):
        return not self.__eq__(other)

    def __hash__(self):
        return hash(self.__key())

    def __repr__(self):
        return "ok={0} fail={1} total={2}".format(self.success, self.fail, self.total)
# Parse a line, producing a (name, TestResult) pair.
# Throws if unable to parse.
def parse_line_to_result(line):
    groups = re.match(REGEX_PATTERN, line).groups()
    passed, failed, total, name = groups
    return name, TestResult(passed, failed, total, line)
# Parse an entire buffer of lines to a dictionary
# of test results, keyed by the test name.  Later duplicates of a name
# overwrite earlier ones, matching dict-assignment semantics.
def parse_string_to_results(buffer):
    return dict(parse_line_to_result(line) for line in buffer.splitlines())
# Run servo against a specific jQuery test module and parse its output.
# Yields (test name, TestResult) pairs as result lines arrive on stdout;
# non-matching output lines are silently skipped.
def run_servo(servo_exe, module):
    url = "http://localhost:{0}/jquery/test/?module={1}".format(TEST_SERVER_PORT, module)
    proc = subprocess.Popen([servo_exe, url, "-z", "-f"], stdout=subprocess.PIPE)
    while True:
        line = proc.stdout.readline()
        if not line:
            break
        try:
            # parse_line_to_result raises AttributeError when the regex
            # does not match (re.match returns None).
            yield parse_line_to_result(line.rstrip())
        except AttributeError:
            pass
def module_filename(module):
    """Build the filename for a module's expected-results file."""
    return 'expected_{0}.txt'.format(module)
# Read an existing set of expected results to compare against.
def read_existing_results(module):
    with open(module_filename(module), 'r') as handle:
        return parse_string_to_results(handle.read())
# Write a set of results to the module's expected-results file.
def write_results(module, results):
    # BUG FIX: the body previously iterated the caller's global
    # `test_results` instead of the `results` parameter, so the argument
    # passed in was silently ignored.
    with open(module_filename(module), 'w') as out:
        for result in results.values():
            out.write(result.text + '\n')
# Print usage if command line args are incorrect
def print_usage():
    usage = "USAGE: {0} test|update servo_binary jquery_base_dir"
    print(usage.format(sys.argv[0]))
# Run a simple HTTP server to serve up the jQuery test suite
def run_http_server():
    """Serve the current directory over HTTP on TEST_SERVER_PORT (blocks forever)."""
    class ThreadingSimpleServer(SocketServer.ThreadingMixIn,
                                BaseHTTPServer.HTTPServer):
        # Allow quick restarts without waiting for the TIME_WAIT socket.
        allow_reuse_address = True

    class RequestHandler(SimpleHTTPServer.SimpleHTTPRequestHandler):
        # TODO(gw): HACK copy the fixed version from python
        # main repo - due to https://bugs.python.org/issue23112
        def send_head(self):
            path = self.translate_path(self.path)
            f = None
            if os.path.isdir(path):
                parts = urlparse.urlsplit(self.path)
                if not parts.path.endswith('/'):
                    # redirect browser - doing basically what apache does
                    self.send_response(301)
                    new_parts = (parts[0], parts[1], parts[2] + '/',
                                 parts[3], parts[4])
                    new_url = urlparse.urlunsplit(new_parts)
                    self.send_header("Location", new_url)
                    self.end_headers()
                    return None
                # Prefer a directory index file if one exists; otherwise
                # fall through (for-else) to a generated directory listing.
                for index in "index.html", "index.htm":
                    index = os.path.join(path, index)
                    if os.path.exists(index):
                        path = index
                        break
                else:
                    return self.list_directory(path)
            ctype = self.guess_type(path)
            try:
                # Always read in binary mode. Opening files in text mode may cause
                # newline translations, making the actual size of the content
                # transmitted *less* than the content-length!
                f = open(path, 'rb')
            except IOError:
                self.send_error(404, "File not found")
                return None
            try:
                self.send_response(200)
                self.send_header("Content-type", ctype)
                fs = os.fstat(f.fileno())
                self.send_header("Content-Length", str(fs[6]))
                self.send_header("Last-Modified", self.date_time_string(fs.st_mtime))
                self.end_headers()
                # Caller is responsible for closing the returned file object.
                return f
            except:
                # Bare except is deliberate: close the file handle on any
                # failure, then re-raise unchanged.
                f.close()
                raise

        def log_message(self, format, *args):
            # Silence per-request logging so test output stays readable.
            return

    server = ThreadingSimpleServer(('', TEST_SERVER_PORT), RequestHandler)
    while True:
        sys.stdout.flush()
        server.handle_request()
if __name__ == '__main__':
    if len(sys.argv) == 4:
        cmd = sys.argv[1]
        servo_exe = sys.argv[2]
        base_dir = sys.argv[3]
        os.chdir(base_dir)

        # Ensure servo binary can be found
        if not os.path.isfile(servo_exe):
            print("Unable to find {0}. This script expects an existing build of Servo.".format(servo_exe))
            sys.exit(1)

        # Start the test server as a daemon thread so the process can exit
        # without joining it.
        httpd_thread = threading.Thread(target=run_http_server)
        httpd_thread.setDaemon(True)
        httpd_thread.start()

        if cmd == "test":
            print("Testing jQuery on Servo!")

            # Counters across all modules: group-level and individual tests.
            test_count = 0
            unexpected_count = 0

            individual_success = 0
            individual_total = 0

            # Test each module separately
            for module in JQUERY_MODULES:
                print("\t{0}".format(module))

                prev_test_results = read_existing_results(module)

                for name, current_result in run_servo(servo_exe, module):
                    test_count += 1
                    individual_success += current_result.success
                    individual_total += current_result.total

                    # If this test was in the previous results, compare them.
                    if name in prev_test_results:
                        prev_result = prev_test_results[name]
                        if prev_result == current_result:
                            print("\t\tOK: {0}".format(name))
                        else:
                            unexpected_count += 1
                            print("\t\tFAIL: {0}: WAS {1} NOW {2}".format(name, prev_result, current_result))
                        # Remove matched entries; leftovers are "missing" tests.
                        del prev_test_results[name]
                    else:
                        # There was a new test that wasn't expected
                        unexpected_count += 1
                        print("\t\tNEW: {0}".format(current_result.text))

                # Check what's left over, these are tests that were expected but didn't run this time.
                for name in prev_test_results:
                    test_count += 1
                    unexpected_count += 1
                    print("\t\tMISSING: {0}".format(prev_test_results[name].text))

            print("\tRan {0} test groups. {1} unexpected results.".format(test_count, unexpected_count))
            # NOTE(review): raises ZeroDivisionError if no individual tests
            # ran at all -- confirm that is acceptable for CI.
            print("\t{0} tests succeeded of {1} ({2:.2f}%)".format(individual_success,
                                                                   individual_total,
                                                                   100.0 * individual_success / individual_total))

            # Non-zero exit code signals regressions to the caller.
            if unexpected_count > 0:
                sys.exit(1)
        elif cmd == "update":
            print("Updating jQuery expected results")
            for module in JQUERY_MODULES:
                print("\t{0}".format(module))
                test_results = {}
                for name, test_result in run_servo(servo_exe, module):
                    print("\t\t{0} {1}".format(name, test_result))
                    test_results[name] = test_result
                write_results(module, test_results)
        else:
            print_usage()
    else:
        print_usage()
| mpl-2.0 |
repotvsupertuga/repo | plugin.video.SportsDevil/service/oscrypto/errors.py | 7 | 1657 | # coding: utf-8
from __future__ import unicode_literals, division, absolute_import, print_function
import sys
import socket
__all__ = [
'AsymmetricKeyError',
'CACertsError',
'LibraryNotFoundError',
'SignatureError',
'TLSError',
'TLSVerificationError',
]
class LibraryNotFoundError(Exception):
    """Raised when a required shared library cannot be located."""
class SignatureError(Exception):
    """Raised when validating a signature fails."""
class AsymmetricKeyError(Exception):
    """Raised when a key is invalid or unsupported."""
class IncompleteAsymmetricKeyError(AsymmetricKeyError):
    """Raised when a key is missing information necessary to use it."""
class CACertsError(Exception):
    """Raised when exporting CA certs from the OS trust store fails."""
class TLSError(socket.error):
    """An exception related to TLS functionality."""

    message = None

    def __init__(self, message):
        # Mirror the message into args so pickling/str behave like a
        # normal exception.
        self.args = (message,)
        self.message = message

    def __unicode__(self):
        return self.message

    def __str__(self):
        text = self.__unicode__()
        # Python 2 requires __str__ to return bytes.
        if sys.version_info < (3,):
            text = text.encode('utf-8')
        return text
class TLSVerificationError(TLSError):
    """A server certificate verification error happened during a TLS handshake."""

    certificate = None

    def __init__(self, message, certificate):
        super(TLSVerificationError, self).__init__(message)
        self.certificate = certificate
        # Overwrite args so both message and certificate round-trip.
        self.args = (message, certificate)
| gpl-2.0 |
djcatter/diaphora | pygments/style.py | 13 | 3779 | # -*- coding: utf-8 -*-
"""
pygments.style
~~~~~~~~~~~~~~
Basic style object.
:copyright: Copyright 2006-2015 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
from pygments.token import Token, STANDARD_TYPES
from pygments.util import add_metaclass
class StyleMeta(type):
    """Metaclass that compiles a Style subclass's ``styles`` dict into the
    internal ``_styles`` lookup table when the class is created."""

    def __new__(mcs, name, bases, dct):
        obj = type.__new__(mcs, name, bases, dct)
        # Ensure every standard token type has at least an empty style entry.
        for token in STANDARD_TYPES:
            if token not in obj.styles:
                obj.styles[token] = ''

        def colorformat(text):
            # Normalize '#rrggbb' / '#rgb' to a bare 6-digit hex string;
            # the empty string passes through unchanged.
            if text[0:1] == '#':
                col = text[1:]
                if len(col) == 6:
                    return col
                elif len(col) == 3:
                    return col[0]*2 + col[1]*2 + col[2]*2
            elif text == '':
                return ''
            assert False, "wrong color format %r" % text

        _styles = obj._styles = {}

        for ttype in obj.styles:
            # Iterating ``ttype.split()`` walks the token hierarchy from the
            # root down so parents are populated before children inherit.
            for token in ttype.split():
                if token in _styles:
                    continue
                ndef = _styles.get(token.parent, None)
                styledefs = obj.styles.get(token, '').split()
                # ndef slot layout:
                # [color, bold, italic, underline, bgcolor, border,
                #  roman, sans, mono]
                if not ndef or token is None:
                    ndef = ['', 0, 0, 0, '', '', 0, 0, 0]
                elif 'noinherit' in styledefs and token is not Token:
                    # 'noinherit' resets to the root token style, not empty.
                    ndef = _styles[Token][:]
                else:
                    # Copy the parent's style so mutation below is local.
                    ndef = ndef[:]
                _styles[token] = ndef
                for styledef in obj.styles.get(token, '').split():
                    if styledef == 'noinherit':
                        pass
                    elif styledef == 'bold':
                        ndef[1] = 1
                    elif styledef == 'nobold':
                        ndef[1] = 0
                    elif styledef == 'italic':
                        ndef[2] = 1
                    elif styledef == 'noitalic':
                        ndef[2] = 0
                    elif styledef == 'underline':
                        ndef[3] = 1
                    elif styledef == 'nounderline':
                        ndef[3] = 0
                    elif styledef[:3] == 'bg:':
                        ndef[4] = colorformat(styledef[3:])
                    elif styledef[:7] == 'border:':
                        ndef[5] = colorformat(styledef[7:])
                    elif styledef == 'roman':
                        ndef[6] = 1
                    elif styledef == 'sans':
                        ndef[7] = 1
                    elif styledef == 'mono':
                        ndef[8] = 1
                    else:
                        # Anything unrecognized is treated as the
                        # foreground color.
                        ndef[0] = colorformat(styledef)

        return obj

    def style_for_token(cls, token):
        """Return the compiled style dict for a single token type."""
        t = cls._styles[token]
        return {
            'color': t[0] or None,
            'bold': bool(t[1]),
            'italic': bool(t[2]),
            'underline': bool(t[3]),
            'bgcolor': t[4] or None,
            'border': t[5] or None,
            'roman': bool(t[6]) or None,
            'sans': bool(t[7]) or None,
            'mono': bool(t[8]) or None,
        }

    def list_styles(cls):
        """Return (token, style dict) pairs for every known token type."""
        return list(cls)

    def styles_token(cls, ttype):
        """Return True if this style defines an entry for *ttype*."""
        return ttype in cls._styles

    def __iter__(cls):
        for token in cls._styles:
            yield token, cls.style_for_token(token)

    def __len__(cls):
        return len(cls._styles)
@add_metaclass(StyleMeta)
class Style(object):
    """Base class for all Pygments highlighting styles; subclasses override
    the class attributes below and StyleMeta compiles them at class-creation
    time."""

    #: overall background color (``None`` means transparent)
    background_color = '#ffffff'

    #: highlight background color
    highlight_color = '#ffffcc'

    #: Style definitions for individual token types.
    styles = {}
deKupini/erp | addons/website_event_sale/models/sale_order.py | 1 | 4845 | # -*- coding: utf-8 -*-
from openerp import SUPERUSER_ID
from openerp.osv import osv
from openerp.tools.translate import _
from openerp.exceptions import UserError
class sale_order(osv.Model):
    _inherit = "sale.order"

    def _cart_find_product_line(self, cr, uid, ids, product_id=None, line_id=None, context=None, **kwargs):
        """Return matching cart lines, restricted to the event ticket carried
        in the context (if any)."""
        line_ids = super(sale_order, self)._cart_find_product_line(cr, uid, ids, product_id, line_id, context=context)
        if line_id:
            return line_ids
        for so in self.browse(cr, uid, ids, context=context):
            domain = [('id', 'in', line_ids)]
            # NOTE(review): assumes context is a dict here; a None context
            # would raise AttributeError -- confirm callers always pass one.
            if context.get("event_ticket_id"):
                domain += [('event_ticket_id', '=', context.get("event_ticket_id"))]
            return self.pool.get('sale.order.line').search(cr, SUPERUSER_ID, domain, context=context)

    def _website_product_id_change(self, cr, uid, ids, order_id, product_id, qty=0, line_id=None, context=None):
        """Compute order-line values, enriching them with event ticket data."""
        # BUG FIX: the incoming context was previously discarded
        # (context=None), hiding ticket/pricing information from the parent
        # implementation.
        values = super(sale_order, self)._website_product_id_change(cr, uid, ids, order_id, product_id, qty=qty, line_id=line_id, context=context)

        # Resolve the ticket: explicit context key > existing line > product default.
        event_ticket_id = None
        if context.get("event_ticket_id"):
            event_ticket_id = context.get("event_ticket_id")
        elif line_id:
            line = self.pool.get('sale.order.line').browse(cr, SUPERUSER_ID, line_id, context=context)
            if line.event_ticket_id:
                event_ticket_id = line.event_ticket_id.id
        else:
            product = self.pool.get('product.product').browse(cr, uid, product_id, context=context)
            if product.event_ticket_ids:
                event_ticket_id = product.event_ticket_ids[0].id

        if event_ticket_id:
            ticket = self.pool.get('event.event.ticket').browse(cr, uid, event_ticket_id, context=context)
            if product_id != ticket.product_id.id:
                raise UserError(_("The ticket doesn't match with this product."))
            values['product_id'] = ticket.product_id.id
            values['event_id'] = ticket.event_id.id
            values['event_ticket_id'] = ticket.id
            values['price_unit'] = ticket.price
            values['name'] = "%s\n%s" % (ticket.event_id.display_name, ticket.name)
        return values

    def _cart_update(self, cr, uid, ids, product_id=None, line_id=None, add_qty=0, set_qty=0, context=None, **kwargs):
        """Update cart quantities, enforcing seat availability and keeping
        attendee registrations in sync with the ticket quantity."""
        OrderLine = self.pool['sale.order.line']
        Attendee = self.pool['event.registration']
        Ticket = self.pool['event.event.ticket']
        if line_id:
            line = OrderLine.browse(cr, uid, line_id, context=context)
            ticket = line.event_ticket_id
            old_qty = int(line.product_uom_qty)
        else:
            line, ticket = None, None
            ticket_ids = Ticket.search(cr, uid, [('product_id', '=', product_id)], limit=1, context=context)
            if ticket_ids:
                ticket = Ticket.browse(cr, uid, ticket_ids[0], context=context)
            old_qty = 0
        # BUG FIX: was `(add_qty or 0 + old_qty)`, which parses as
        # `add_qty or (0 + old_qty)` and *replaced* the quantity instead of
        # incrementing it; adding must yield old quantity + added quantity.
        new_qty = set_qty if set_qty else (add_qty or 0) + old_qty

        # case: buying tickets for a sold out ticket
        values = {}
        if ticket and ticket.seats_available <= 0:
            values['warning'] = _('Sorry, The %(ticket)s tickets for the %(event)s event are sold out.') % {
                'ticket': ticket.name,
                'event': ticket.event_id.name}
            new_qty, set_qty, add_qty = 0, 0, 0
        # case: buying tickets, too much attendees
        elif ticket and new_qty > ticket.seats_available:
            values['warning'] = _('Sorry, only %(remaining_seats)d seats are still available for the %(ticket)s ticket for the %(event)s event.') % {
                'remaining_seats': ticket.seats_available,
                'ticket': ticket.name,
                'event': ticket.event_id.name}
            new_qty, set_qty, add_qty = ticket.seats_available, ticket.seats_available, 0

        values.update(super(sale_order, self)._cart_update(
            cr, uid, ids, product_id, line_id, add_qty, set_qty, context, **kwargs))

        # removing attendees
        if ticket and new_qty < old_qty:
            attendees = Attendee.search(
                cr, uid, [
                    ('state', '!=', 'cancel'),
                    # BUG FIX: a missing comma here made this a
                    # tuple-call of a tuple, raising TypeError at runtime.
                    ('sale_order_id', '=', ids[0]),
                    ('event_ticket_id', '=', ticket.id),
                ], offset=new_qty, limit=(old_qty - new_qty),
                order='create_date asc', context=context)
            Attendee.button_reg_cancel(cr, uid, attendees, context=context)
        # adding attendees
        elif ticket and new_qty > old_qty:
            line = OrderLine.browse(cr, uid, values['line_id'], context=context)
            line._update_registrations(confirm=False, registration_data=kwargs.get('registration_data', []))
        return values
| agpl-3.0 |
yarikoptic/pystatsmodels | statsmodels/tsa/tests/test_stattools.py | 3 | 7864 | from statsmodels.tsa.stattools import (adfuller, acf, pacf_ols, pacf_yw,
pacf, grangercausalitytests,
coint, acovf)
from statsmodels.tsa.base.datetools import dates_from_range
import numpy as np
from numpy.testing import assert_almost_equal, assert_equal, assert_raises
from numpy import genfromtxt#, concatenate
from statsmodels.datasets import macrodata, sunspots
from pandas import Series, Index
import os
# Shared precision levels (number of decimal places) used in the
# assert_almost_equal comparisons throughout this module.
DECIMAL_8 = 8
DECIMAL_6 = 6
DECIMAL_5 = 5
DECIMAL_4 = 4
DECIMAL_3 = 3
DECIMAL_2 = 2
DECIMAL_1 = 1
class CheckADF(object):
    """
    Test Augmented Dickey-Fuller

    Test values taken from Stata.

    Base class: subclasses must set ``self.res1`` (the ``adfuller``
    result tuple) and the expected ``teststat``, ``pvalue`` and
    ``critvalues`` in their ``__init__``.
    """
    # Keys into the critical-value dict stored at res1[4].
    levels = ['1%', '5%', '10%']
    # Class-level fixtures shared by all subclasses (loaded once at import).
    data = macrodata.load()
    x = data.data['realgdp']  # used by the unit-root cases below
    y = data.data['infl']     # used by the "No Unit Root" cases below

    def test_teststat(self):
        # res1[0] is the ADF test statistic.
        assert_almost_equal(self.res1[0], self.teststat, DECIMAL_5)

    def test_pvalue(self):
        # res1[1] is the associated p-value.
        assert_almost_equal(self.res1[1], self.pvalue, DECIMAL_5)

    def test_critvalues(self):
        # res1[4] maps '1%'/'5%'/'10%' to critical values.
        critvalues = [self.res1[4][lev] for lev in self.levels]
        assert_almost_equal(critvalues, self.critvalues, DECIMAL_2)
class TestADFConstant(CheckADF):
    """
    Dickey-Fuller test for unit root

    Constant-only regression on realgdp with a fixed lag length
    (no automatic lag selection); expected values from Stata.
    """
    def __init__(self):
        self.res1 = adfuller(self.x, regression="c", autolag=None,
                             maxlag=4)
        self.teststat = .97505319
        self.pvalue = .99399563
        self.critvalues = [-3.476, -2.883, -2.573]


class TestADFConstantTrend(CheckADF):
    """
    ADF with constant and linear trend ("ct") on realgdp.
    """
    def __init__(self):
        self.res1 = adfuller(self.x, regression="ct", autolag=None,
                             maxlag=4)
        self.teststat = -1.8566374
        self.pvalue = .67682968
        self.critvalues = [-4.007, -3.437, -3.137]


#class TestADFConstantTrendSquared(CheckADF):
#    """
#    """
#    pass
#TODO: get test values from R?


class TestADFNoConstant(CheckADF):
    """
    ADF with no constant term ("nc") on realgdp.
    """
    def __init__(self):
        self.res1 = adfuller(self.x, regression="nc", autolag=None,
                             maxlag=4)
        self.teststat = 3.5227498
        self.pvalue = .99999  # Stata does not return a p-value for noconstant.
                              # Tau^max in MacKinnon (1994) is missing, so it is
                              # assumed that its right-tail is well-behaved
        self.critvalues = [-2.587, -1.950, -1.617]


# No Unit Root

class TestADFConstant2(CheckADF):
    """Constant-only ADF on the 'infl' series (y)."""
    def __init__(self):
        self.res1 = adfuller(self.y, regression="c", autolag=None,
                             maxlag=1)
        self.teststat = -4.3346988
        self.pvalue = .00038661
        self.critvalues = [-3.476, -2.883, -2.573]


class TestADFConstantTrend2(CheckADF):
    """Constant-and-trend ADF on the 'infl' series (y)."""
    def __init__(self):
        self.res1 = adfuller(self.y, regression="ct", autolag=None,
                             maxlag=1)
        self.teststat = -4.425093
        self.pvalue = .00199633
        self.critvalues = [-4.006, -3.437, -3.137]


class TestADFNoConstant2(CheckADF):
    """No-constant ADF on the 'infl' series (y)."""
    def __init__(self):
        self.res1 = adfuller(self.y, regression="nc", autolag=None,
                             maxlag=1)
        self.teststat = -2.4511596
        self.pvalue = 0.013747  # Stata does not return a p-value for noconstant
                                # this value is just taken from our results
        self.critvalues = [-2.587,-1.950,-1.617]
class CheckCorrGram(object):
    """
    Set up for ACF, PACF tests.

    Loads the realgdp series and the Stata-generated expected results
    (``results_corrgram.csv``) as class-level fixtures shared by the
    correlogram test cases below.
    """
    data = macrodata.load()
    x = data.data['realgdp']
    filename = os.path.join(os.path.dirname(os.path.abspath(__file__)),
                            "results", "results_corrgram.csv")
    # Pass the path directly so genfromtxt opens *and closes* the file
    # itself; the previous ``open(filename, "rb")`` handle was never
    # closed (resource leak / ResourceWarning).
    results = genfromtxt(filename, delimiter=",", names=True, dtype=float)
    #not needed: add 1. for lag zero
    #self.results['acvar'] = np.concatenate(([1.], self.results['acvar']))
class TestACF(CheckCorrGram):
    """
    Test Autocorrelation Function
    """
    def __init__(self):
        # Expected values computed by Stata (see results_corrgram.csv).
        self.acf = self.results['acvar']
        #self.acf = np.concatenate(([1.], self.acf))
        self.qstat = self.results['Q1']
        # With qstat=True and alpha set, acf returns
        # (acf, confint, qstat, pvalues).
        self.res1 = acf(self.x, nlags=40, qstat=True, alpha=.05)
        self.confint_res = self.results[['acvar_lb','acvar_ub']].view((float,
                                                                       2))

    def test_acf(self):
        # Skip lag 0 (always 1.0); compare lags 1..40 against Stata.
        assert_almost_equal(self.res1[0][1:41], self.acf, DECIMAL_8)

    def test_confint(self):
        # Center the confidence band around zero before comparing.
        centered = self.res1[1] - self.res1[1].mean(1)[:,None]
        assert_almost_equal(centered[1:41], self.confint_res, DECIMAL_8)

    def test_qstat(self):
        assert_almost_equal(self.res1[2][:40], self.qstat, DECIMAL_3)
        # 3 decimal places because of stata rounding

#    def pvalue(self):
#        pass
#NOTE: shouldn't need testing if Q stat is correct
class TestACF_FFT(CheckCorrGram):
    """
    Test Autocorrelation Function using FFT
    """
    def __init__(self):
        self.acf = self.results['acvarfft']
        self.qstat = self.results['Q1']
        # No alpha here, so acf returns (acf, qstat, pvalues).
        self.res1 = acf(self.x, nlags=40, qstat=True, fft=True)

    def test_acf(self):
        # Compare lags 1.. against the Stata FFT-based values.
        assert_almost_equal(self.res1[0][1:], self.acf, DECIMAL_8)

    def test_qstat(self):
        #todo why is res1/qstat 1 short
        assert_almost_equal(self.res1[1], self.qstat, DECIMAL_3)
class TestPACF(CheckCorrGram):
    """Partial autocorrelation tests against Stata-generated values."""

    def __init__(self):
        self.pacfols = self.results['PACOLS']
        self.pacfyw = self.results['PACYW']

    def test_ols(self):
        pacfols, confint = pacf(self.x, nlags=40, alpha=.05, method="ols")
        assert_almost_equal(pacfols[1:], self.pacfols, DECIMAL_6)
        # Center the confidence band around zero before comparing.
        centered = confint - confint.mean(1)[:,None]
        # from edited Stata ado file
        res = [[-.1375625, .1375625]] * 40
        assert_almost_equal(centered[1:41], res, DECIMAL_6)

    def test_yw(self):
        pacfyw = pacf_yw(self.x, nlags=40, method="mle")
        assert_almost_equal(pacfyw[1:], self.pacfyw, DECIMAL_8)

    def test_ld(self):
        # Levinson-Durbin variants should agree with Yule-Walker, both
        # for the biased ("mle"/"ldb") and unbiased ("yw"/"ldu") forms.
        pacfyw = pacf_yw(self.x, nlags=40, method="mle")
        pacfld = pacf(self.x, nlags=40, method="ldb")
        assert_almost_equal(pacfyw, pacfld, DECIMAL_8)
        pacfyw = pacf(self.x, nlags=40, method="yw")
        pacfld = pacf(self.x, nlags=40, method="ldu")
        assert_almost_equal(pacfyw, pacfld, DECIMAL_8)
class CheckCoint(object):
    """
    Test Cointegration Test Results for 2-variable system

    Test values taken from Stata.

    Subclasses must set ``self.coint_t`` (the computed statistic) and
    ``self.teststat`` (the expected value) in their ``__init__``.
    """
    levels = ['1%', '5%', '10%']
    # Class-level fixtures shared by subclasses (loaded once at import).
    data = macrodata.load()
    y1 = data.data['realcons']
    y2 = data.data['realgdp']

    def test_tstat(self):
        assert_almost_equal(self.coint_t,self.teststat, DECIMAL_4)


class TestCoint_t(CheckCoint):
    """
    Get AR(1) parameter on residuals
    """
    def __init__(self):
        # coint() returns a tuple; element 0 is the t-statistic.
        self.coint_t = coint(self.y1, self.y2, regression ="c")[0]
        self.teststat = -1.8208817
def test_grangercausality():
    """Compare grangercausalitytests against R's lmtest::grangertest."""
    # some example data: log-differenced realgdp/realcons
    mdata = macrodata.load().data
    mdata = mdata[['realgdp','realcons']]
    data = mdata.view((float,2))
    data = np.diff(np.log(data), axis=0)

    #R: lmtest:grangertest
    r_result = [0.243097, 0.7844328, 195, 2] #f_test
    # Column order reversed (realcons -> realgdp); gr[2][0] holds the
    # lag-2 test dict.
    gr = grangercausalitytests(data[:,1::-1], 2, verbose=False)
    assert_almost_equal(r_result, gr[2][0]['ssr_ftest'], decimal=7)
    # The parameter F-test and the SSR-based F-test should agree here.
    assert_almost_equal(gr[2][0]['params_ftest'], gr[2][0]['ssr_ftest'],
                        decimal=7)
def test_pandasacovf():
    # acovf must give identical results for a pandas Series and the
    # underlying ndarray.
    s = Series(range(1, 11))
    assert_almost_equal(acovf(s), acovf(s.values))


def test_acovf2d():
    # A single-column DataFrame is accepted and matches the ndarray
    # result ...
    dta = sunspots.load_pandas().data
    dta.index = Index(dates_from_range('1700', '2008'))
    del dta["YEAR"]
    res = acovf(dta)
    assert_equal(res, acovf(dta.values))
    # ... but a genuinely 2-D array (more than one column) must raise.
    X = np.random.random((10,2))
    assert_raises(ValueError, acovf, X)
if __name__=="__main__":
    # Run this module's test suite directly (nose-era runner).
    import nose
#    nose.runmodule(argv=[__file__, '-vvs','-x','-pdb'], exit=False)
    import numpy as np
    np.testing.run_module_suite()
| bsd-3-clause |
danielhers/dynet | examples/variational-autoencoder/basic-image-recon/vae.py | 5 | 6690 | from __future__ import print_function
from utils import load_mnist, make_grid, pre_pillow_float_img_process, save_image
import numpy as np
import argparse
import dynet as dy
import os
# Reconstructions and generated samples are written under ./results.
if not os.path.exists('results'):
    os.makedirs('results')

parser = argparse.ArgumentParser(description='VAE MNIST Example')
parser.add_argument('--batch-size', type=int, default=128, metavar='N',
                    help='input batch size for training (default: 128)')
parser.add_argument('--epochs', type=int, default=10, metavar='N',
                    help='number of epochs to train (default: 10)')
# NOTE(review): the --dynet-* flags appear to mirror DyNet's own
# command-line options (parsed by DyNet at import time); declared here
# so argparse accepts them -- confirm against DyNet docs.
parser.add_argument('--dynet-gpu', action='store_true', default=False,
                    help='enables DyNet CUDA training')
parser.add_argument('--dynet-gpus', type=int, default=1, metavar='N',
                    help='number of gpu devices to use')
parser.add_argument('--dynet-seed', type=int, default=None, metavar='N',
                    help='random seed (default: random inside DyNet)')
parser.add_argument('--dynet-mem', type=int, default=None, metavar='N',
                    help='allocating memory (default: default of DyNet 512MB)')
parser.add_argument('--log-interval', type=int, default=10, metavar='N',
                    help='how many batches to wait before logging training status')
args = parser.parse_args()

# MNIST digits loaded via the local utils helper (stored in ./data).
train_data = load_mnist('training', './data')
batch_size = args.batch_size
test_data = load_mnist('testing', './data')
def generate_batch_loader(data, batch_size):
    """Yield consecutive batches of ``batch_size`` samples as numpy arrays.

    Trailing samples that do not fill a complete batch are deliberately
    dropped so that every yielded batch has a uniform shape.
    """
    total = len(data)
    for start in range(0, total - batch_size + 1, batch_size):
        yield np.asarray(data[start:start + batch_size])
    # A final short batch (total % batch_size samples) is intentionally
    # skipped -- re-enable here if partial batches are ever wanted.
class DynetLinear:
    """Affine layer (W*x + b) whose parameters live in a ParameterCollection."""

    def __init__(self, dim_in, dim_out, dyParameterCollection):
        assert(isinstance(dyParameterCollection, dy.ParameterCollection))
        self.dim_in = dim_in
        self.dim_out = dim_out
        # Persistent (graph-independent) parameters.
        self.pW = dyParameterCollection.add_parameters((dim_out, dim_in))
        self.pb = dyParameterCollection.add_parameters((dim_out))

    def __call__(self, x):
        assert(isinstance(x, dy.Expression))
        # Re-attach the parameters to the *current* computation graph as
        # expressions; needed after every dy.renew_cg().
        self.W = dy.parameter(self.pW) # add parameters to graph as expressions # m2.add_parameters((8, len(inputs)))
        self.b = dy.parameter(self.pb)
        self.x = x
        return self.W * self.x + self.b


# Single global collection holding every trainable parameter of the model.
pc = dy.ParameterCollection()
class VAE:
    """Variational autoencoder for flattened 28x28 MNIST images.

    Encoder 784-400-(20, 20) producing mean and log-variance of the
    latent distribution; decoder 20-400-784 with a sigmoid output
    (cf. Kingma & Welling, "Auto-Encoding Variational Bayes").
    """

    def __init__(self, dyParameterCollection):
        assert (isinstance(dyParameterCollection, dy.ParameterCollection))
        self.fc1 = DynetLinear(784, 400, dyParameterCollection)
        self.fc21 = DynetLinear(400, 20, dyParameterCollection)  # mu head
        self.fc22 = DynetLinear(400, 20, dyParameterCollection)  # logvar head
        self.fc3 = DynetLinear(20, 400, dyParameterCollection)
        self.fc4 = DynetLinear(400, 784, dyParameterCollection)
        self.relu = dy.rectify
        self.sigmoid = dy.logistic
        # Toggled externally by train()/test(); controls reparameterization.
        self.training = False

    def encode(self, x):
        h1 = self.relu(self.fc1(x))
        return self.fc21(h1), self.fc22(h1)

    def reparameterize(self, mu, logvar):
        # Training: sample z = mu + sigma * eps (sigma = exp(logvar / 2)).
        # Evaluation: return the mean deterministically.
        if self.training:
            std = dy.exp(logvar * 0.5)
            eps = dy.random_normal(dim=std.dim()[0], mean=0.0, stddev=1.0)
            return dy.cmult(eps, std) + mu
        else:
            return mu

    def decode(self, z):
        h3 = self.relu(self.fc3(z))
        return self.sigmoid(self.fc4(h3))

    def forward(self, x):
        assert(isinstance(x, dy.Expression))
        mu, logvar = self.encode(x)
        z = self.reparameterize(mu, logvar)
        return self.decode(z), mu, logvar
model = VAE(pc)
optimizer = dy.AdamTrainer(pc, alpha=1e-3) # alpha: initial learning rate


# # Reconstruction + KL divergence losses summed over all elements and batch
def loss_function(recon_x, x, mu, logvar):
    """VAE loss: binary cross-entropy reconstruction plus KL divergence."""
    BCE = dy.binary_log_loss(recon_x, x) # equiv to torch.nn.functional.binary_cross_entropy(?,?, size_average=False)
    # see Appendix B from VAE paper:
    # Kingma and Welling. Auto-Encoding Variational Bayes. ICLR, 2014
    # https://arxiv.org/abs/1312.6114
    # 0.5 * sum(1 + log(sigma^2) - mu^2 - sigma^2)
    KLD = -0.5 * dy.sum_elems(1 + logvar - dy.pow(mu, dy.scalarInput(2)) - dy.exp(logvar))
    return BCE + KLD
def train(epoch):
    """Run one training epoch over the full MNIST training set."""
    model.training = True
    train_loss = 0
    train_loader = generate_batch_loader(train_data, batch_size=batch_size)
    for batch_idx, data in enumerate(train_loader):
        # Dynamic graph construction: one fresh graph per batch.
        dy.renew_cg()
        # Flatten each image to a 784-vector; transposed so columns are
        # samples (presumably DyNet's batch layout -- confirm).
        x = dy.inputTensor(data.reshape(-1, 784).T)
        recon_x, mu, logvar = model.forward(x)
        loss = loss_function(recon_x, x, mu, logvar)
        # Forward
        loss_value = loss.value()
        train_loss += loss_value
        # Backward
        loss.backward()
        optimizer.update()
        if batch_idx % args.log_interval == 0:
            print('Train Epoch: {} [{}/{} ({:.0f}%)]\tLoss: {:.6f}'.format(
                epoch, batch_idx * len(data), len(train_data),
                100. * batch_idx / (len(train_data) / batch_size),
                loss_value / len(data)))
    print('====> Epoch: {} Average loss: {:.4f}'.format(
          epoch, train_loss / len(train_data)))
def test(epoch):
    """Evaluate on the test set; save a reconstruction grid for batch 0."""
    model.training = False
    test_loss = 0
    test_loader = generate_batch_loader(test_data, batch_size=batch_size)
    for i, data in enumerate(test_loader):
        # Dynamic graph construction: one fresh graph per batch.
        dy.renew_cg()
        x = dy.inputTensor(data.reshape(-1, 784).T)
        recon_x, mu, logvar = model.forward(x)
        loss = loss_function(recon_x, x, mu, logvar)
        # Forward
        loss_value = loss.value()
        test_loss += loss_value
        if i == 0:
            # Save up to 8 originals stacked with their reconstructions.
            n = min(data.shape[0], 8)
            comparison = np.concatenate([data[:n],
                recon_x.npvalue().T.reshape(batch_size, 1, 28, 28)[:n]])
            save_image(comparison,
                       'results/reconstruction_' + str(epoch) + '.png', nrow=n)
    test_loss /= len(test_data)
    print('====> Test set loss: {:.4f}'.format(test_loss))
import time

# Train/evaluate for args.epochs epochs, timing each one, and write a
# grid of decoder samples drawn from the prior after every epoch.
tictocs = []
for epoch in range(1, args.epochs + 1):
    tic = time.time()
    train(epoch)
    test(epoch)
    # 64 latent vectors z ~ N(0, I), latent dim 20.
    # NOTE(review): the (20, 64) shape presumably matches DyNet's
    # column-per-sample batch layout -- confirm against decode()'s input.
    sample = dy.inputTensor(np.random.randn(20, 64))
    sample = model.decode(sample)
    save_image(sample.npvalue().T.reshape(64, 1, 28, 28),
               'results/sample_' + str(epoch) + '.png')
    toc = time.time()
    tictocs.append(toc - tic)

# Report wall-clock totals with a standard error on the per-epoch mean.
print('############\n\n')
print('Total Time Cost:', np.sum(tictocs))
print('Epoch Time Cost', np.average(tictocs), '+-', np.std(tictocs) / np.sqrt(len(tictocs)))
print('\n\n############')
| apache-2.0 |
stinebuu/nest-simulator | extras/help_generator/generate_help.py | 16 | 5251 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# generate_help.py
#
# This file is part of NEST.
#
# Copyright (C) 2004 The NEST Initiative
#
# NEST is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
#
# NEST is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with NEST. If not, see <http://www.gnu.org/licenses/>.
"""
Generate NEST help files
========================
Scan all source files for documentation and build the help files.
The helpindex is built during installation in a separate step.
"""
import os
import html
import io
import re
import sys
import textwrap
from writers import coll_data
from helpers import check_ifdef, create_helpdirs, cut_it
from helpers import delete_helpdir
from helpers import help_generation_required
# --- Command-line handling and output-tree preparation ---------------------
if len(sys.argv) != 3:
    print("Usage: python3 generate_help.py <source_dir> <build_dir>")
    sys.exit(1)

source_dir, build_dir = sys.argv[1:]
helpdir = os.path.join(build_dir, "doc", "help")

# Start from a clean output tree; bail out early when help generation
# is not required for this build.
delete_helpdir(helpdir)

if not help_generation_required():
    sys.exit(0)

create_helpdirs(helpdir)

# Collect every documentable source file (SLI, C++, headers, Python),
# skipping editor backup files (".#...").
allfiles = []
for dirpath, dirnames, files in os.walk(source_dir):
    for f in files:
        if f.endswith((".sli", ".cpp", ".cc", ".h", ".py")) and \
                not f.startswith(".#"):
            allfiles.append(os.path.join(dirpath, f))

num = 0
full_list = []
sli_command_list = []
cc_command_list = []
index_dic_list = []

# which keywords to ignore: can put e.g. doxygen commands here
keywords_ignore = ["@ingroup"]

# Section headers recognized inside a @BeginDocumentation block.
keywords = ["Name:", "Synopsis:", "Examples:", "Description:", "Parameters:",
            "Options:", "Requires:", "Require:", "Receives:", "Transmits:",
            "Sends:", "Variants:", "Bugs:", "Diagnostics:", "Remarks:",
            "Availability:", "References:", "SeeAlso:", "Author:", "Authors:",
            "FirstVersion:", "Source:"]

# Now begin to collect the data for the help files and start generating.
# Matches a /* ... @BeginDocumentation ... */ comment, capturing its body.
dcs = r'\/\**\s*@BeginDocumentation[\s?]*\:?[\s?]*[.?]*\n(.*?)\n*?\*\/'

# compile the sli command list: every "Name:" entry found in .sli files
for fname in allfiles:
    if fname.endswith('.sli'):
        f = io.open(fname, encoding='utf-8')
        filetext = f.read()
        f.close()
        items = re.findall(dcs, filetext, re.DOTALL)
        for item in items:
            for line in item.splitlines():
                name_line = re.findall(r"([\s*]?Name[\s*]?\:)(.*)", line)
                if name_line:
                    # Clean the Name: line!
                    name_line_0 = name_line[0][0].strip()
                    name_line_1 = name_line[0][1].strip()
                    # The command name is the part before " - ".
                    sliname = cut_it(' - ', name_line_1)[0]
                    sli_command_list.append(sliname)
# Broader variant of the @BeginDocumentation pattern used for the main
# extraction pass (tolerates more decoration around the marker).
dcs = r'\/\*[(\*|\s)?]*[\n?]*@BeginDocumentation' \
      r'[\s?]*\:?[\s?]*[.?]*\n(.*?)\n*?\*\/'

for fname in allfiles:
    # .py is for future use
    if not fname.endswith('.py'):
        f = io.open(fname, encoding='utf-8')
        filetext = f.read()
        f.close()
        # Multiline matching to find codeblock
        items = re.findall(dcs, filetext, re.DOTALL)
        for item in items:
            # remove paragraph if this keyword is to be ignored
            for kw in keywords_ignore:
                item = re.sub(r"(" + kw + ".+?\n\n|" + kw + ".+?$)", "", item,
                              flags=re.DOTALL)
            # Check the ifdef in code: conditionally-compiled items get
            # an extra "Require:" section prepended.
            require = check_ifdef(item, filetext, dcs)
            if require:
                item = '\n\nRequire: ' + require + item
            alllines = []
            s = " ######\n"  # NOTE(review): appears unused -- verify before removing
            for line in item.splitlines():
                name_line = re.findall(r"([\s*]?Name[\s*]?\:)(.*)", line)
                if name_line:
                    # Clean the Name: line
                    name_line_0 = name_line[0][0].strip()
                    name_line_1 = name_line[0][1].strip()
                    line = name_line_0 + ' ' + name_line_1
                line = textwrap.dedent(line).strip()
                # Tricks for the blanks: HTML-escape, then turn list
                # bullets and @note markers into a bullet character.
                line = html.escape(line)
                line = re.sub('^(\s)*- ', ' • ', line)
                line = re.sub('^(\s)*@note', ' • ', line)
                alllines.append(line)
            item = '\n'.join(alllines)
            num += 1
            # Split the cleaned text into {section keyword: body} pairs.
            documentation = {}
            split_items = re.split("(^|\n)(" + "|".join(keywords) + ")", item)
            keyword_curr = ""
            for i, token in enumerate(split_items):
                if token in keywords:
                    keyword_curr = token
                    documentation[keyword_curr] = ""
                else:
                    if keyword_curr in documentation:
                        documentation[keyword_curr] += " " + token
            all_data = coll_data(keywords, documentation, num, helpdir, fname,
                                 sli_command_list)
| gpl-2.0 |
abazad/SizotrixKernel | scripts/gcc-wrapper.py | 1276 | 3382 | #! /usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright (c) 2011-2012, The Linux Foundation. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of The Linux Foundation nor
# the names of its contributors may be used to endorse or promote
# products derived from this software without specific prior written
# permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NON-INFRINGEMENT ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
# OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
# OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
# ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# Invoke gcc, looking for warnings, and causing a failure if there are
# non-whitelisted warnings.
import errno
import re
import os
import sys
import subprocess
# Note that gcc uses unicode, which may depend on the locale. TODO:
# force LANG to be set to en_US.UTF-8 to get consistent warnings.
# Warnings that are explicitly permitted; entries are "<file>:<line>"
# strings matched against group 2 of warning_re below.
allowed_warnings = set([
    "return_address.c:62",
 ])

# Capture the name of the object file, can find it.
ofile = None

# Matches "<path/>file.ext:NN(:CC): warning:" lines emitted by gcc;
# group 2 is the "file.ext:NN" key checked against allowed_warnings.
warning_re = re.compile(r'''(.*/|)([^/]+\.[a-z]+:\d+):(\d+:)? warning:''')

def interpret_warning(line):
    """Decode the message from gcc.  The messages we care about have a filename, and a warning"""
    # NOTE: this file is Python 2 (print statements below).
    line = line.rstrip('\n')
    m = warning_re.match(line)
    if m and m.group(2) not in allowed_warnings:
        print "error, forbidden warning:", m.group(2)

        # If there is a warning, remove any object if it exists.
        if ofile:
            try:
                os.remove(ofile)
            except OSError:
                pass
        sys.exit(1)
def run_gcc():
    """Run the real compiler (sys.argv[1:]) and scan its stderr for warnings.

    Returns the compiler's exit status, or an errno value if the
    compiler could not be executed at all.
    """
    args = sys.argv[1:]
    # Look for -o so interpret_warning() knows which object file to
    # delete when a forbidden warning is seen.
    try:
        i = args.index('-o')
        global ofile
        ofile = args[i+1]
    except (ValueError, IndexError):
        pass

    compiler = sys.argv[0]  # NOTE(review): appears unused -- verify before removing
    try:
        proc = subprocess.Popen(args, stderr=subprocess.PIPE)
        # Echo stderr through while checking each line for warnings.
        for line in proc.stderr:
            print line,
            interpret_warning(line)

        result = proc.wait()
    except OSError as e:
        result = e.errno
        if result == errno.ENOENT:
            print args[0] + ':',e.strerror
            print 'Is your PATH set correctly?'
        else:
            print ' '.join(args), str(e)
    return result

if __name__ == '__main__':
    status = run_gcc()
    sys.exit(status)
| gpl-2.0 |
wiki-ai/revscoring | revscoring/features/wikitext/features/tokenized.py | 1 | 22071 | from revscoring.datasources.meta import dicts, filters, mappers
from ...meta import aggregators
from . import base
class Revision(base.BaseRevision):
    """Token-count features computed over a single revision's wikitext."""

    def __init__(self, name, revision_datasources, tokens_datasource=None):
        super().__init__(name, revision_datasources)

        if tokens_datasource is None:
            # Default tokenization: also expose a nested sub-collection of
            # features computed after CJK tokenization.  Passing a non-None
            # tokens_datasource to the nested instance stops the recursion
            # at one level; within this method the argument serves only as
            # that guard.
            self.cjk = Revision(self._name + ".cjk", revision_datasources.cjk, tokens_datasource='CJK')
            "`int` : Features in the revision after the CJK tokenization"

        # One aggregator per token class; the bare strings below are
        # attribute docstrings consumed by the documentation tooling.
        self.tokens = aggregators.len(self.datasources.tokens)
        "`int` : The number of tokens in the revision"

        self.numbers = aggregators.len(self.datasources.numbers)
        "`int` : The number of number tokens in the revision"

        self.whitespaces = aggregators.len(self.datasources.whitespaces)
        "`int` : The number of whitespace tokens in the revision"

        self.markups = aggregators.len(self.datasources.markups)
        "`int` : The number of markup tokens in the revision"

        self.cjks = aggregators.len(self.datasources.cjks)
        "`int` : The number of Chinese/Japanese/Korean tokens in the revision"

        self.entities = aggregators.len(self.datasources.entities)
        "`int` : The number of HTML entity tokens in the revision"

        self.urls = aggregators.len(self.datasources.urls)
        "`int` : The number of URL tokens in the revision"

        self.words = aggregators.len(self.datasources.words)
        "`int` : The number of word tokens in the revision"

        self.uppercase_words = \
            aggregators.len(self.datasources.uppercase_words)
        "`int` : The number of UPPERCASE word tokens in the revision"

        self.punctuations = aggregators.len(self.datasources.punctuations)
        "`int` : The number of punctuation tokens in the revision"

        self.breaks = aggregators.len(self.datasources.breaks)
        "`int` : The number of break tokens in the revision"

        # Maximum token length, computed by mapping len() over the tokens.
        self.longest_token = aggregators.max(
            mappers.map(len, self.datasources.tokens), returns=int)
        "`int` : The longest single token in the revision"

        self.longest_word = aggregators.max(
            mappers.map(len, self.datasources.words), returns=int)
        "`int` : The longest single word-token in the revision"
class Diff(base.BaseDiff):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.token_delta_sum = aggregators.sum(
dicts.values(self.datasources.token_delta),
name=self._name + ".token_delta_sum"
)
"`int` : The sum of delta changes in the token frequency table"
self.token_delta_increase = aggregators.sum(
filters.positive(dicts.values(self.datasources.token_delta)),
name=self._name + ".token_delta_increase"
)
"`int` : The sum of delta increases in the token frequency table"
self.token_delta_decrease = aggregators.sum(
filters.negative(dicts.values(self.datasources.token_delta)),
name=self._name + ".token_delta_decrease"
)
"`int` : The sum of delta decreases in the token frequency table"
self.token_prop_delta_sum = aggregators.sum(
dicts.values(self.datasources.token_prop_delta),
name=self._name + ".token_prop_delta_sum"
)
"""
`int` : The sum of proportional delta changes in the token
frequency table
"""
self.token_prop_delta_increase = aggregators.sum(
filters.positive(dicts.values(self.datasources.token_prop_delta)),
name=self._name + ".token_prop_delta_increase"
)
"""
`int` : The sum of proportional delta increases in the token
frequency table
"""
self.token_prop_delta_decrease = aggregators.sum(
filters.negative(dicts.values(self.datasources.token_prop_delta)),
name=self._name + ".token_prop_delta_decrease"
)
"""
`int` : The sum of proportional delta decreases in the token
frequency table
"""
# number
self.number_delta_sum = aggregators.sum(
dicts.values(self.datasources.number_delta),
name=self._name + ".number_delta_sum"
)
"`int` : The sum of delta changes in the number frequency table"
self.number_delta_increase = aggregators.sum(
filters.positive(dicts.values(self.datasources.number_delta)),
name=self._name + ".number_delta_increase"
)
"`int` : The sum of delta increases in the number frequency table"
self.number_delta_decrease = aggregators.sum(
filters.negative(dicts.values(self.datasources.number_delta)),
name=self._name + ".number_delta_decrease"
)
"`int` : The sum of delta decreases in the number frequency table"
self.number_prop_delta_sum = aggregators.sum(
dicts.values(self.datasources.number_prop_delta),
name=self._name + ".number_prop_delta_sum"
)
"""
`int` : The sum of proportional delta changes in the number
frequency table
"""
self.number_prop_delta_increase = aggregators.sum(
filters.positive(dicts.values(self.datasources.number_prop_delta)),
name=self._name + ".number_prop_delta_increase"
)
"""
`int` : The sum of proportional delta increases in the number
frequency table
"""
self.number_prop_delta_decrease = aggregators.sum(
filters.negative(dicts.values(self.datasources.number_prop_delta)),
name=self._name + ".number_prop_delta_decrease"
)
"""
`int` : The sum of proportional delta decreases in the number
frequency table
"""
# whitespace
self.whitespace_delta_sum = aggregators.sum(
dicts.values(self.datasources.whitespace_delta),
name=self._name + ".whitespace_delta_sum"
)
"`int` : The sum of delta changes in the whitespace frequency table"
self.whitespace_delta_increase = aggregators.sum(
filters.positive(dicts.values(self.datasources.whitespace_delta)),
name=self._name + ".whitespace_delta_increase"
)
"`int` : The sum of delta increases in the whitespace frequency table"
self.whitespace_delta_decrease = aggregators.sum(
filters.negative(dicts.values(self.datasources.whitespace_delta)),
name=self._name + ".whitespace_delta_decrease"
)
"`int` : The sum of delta decreases in the whitespace frequency table"
self.whitespace_prop_delta_sum = aggregators.sum(
dicts.values(self.datasources.whitespace_prop_delta),
name=self._name + ".whitespace_prop_delta_sum"
)
"""
`int` : The sum of proportional delta changes in the whitespace
frequency table
"""
self.whitespace_prop_delta_increase = aggregators.sum(
filters.positive(dicts.values(
self.datasources.whitespace_prop_delta)),
name=self._name + ".whitespace_prop_delta_increase"
)
"""
`int` : The sum of proportional delta increases in the whitespace
frequency table
"""
self.whitespace_prop_delta_decrease = aggregators.sum(
filters.negative(dicts.values(
self.datasources.whitespace_prop_delta)),
name=self._name + ".whitespace_prop_delta_decrease"
)
"""
`int` : The sum of proportional delta decreases in the whitespace
frequency table
"""
# markup
self.markup_delta_sum = aggregators.sum(
dicts.values(self.datasources.markup_delta),
name=self._name + ".markup_delta_sum"
)
"`int` : The sum of delta changes in the markup frequency table"
self.markup_delta_increase = aggregators.sum(
filters.positive(dicts.values(self.datasources.markup_delta)),
name=self._name + ".markup_delta_increase"
)
"`int` : The sum of delta increases in the markup frequency table"
self.markup_delta_decrease = aggregators.sum(
filters.negative(dicts.values(self.datasources.markup_delta)),
name=self._name + ".markup_delta_decrease"
)
"`int` : The sum of delta decreases in the markup frequency table"
self.markup_prop_delta_sum = aggregators.sum(
dicts.values(self.datasources.markup_prop_delta),
name=self._name + ".markup_prop_delta_sum"
)
"""
`int` : The sum of proportional delta changes in the markup
frequency table
"""
self.markup_prop_delta_increase = aggregators.sum(
filters.positive(dicts.values(self.datasources.markup_prop_delta)),
name=self._name + ".markup_prop_delta_increase"
)
"""
`int` : The sum of proportional delta increases in the markup
frequency table
"""
self.markup_prop_delta_decrease = aggregators.sum(
filters.negative(dicts.values(self.datasources.markup_prop_delta)),
name=self._name + ".markup_prop_delta_decrease"
)
"""
`int` : The sum of proportional delta decreases in the markup
frequency table
"""
# cjk
self.cjk_delta_sum = aggregators.sum(
dicts.values(self.datasources.cjk_delta),
name=self._name + ".cjk_delta_sum"
)
"`int` : The sum of delta changes in the cjk frequency table"
self.cjk_delta_increase = aggregators.sum(
filters.positive(dicts.values(self.datasources.cjk_delta)),
name=self._name + ".cjk_delta_increase"
)
"`int` : The sum of delta increases in the cjk frequency table"
self.cjk_delta_decrease = aggregators.sum(
filters.negative(dicts.values(self.datasources.cjk_delta)),
name=self._name + ".cjk_delta_decrease"
)
"`int` : The sum of delta decreases in the cjk frequency table"
self.cjk_prop_delta_sum = aggregators.sum(
dicts.values(self.datasources.cjk_prop_delta),
name=self._name + ".cjk_prop_delta_sum"
)
"""
`int` : The sum of proportional delta changes in the cjk
frequency table
"""
self.cjk_prop_delta_increase = aggregators.sum(
filters.positive(dicts.values(self.datasources.cjk_prop_delta)),
name=self._name + ".cjk_prop_delta_increase"
)
"""
`int` : The sum of proportional delta increases in the cjk
frequency table
"""
self.cjk_prop_delta_decrease = aggregators.sum(
filters.negative(dicts.values(self.datasources.cjk_prop_delta)),
name=self._name + ".cjk_prop_delta_decrease"
)
"""
`int` : The sum of proportional delta decreases in the cjk
frequency table
"""
# entity
self.entity_delta_sum = aggregators.sum(
dicts.values(self.datasources.entity_delta),
name=self._name + ".entity_delta_sum"
)
"`int` : The sum of delta changes in the entity frequency table"
self.entity_delta_increase = aggregators.sum(
filters.positive(dicts.values(self.datasources.entity_delta)),
name=self._name + ".entity_delta_increase"
)
"`int` : The sum of delta increases in the entity frequency table"
self.entity_delta_decrease = aggregators.sum(
filters.negative(dicts.values(self.datasources.entity_delta)),
name=self._name + ".entity_delta_decrease"
)
"`int` : The sum of delta decreases in the entity frequency table"
self.entity_prop_delta_sum = aggregators.sum(
dicts.values(self.datasources.entity_prop_delta),
name=self._name + ".entity_prop_delta_sum"
)
"""
`int` : The sum of proportional delta changes in the entity
frequency table
"""
self.entity_prop_delta_increase = aggregators.sum(
filters.positive(dicts.values(self.datasources.entity_prop_delta)),
name=self._name + ".entity_prop_delta_increase"
)
"""
`int` : The sum of proportional delta increases in the entity
frequency table
"""
self.entity_prop_delta_decrease = aggregators.sum(
filters.negative(dicts.values(self.datasources.entity_prop_delta)),
name=self._name + ".entity_prop_delta_decrease"
)
"""
`int` : The sum of proportional delta decreases in the entity
frequency table
"""
# url
self.url_delta_sum = aggregators.sum(
dicts.values(self.datasources.url_delta),
name=self._name + ".url_delta_sum"
)
"`int` : The sum of delta changes in the url frequency table"
self.url_delta_increase = aggregators.sum(
filters.positive(dicts.values(self.datasources.url_delta)),
name=self._name + ".url_delta_increase"
)
"`int` : The sum of delta increases in the url frequency table"
self.url_delta_decrease = aggregators.sum(
filters.negative(dicts.values(self.datasources.url_delta)),
name=self._name + ".url_delta_decrease"
)
"`int` : The sum of delta decreases in the url frequency table"
self.url_prop_delta_sum = aggregators.sum(
dicts.values(self.datasources.url_prop_delta),
name=self._name + ".url_prop_delta_sum"
)
"""
`int` : The sum of proportional delta changes in the url
frequency table
"""
self.url_prop_delta_increase = aggregators.sum(
filters.positive(dicts.values(self.datasources.url_prop_delta)),
name=self._name + ".url_prop_delta_increase"
)
"""
`int` : The sum of proportional delta increases in the url
frequency table
"""
self.url_prop_delta_decrease = aggregators.sum(
filters.negative(dicts.values(self.datasources.url_prop_delta)),
name=self._name + ".url_prop_delta_decrease"
)
"""
`int` : The sum of proportional delta decreases in the url
frequency table
"""
# word
self.word_delta_sum = aggregators.sum(
dicts.values(self.datasources.word_delta),
name=self._name + ".word_delta_sum"
)
"`int` : The sum of delta changes in the word frequency table"
self.word_delta_increase = aggregators.sum(
filters.positive(dicts.values(self.datasources.word_delta)),
name=self._name + ".word_delta_increase"
)
"`int` : The sum of delta increases in the word frequency table"
self.word_delta_decrease = aggregators.sum(
filters.negative(dicts.values(self.datasources.word_delta)),
name=self._name + ".word_delta_decrease"
)
"`int` : The sum of delta decreases in the word frequency table"
self.word_prop_delta_sum = aggregators.sum(
dicts.values(self.datasources.word_prop_delta),
name=self._name + ".word_prop_delta_sum"
)
"""
`int` : The sum of proportional delta changes in the word
frequency table
"""
self.word_prop_delta_increase = aggregators.sum(
filters.positive(dicts.values(self.datasources.word_prop_delta)),
name=self._name + ".word_prop_delta_increase"
)
"""
`int` : The sum of proportional delta increases in the word
frequency table
"""
self.word_prop_delta_decrease = aggregators.sum(
filters.negative(dicts.values(self.datasources.word_prop_delta)),
name=self._name + ".word_prop_delta_decrease"
)
"""
`int` : The sum of proportional delta decreases in the word
frequency table
"""
# UPPERCASE word
uppercase_word_delta_values = \
dicts.values(self.datasources.uppercase_word_delta)
self.uppercase_word_delta_sum = aggregators.sum(
uppercase_word_delta_values,
name=self._name + ".uppercase_word_delta_sum"
)
"""
`int` : The sum of delta changes in the UPPERCASE word frequency
table
"""
self.uppercase_word_delta_increase = aggregators.sum(
filters.positive(uppercase_word_delta_values),
name=self._name + ".uppercase_word_delta_increase"
)
"""
`int` : The sum of delta increases in the UPPERCASE word frequency
table
"""
self.uppercase_word_delta_decrease = aggregators.sum(
filters.negative(uppercase_word_delta_values),
name=self._name + ".uppercase_word_delta_decrease"
)
"""
`int` : The sum of delta decreases in the UPPERCASE word frequency
table
"""
uppercase_word_prop_delta_values = \
dicts.values(self.datasources.uppercase_word_prop_delta)
self.uppercase_word_prop_delta_sum = aggregators.sum(
uppercase_word_prop_delta_values,
name=self._name + ".uppercase_word_prop_delta_sum"
)
"""
`float` : The sum of proportional delta changes in the UPPERCASE word
frequency table
"""
self.uppercase_word_prop_delta_increase = aggregators.sum(
filters.positive(uppercase_word_prop_delta_values),
name=self._name + ".uppercase_word_prop_delta_increase"
)
"""
`float` : The sum of proportional delta increases in the UPPERCASE word
frequency table
"""
self.uppercase_word_prop_delta_decrease = aggregators.sum(
filters.negative(uppercase_word_prop_delta_values),
name=self._name + ".uppercase_word_prop_delta_decrease"
)
"""
`float` : The sum of proportional delta decreases in the UPPERCASE word
frequency table
"""
# punctuation
self.punctuation_delta_sum = aggregators.sum(
dicts.values(self.datasources.punctuation_delta),
name=self._name + ".punctuation_delta_sum"
)
"`int` : The sum of delta changes in the punctuation frequency table"
self.punctuation_delta_increase = aggregators.sum(
filters.positive(dicts.values(self.datasources.punctuation_delta)),
name=self._name + ".punctuation_delta_increase"
)
"`int` : The sum of delta increases in the punctuation frequency table"
self.punctuation_delta_decrease = aggregators.sum(
filters.negative(dicts.values(self.datasources.punctuation_delta)),
name=self._name + ".punctuation_delta_decrease"
)
"`int` : The sum of delta decreases in the punctuation frequency table"
self.punctuation_prop_delta_sum = aggregators.sum(
dicts.values(self.datasources.punctuation_prop_delta),
name=self._name + ".punctuation_prop_delta_sum"
)
"""
`int` : The sum of proportional delta changes in the punctuation
frequency table
"""
self.punctuation_prop_delta_increase = aggregators.sum(
filters.positive(dicts.values(
self.datasources.punctuation_prop_delta)),
name=self._name + ".punctuation_prop_delta_increase"
)
"""
`int` : The sum of proportional delta increases in the punctuation
frequency table
"""
self.punctuation_prop_delta_decrease = aggregators.sum(
filters.negative(dicts.values(
self.datasources.punctuation_prop_delta)),
name=self._name + ".punctuation_prop_delta_decrease"
)
"""
`int` : The sum of proportional delta decreases in the punctuation
frequency table
"""
# break
self.break_delta_sum = aggregators.sum(
dicts.values(self.datasources.break_delta),
name=self._name + ".break_delta_sum"
)
"`int` : The sum of delta changes in the break frequency table"
self.break_delta_increase = aggregators.sum(
filters.positive(dicts.values(self.datasources.break_delta)),
name=self._name + ".break_delta_increase"
)
"`int` : The sum of delta increases in the break frequency table"
self.break_delta_decrease = aggregators.sum(
filters.negative(dicts.values(self.datasources.break_delta)),
name=self._name + ".break_delta_decrease"
)
"`int` : The sum of delta decreases in the break frequency table"
self.break_prop_delta_sum = aggregators.sum(
dicts.values(self.datasources.break_prop_delta),
name=self._name + ".break_prop_delta_sum"
)
"""
`int` : The sum of proportional delta changes in the break
frequency table
"""
self.break_prop_delta_increase = aggregators.sum(
filters.positive(dicts.values(self.datasources.break_prop_delta)),
name=self._name + ".break_prop_delta_increase"
)
"""
`int` : The sum of proportional delta increases in the break
frequency table
"""
self.break_prop_delta_decrease = aggregators.sum(
filters.negative(dicts.values(self.datasources.break_prop_delta)),
name=self._name + ".break_prop_delta_decrease"
)
"""
`int` : The sum of proportional delta decreases in the break
frequency table
"""
| mit |
akatrevorjay/pyrox | pyrox/log.py | 3 | 1268 | import logging
# Level *name* used to reset per-module loggers so they inherit the root
# logger's effective level (logging.Logger.setLevel accepts level names
# as well as their integer values).
_LOG_LEVEL_NOTSET = 'NOTSET'
def get_logger(logger_name):
    """Return the logger for *logger_name* from the shared manager."""
    manager = _LOGGING_MANAGER
    return manager.get_logger(logger_name)
def get_log_manager():
    """Expose the process-wide :class:`LoggingManager` singleton."""
    return _LOGGING_MANAGER
class LoggingManager(object):
    """Owns the root logger's handler set and applies logging configuration.

    Handlers installed through this manager are tracked so that a later
    ``configure`` call can remove exactly those handlers before installing
    a fresh set, without disturbing handlers added by other code.
    """

    def __init__(self):
        self._root_logger = logging.getLogger()
        # Only handlers *we* attached; configure() removes exactly these.
        self._handlers = list()

    def _add_handler(self, handler):
        """Attach *handler* to the root logger and remember it."""
        self._handlers.append(handler)
        self._root_logger.addHandler(handler)

    def _clean_handlers(self):
        """Remove every handler this manager previously installed."""
        # Plain loop (not a comprehension): removeHandler is called purely
        # for its side effect.
        for handler in self._handlers:
            self._root_logger.removeHandler(handler)
        del self._handlers[:]

    def configure(self, cfg):
        """Re-apply logging settings from *cfg*.

        Assumes ``cfg.logging`` exposes ``verbosity``, ``logfile`` and
        ``console`` attributes -- TODO confirm against the config schema.
        """
        self._clean_handlers()
        self._root_logger.setLevel(cfg.logging.verbosity)
        if cfg.logging.logfile is not None:
            self._add_handler(logging.FileHandler(cfg.logging.logfile))
        # `is True` kept deliberately: console output is enabled only when
        # the config value is exactly the boolean True.
        if cfg.logging.console is True:
            self._add_handler(logging.StreamHandler())

    def get_logger(self, logger_name):
        """Return *logger_name*'s logger, leveled NOTSET so it inherits."""
        logger = logging.getLogger(logger_name)
        logger.setLevel(_LOG_LEVEL_NOTSET)
        return logger
# Shared manager instance. A plain module-level assignment is equivalent
# to (and clearer than) writing through globals().
_LOGGING_MANAGER = LoggingManager()
| mit |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.